Columns:
  query      string  (length 9 – 9.05k)
  document   string  (length 10 – 222k)
  negatives  list    (19 – 20 items)
  metadata   dict
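A minimal sketch of how one row matching this schema could be loaded and inspected, assuming the rows are stored as JSON lines; the file path below is a placeholder, not taken from this page:

from datasets import load_dataset

# Hypothetical path; substitute the actual location of the data.
ds = load_dataset("json", data_files="train.jsonl", split="train")

row = ds[0]
print(row["query"])           # short natural-language description (9 to 9.05k chars)
print(row["document"])        # the matching code snippet (10 to 222k chars)
print(len(row["negatives"]))  # 19 to 20 non-matching snippets
print(row["metadata"])        # training-objective description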
Creates a utf8 encoded file with each argument in arguments on a separate line.
def CreateArgFile( arguments, tmpDir ):
    tmpFile = os.path.join( tmpDir, "args.txt" )

    with io.open( tmpFile, 'w', encoding="utf-8-sig" ) as fileHandle:
        fileHandle.write( "\n".join( arguments ) )

    return tmpFile
[ "def export_ascii(self, filename):", "def writeArgs(args,out):\n\n\t#Make string with all arguments\n\targString = ''\n\tfor arg,value in sorted(vars(args).items()):\n\t\tif type(value) is list:\n\t\t\tif len(value) > 0:\n\t\t\t\tvalue = ','.join(map(str,value))\n\t\t\telse:\n\t\t\t\tvalue = value[0]\n\t\targString += '%s: %s\\n'%(arg,value)\n\n\t#Write out arguments\n\ttry:\n\t\targOut = open('%s_args.txt'%(out), \"w\")\n\t\targOut.write(argString)\n\t\targOut.close()\n\texcept(IOError):\n\t\tprint 'ERROR: Cannot write in output directory. Exiting...'\n\t\tsys.exit()", "def _encode_command2(args):\n s = []\n s.append( '%(pythonarg)s' )\n s.append( aq(mbuild.join(args.xeddir, 'pysrc', 'enc2gen.py')))\n s.append('--xeddir %s' % aq(args.xeddir))\n s.append('--gendir %s' % aq(args.gendir))\n s.extend( args.config.as_args() )\n if args.test_checked_interface:\n s.append('-chk' ) \n s.append('--output-file-list %s' % aq(args.enc2_output_file))\n return ' '.join(s)", "def dump_args(args, outdir='.'):\n with open( Path(outdir)/'args.txt', 'w' ) as file:\n for k, v in args.items():\n file.write('{}: {}\\n'.format(k, v))", "def generate_file_name(*args):\n file_name = \"\"\n for arg in args:\n file_name = f\"{file_name}_{str(arg)}\"\n return file_name", "def create_usdzconvert_arguments(args: list) -> list:\n usdz_converter_path = current_app.config.get('USDZ_CONVERTER_PATH') / \\\n current_app.config.get('USDZ_CONVERTER_SCRIPT_PATH')\n\n arguments = [_get_converter_interpreter_arg(),\n usdz_converter_path.resolve().as_posix()]\n\n for arg in args:\n arguments.append(arg)\n\n return arguments", "def writeString(self, *args):\n out = self.out\n for arg in args:\n self.writeLength(len(arg))\n out.write(arg)", "def encode_unicode():\n f = open(\"complete_datasets/random_dataset.tsv\", \"r\")\n text = f.readlines()\n f.close()\n f = open(\"encoding_attempt/random_dataset.tsv\", \"w\")\n for line in text:\n line = line.decode('ascii')\n f.write(line.encode('utf8')+\"\\n\")\n f.close()", "def mk_txt(name, fs, encoding=\"utf-8\"):\n\n data = dump(fs)\n\n with open(name, \"wb\") as txt:\n for f_name in sorted(data.keys()):\n if f_name:\n txt.write(f_name.encode(encoding))\n txt.write(\"\\n\")\n #\n #\n #", "def writeFile( str_, *args ):\n filePath = path.join( *args )\n with open( filePath, 'w' ) as fd:\n fd.write(str_)", "def utf_8_scripting(header):\n return \"\\n\".join([\"# -*- coding: utf-8 -*-\", \"\", scripting(header)])", "def format_args(args):\n try:\n if args.som:\n som_split = args.som.split('-')\n if len(args.rev) != 2:\n args.rev = args.rev + '0'\n if args.ksx:\n ksx_split = args.ksx.split('-')\n except IOError as err:\n sys.exit('BOM argument is malformed. 
Exiting.')", "def unicode_arg(arg):\n if sys.stdin.encoding:\n return arg.decode(sys.stdin.encoding)\n\n return arg.decode(sys.getfilesystemencoding())", "def encode_arg(arg):\n arg_utf8 = utf8(arg)\n\n return ELEM_SEP.join([str(len(str(arg_utf8))), str(arg_utf8)])", "def read(self, *args):\n return utf8(self._file.read(*args))", "def transform_file_to_utf_8_from(file_path, in_encoding=\"latin1\", out_file_name=\"\"):\n in_file = codecs.open(file_path, encoding=in_encoding)\n in_lines = in_file.readlines()\n if not out_file_name:\n out_file_name = file_path.replace(\".txt\", \".utf8.txt\")\n out_file = codecs.open(out_file_name, \"w+\")\n for line in in_lines:\n out_file.write(line)\n out_file.close()", "def save_args(logdir, args):\n with open(os.path.join(logdir,'args.txt'), 'w') as f:\n for arg, value in vars(args).items():\n f.write('--' + arg +'\\n')\n if isinstance(value, list):\n f.write(\"\\n\".join(str(v) for v in value) + \"\\n\")\n else:\n f.write(str(value) + '\\n')", "def create_helptext_bin(file_path: str, data: list):\n with open(file_path, \"wb\") as f:\n for item in data:\n if len(item[1]) >= 0x90:\n raise ValueError(f\"'{item[1]}' exceeds character count ({len(item[1])}/143\")\n f.write(pack(\">I90s\", item[0], item[1].encode(\"cp932\")))", "def safe_print_unicode(*args, **kwargs):\n sep = kwargs.pop('sep', u' ')\n end = kwargs.pop('end', u'\\n')\n errors = kwargs.pop('errors', 'replace')\n if PY3:\n func = sys.stdout.buffer.write\n else:\n func = sys.stdout.write\n line = sep.join(args) + end\n encoding = sys.stdout.encoding or 'utf8'\n func(line.encode(encoding, errors))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run DeadlineCommand with the specified arguments returning the standard out
def CallDeadlineCommand(arguments, hideWindow=True, useArgFile=False, useDeadlineBg=False, raiseOnExitCode=False):
    deadlineCommand = GetDeadlineCommand( useDeadlineBg )
    tmpdir = None

    if useArgFile or useDeadlineBg:
        tmpdir = tempfile.mkdtemp()

    if useDeadlineBg:
        arguments = [ "-outputfiles", os.path.join( tmpdir, "dlout.txt" ), os.path.join( tmpdir, "dlexit.txt" ) ] + arguments

    startupinfo = None
    creationflags = 0

    if os.name == 'nt':
        if hideWindow:
            # Python 2.6 has subprocess.STARTF_USESHOWWINDOW, and Python 2.7 has subprocess._subprocess.STARTF_USESHOWWINDOW, so check for both.
            if hasattr( subprocess, '_subprocess' ) and hasattr( subprocess._subprocess, 'STARTF_USESHOWWINDOW' ):
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
            elif hasattr( subprocess, 'STARTF_USESHOWWINDOW' ):
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        else:
            # still show top-level windows, but don't show a console window
            CREATE_NO_WINDOW = 0x08000000  # MSDN process creation flag
            creationflags = CREATE_NO_WINDOW

    if useArgFile:
        arguments = [ CreateArgFile( arguments, tmpdir ) ]

    arguments.insert( 0, deadlineCommand )

    # Specifying PIPE for all handles to workaround a Python bug on Windows. The unused handles are then closed immediately afterwards.
    proc = subprocess.Popen( arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=creationflags )
    output, errors = proc.communicate()

    if raiseOnExitCode and proc.returncode != 0:
        try:
            # The quote function was moved to shutil in python 3
            from shutil import quote as shell_quote
        except ImportError:
            # In python 2, quote lived in the pipes module
            from pipes import quote as shell_quote
        cmd = ' '.join([shell_quote(arg) for arg in arguments])
        raise subprocess.CalledProcessError(proc.returncode, cmd, output)

    if useDeadlineBg:
        with io.open( os.path.join( tmpdir, "dlout.txt" ), 'r', encoding='utf-8' ) as fileHandle:
            output = fileHandle.read()

    if tmpdir:
        try:
            shutil.rmtree( tmpdir )
        except:
            print( 'Failed to remove temp directory: "%s"' % tmpdir )

    return output.strip()
[ "def _call_deadline_command_raw(self, arguments):\n # make a copy so we don't mutate the caller's reference\n arguments = list(arguments)\n arguments.insert(0, self._deadline_command_path)\n try:\n proc = subprocess.Popen(\n arguments,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n except:\n raise Exception('Failed to call Deadline.')\n\n output, errors = proc.communicate()\n if proc.returncode != 0:\n raise ValueError('DeadlineCommandError: \\n%s\\n%s' % (output, errors))\n return output.decode('utf8')", "def execute_depot(args):\n with LogBlock(\"Depot\"):\n log.debug(args)\n return run(**vars(args))", "def run(*args, **kwargs):\n kwargs[\"check\"] = True\n print(\"+\", \" \".join(args[0]))\n return subprocess.run(*args, **kwargs)", "def test_with_command_line_arguments(self, arguments):\n fixed_arguments = self.get_argument_string(arguments)\n result = self.run(\n arguments=fixed_arguments,\n timeout=self.full_timeout,\n use_fresh_profile=True)\n return self._handle_test_result(result)", "def execute(args):", "def RunCommandWithOptions():\n pass", "def cli_runner(args):\n config = CLIConfig()\n\n set_logger_levels(args.debug)\n\n LOGGER.info('Issues? Report here: https://github.com/airbnb/streamalert/issues')\n\n cmds = {\n 'app': lambda opts: app_handler(opts, config),\n 'athena': lambda opts: athena_handler(opts, config),\n 'build': lambda opts: terraform_build_handler(opts, config),\n 'clean': lambda opts: terraform_clean_handler(),\n 'configure': lambda opts: configure_handler(opts, config),\n 'create-alarm': lambda opts: _create_alarm_handler(opts, config),\n 'create-cluster-alarm': lambda opts: _create_alarm_handler(opts, config),\n 'custom-metrics': lambda opts: _custom_metrics_handler(opts, config),\n 'deploy': lambda opts: deploy_handler(opts, config),\n 'destroy': lambda opts: terraform_destroy_handler(opts, config),\n 'generate': lambda opts: terraform_generate_handler(config, check_creds=False),\n 'init': lambda opts: terraform_init(opts, config),\n 'kinesis': lambda opts: kinesis_handler(opts, config),\n 'list-targets': lambda opts: terraform_list_targets(config),\n 'output': lambda opts: output_handler(opts, config),\n 'rollback': lambda opts: rollback_handler(opts, config),\n 'rule-staging': lambda opts: rule_staging_handler(opts, config),\n 'status': lambda opts: _status_handler(config),\n 'test': lambda opts: test_handler(opts, config),\n 'threat-intel': lambda opts: _threat_intel_handler(opts, config),\n 'threat-intel-downloader': lambda opts: threat_intel_downloader_handler(opts, config),\n }\n\n result = cmds[args.command](args)\n LOGGER.info('Completed')\n return result", "def tidy_cli(arguments):\n TidyCommandLine().execute_cli(arguments)", "def run(argv: Optional[list[str]] = None) -> tuple[str, str]:\n argv = argv if argv is not None else []\n\n with PipeStream() as stdin:\n stdin.writer.close()\n\n with PipeStream() as stdout:\n with PipeStream() as stderr:\n gada.main(\n [\"gada\"] + argv,\n stdin=stdin.reader,\n stdout=stdout.writer,\n stderr=stderr.writer,\n )\n stdout.writer.close()\n stderr.writer.close()\n return (\n stdout.reader.read().decode(errors=\"ignore\"),\n stderr.reader.read().decode(errors=\"ignore\"),\n )", "def call_das_cli(*args):\n oldarg = deepcopy(sys.argv)\n sys.argv += args\n print sys.argv\n ret = das_cli.main()\n sys.argv = oldarg\n return ret", "def main(argv=None):\n argv = argv or sys.argv[1:]\n args = docopt(__doc__, version=\"Foreman Tools %s\" % foreman_tools.__version__, argv=argv)\n\n 
logging.basicConfig(level=logging.INFO)\n urllib3.disable_warnings()\n\n foreman_url = args[\"--url\"] or DEFAULT_URL\n fs = ForemanSession(foreman_url=foreman_url)\n\n if args[\"list\"] and args[\"hosts\"]:\n # https://www.theforeman.org/api/1.14/apidoc/v2/hosts.html#description-index\n count, hosts = fs.get_all(\"hosts\")\n for host in hosts:\n last_report = datetime.strptime(host[\"last_report\"], \"%Y-%m-%dT%H:%M:%SZ\")\n last_report_age = (datetime.utcnow() - last_report).total_seconds()\n if last_report_age <= 60 * 60:\n age_color = \"green\"\n elif last_report_age <= 2 * 60 * 60:\n age_color = \"yellow\"\n else:\n age_color = \"red\"\n print(\"%-40s %-15s %-10s\" % (host[\"name\"],\n host[\"ip\"],\n ansi_wrap(format_timespan(last_report_age), color=age_color)))\n\n elif args[\"start\"] and args[\"<host>\"]:\n fs.power(args[\"<host>\"], \"start\")\n\n elif args[\"stop\"] and args[\"<host>\"]:\n fs.power(args[\"<host>\"], \"stop\")", "def run(*args):\n try:\n return subprocess.check_output(args,\n shell=False,\n stderr=subprocess.STDOUT).strip()\n except subprocess.CalledProcessError as e:\n fail(\"Command failed: \", args, \"Output:\", e.output)", "def test_print_graveyard_removal(capfd):\n os.system(\"python svalinn_tools/graveyard_removal.py \" + test_file_path + test_file + \" -p\")\n out, err = capfd.readouterr()\n assert (\"12682136550675318127\" in out) == True", "def run_command_line(cls, args):\n if args:\n self = cls.from_args(args)\n if self is not None:\n # call the solver\n if self.run():\n return 0\n\n # failure, output usage message\n print(join(cls._usage(), sep=nl))\n return -1", "def _go(args):\n cmd = Command(args.cmd, args.stdout_path, args.stderr_path)\n succeeded = False \\\n if (cmd.return_code != 0 or (not args.ignore_stderr and cmd.stderr)) \\\n else True\n\n if succeeded is True and args.email_on_success is False:\n # success! 
we're done\n return\n\n mailer = Mailer(args.smtp_host, args.mail_from, args.mail_to, args.mail_subject_prefix, args.testing_email_mode)\n cmd_info = {\n \"result\": \"success\" if succeeded else \"failure\",\n \"name\": args.name,\n \"desc\": \"%s\\n\\n\" % args.desc if args.desc else \"\",\n \"cmd\": args.cmd,\n \"run_time\": cmd.run_time,\n \"return_code\": cmd.return_code,\n \"hostname\": socket.gethostname(),\n \"username\": getpass.getuser(),\n \"outfile\": args.stdout_path or '/dev/null',\n \"outfile_size\": len(cmd.stdout),\n \"errfile\": args.stderr_path or '/dev/null',\n \"errfile_size\": len(cmd.stderr)\n }\n subject = SUBJECT_LINE_TMPL % cmd_info\n body = STANDARD_MSG_TMPL % cmd_info\n if cmd.stderr:\n body += ERROR_MSG_TMPL % _truncate(cmd.stderr, 100000)\n if args.include_stdout_in_email and cmd.stdout:\n body += STDOUT_MSG_TMPL % _truncate(cmd.stdout)\n mailer.send_email(subject, body)", "def test_run_stdout_stderr_exit(self):\n stdoutText = \"output out o text\"\n stderrText = \"error err e text\"\n exitCode = 42\n pythonScript = (\"import sys;\"\n \"sys.stdout.write('%s');\"\n \"sys.stderr.write('%s');\"\n \"exit(%i);\"\n % (stdoutText, stderrText, exitCode))\n command = \"%s -c %r\" % (PYTHON_COMMAND, pythonScript)\n\n disconnectedDeferred = defer.Deferred()\n protocol = ProcessProtocol()\n protocol.processEnded = disconnectedDeferred.callback\n resultDeferred = self.inductor.run(command, uid=UID)\n resultDeferred.addErrback(self.fail)\n @resultDeferred.addCallback\n def checkResult(res):\n r_stdoutText, r_stderrText, r_exitCode = res\n self.assertEqual(r_stdoutText, stdoutText, \"stdout not as expected\")\n self.assertEqual(r_stderrText, stderrText, \"stderr not as expected\")\n self.assertEqual(r_exitCode, exitCode, \"unexpected exit code\")\n return resultDeferred", "def main():\n parser = argparse.ArgumentParser(description='this function can be used '\n 'to notify after an event occured via mail and console')\n\n parser.add_argument(\"-e\", \"--event\", action=\"store\", dest=\"event\",\n help=\"summery of the event (e.g. -e 'Exception caught!')\")\n\n parser.add_argument(\"-d\", \"--event_details\", action=\"store\", dest=\"event_details\",\n help=\"event details (e.g. -d 'NullPointerException ...')\")\n\n parser.add_argument(\"-u\", \"--mail_user\", action=\"store\", dest=\"mail_user\",\n help=\"email user which is equal to email itself (e.g. -u test@gmail.com)\")\n\n parser.add_argument(\"-p\", \"--mail_password\", action=\"store\", dest=\"mail_password\",\n help=\"email password (e.g. -p 123456)\")\n\n parser.add_argument(\"-o\", \"--host_name\", action=\"store\", dest=\"host_name\",\n help=\"host name that the issue occured on (e.g. -o storage-ge4-test.scl.lab.tlv.redhat.com)\")\n\n parser.add_argument(\"-t\", \"--test_name\", action=\"store\", dest=\"test_name\",\n help=\"name of the test (e.g. -e 'TestCase18145')\")\n\n parser.add_argument(\"-x\", \"--target_mail\", action=\"store\", dest=\"target_mail\",\n help=\"email target address (e.g. 
-u 'target@gmail.com')\")\n\n parser.add_argument(\"-l\", \"--log_path\", action=\"store\", dest=\"log_path\",\n help=\"the path of the log directory (e.g -l '/tmp/bug_hunter_logs')\")\n\n options = parser.parse_args()\n\n notify_via_mail_and_console(\n options.event, options.event_details, options.target_mail, options.mail_user, options.mail_password,\n options.host_name, options.test_name, options.log_path\n )", "def execute_tool(description, *args):\n command_line = list(args) + files_and_directories\n click.echo(f\"{description}: {' '.join(command_line)}\")\n rv = call(command_line)\n if rv != 0:\n exit(rv)", "def _Run(self, toolname, args, expected_returncode=0):\n tool_path = os.path.join(self._ibmperf_dir, toolname)\n cmd_line = [tool_path] + args\n _LOGGER.debug(\"Running command '%s'.\", \" \".join(cmd_line))\n cmd = self._Popen(cmd_line)\n stdout, stderr = cmd.communicate()\n returncode = cmd.returncode\n if returncode != expected_returncode:\n raise ExecutionFailed(\"'%s' returned code '%d'.\\n STDOUT: %s\\n\"\n \" STDERR: %s\\n\" %\n (toolname, returncode, stdout, stderr))\n # Pylint doesn't know the type of 'stdout', so complains about a missing\n # member function. Ignore it.\n # pylint: disable=E1103\n return stdout.splitlines()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the path to the file where we will store sticky settings
def GetStickySettingsFilePath():
    global submissionInfo
    deadlineHome = submissionInfo[ "UserHomeDir" ].strip()
    return os.path.join( deadlineHome, "settings", "katana_sticky.json" )
[ "def getMeteorSettingsFilePath(self):\n directory = tempfile.gettempdir()\n name = self.settingsFile.name\n return os.path.join(directory, name)", "def settingsFilePath(self):\n return self._settingsFilePath", "def bot_settings_file(self):\r\n return os.path.join(self.bot_folder, 'settings.json')", "def get_preference_file():\n\n return \"{}/{}\".format(_MANAGER_PREFERENCE_PATH, _MANAGER_PREFERENCE_FILE)", "def get_cached_addon_path():\n settingspath = get_cached_setting_path()\n if not settingspath:\n logger.error(\"#SETTINGSPATH# resolution required but was not found\")\n return\n\n return os.path.join(settingspath, \"Addons\") + \"\\\\\"", "def get_settings_file(app_name: str):\n default_dir = get_default_dir(app_name)\n file_name = os.path.join(default_dir, \".settings.json\")\n return file_name", "def get_tool_settings_file_path(self, tool_id):\n\n settings_path = path_utils.get_user_data_dir(appname=tool_id)\n settings_file = path_utils.clean_path(os.path.expandvars(os.path.join(settings_path, 'settings.cfg')))\n\n return settings_file", "def config_file(self) -> Path:\n return self._working_directory / \"zookeeper.properties\"", "def default_config_filename():\n home = os.path.expanduser(\"~\")\n return os.path.join(home, \".twarc\")", "def findSettingsFile():\n settingsName = 'oct-fire-settings.json'\n userPath = os.path.expanduser('~')\n if os.path.exists(settingsName):\n return settingsName\n elif os.path.exists(os.path.join(userPath, settingsName)):\n return os.path.join(userPath, settingsName)\n elif os.path.exists(os.path.join(userPath, 'Desktop', settingsName)):\n return os.path.join(userPath, 'Desktop', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Documents', settingsName)):\n return os.path.join(userPath, 'Documents', settingsName)\n elif os.path.exists(os.path.join(userPath, 'Downloads', settingsName)):\n return os.path.join(userPath, 'Downloads', settingsName)\n raise Exception('Could not locate settings file')", "def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")", "def get_user_settings_dir():\n settings_dir = os.environ.get(\"JUPYTERLAB_SETTINGS_DIR\")\n settings_dir = settings_dir or pjoin(jupyter_config_dir(), \"lab\", \"user-settings\")\n return osp.abspath(settings_dir)", "def get_wallet_filepath():\n return wallet_filepath", "def get_default_config_path(cls) -> str:\n return os.path.join(os.path.expanduser(\"~\"), '.wallpaper_changer', 'config.ini')", "def settings_file_env_var(self) -> str:\n return self._build_env_var(self.prefix, self.settings_file_suffix)", "def settings_save_path(ctx):\n click.echo(ctx.obj['save_path'])", "def getPathFile():\n\t\n\treturn getConfDir()+'paths'", "def file_path():\n # Get path to LightWave's config folder, and join with the filename\n folder = lwsdk.LWDirInfoFunc(lwsdk.LWFTYPE_SETTING)\n file_path = os.path.join(folder, PRESETS_FILE)\n return file_path", "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the current settings from Submitter UI to the sticky settings file.
def WriteStickySettings( gui ):
    global stickySettingWidgets, stickyWidgetSaveFunctions
    print( "Writing sticky settings..." )

    configFile = GetStickySettingsFilePath()

    stickySettings = {}

    for setting, widgetName in stickySettingWidgets.iteritems():
        try:
            widget = getattr( gui, widgetName )
            stickySettings[setting] = stickyWidgetSaveFunctions[ type( widget ) ]( widget )
        except AttributeError:
            print( traceback.format_exc() )

    try:
        fileContents = json.dumps( stickySettings, encoding="utf-8" )
        with io.open( configFile, "w", encoding="utf-8" ) as fileHandle:
            fileHandle.write( fileContents.decode("utf-8") )
    except IOError:
        print( "Could not write sticky settings" )
        print( traceback.format_exc() )
[ "def save_settings(self):\n logging.info(\"Saving settings.\")\n write_settings(self.settings)", "def save_settings(self):\n with open(self.settings_path, \"w\") as f:\n json.dump(self.settings, f, indent=4)", "def save_settings(self):\n settings = self.get_settings()\n json.dump(settings, self.config_file)", "def settings_save(self):\n save_msg = MsgSettingsSave()\n self.framer(save_msg)", "def save_settings(self):\n self._click_button('save_settings')", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def __writeSettingsToFile(self):\n \n fileName = \"settings.txt\"\n \n # Opening file and creating it if it doesn't exist yet\n with open(fileName, \"w+\") as file:\n\n # Writing the selected playlist to the file\n file.write(\"selectedPlaylist : \" + self.__selectedPlaylist)", "def save_to_file(self):\n check_path(self.config_path)\n\n with open(self.settings_file, 'w') as settings_file:\n options = self._get_options()\n json.dump(options,\n \t settings_file,\n \t indent=4,\n \t separators=(',', ': '))", "def save_setting(self):\n if self.is_checked.get():\n if \"Email\" not in s.alert:\n s.updateAlert(\"Email\")\n s.updateEmail(self.email_addr_entry.get())\n if not self.is_checked.get():\n if \"Email\" in s.alert:\n s.deleteAlert(\"Email\")\n s.deleteEmail()\n # Check the refresh interval\n if self.is_minimize_to_system_tray.get():\n s.updateMinimize(\"True\")\n else:\n s.updateMinimize(\"False\")\n\n if self.is_launch_at_start_up.get():\n s.updateLaunchAtStartup(\"True\")\n become_persistent(__file__)\n else:\n s.updateLaunchAtStartup(\"False\")\n remove_startup()\n\n s.updateSetting(self.interval_entry.get())\n Tracker.save_state(Tracker.FILENAME, s)", "def save(self):\n\n # Try to find user settings file\n file_path = os.path.join(info.USER_PATH, self.settings_filename)\n\n # try to save data to file, will raise exception on failure\n self.write_to_file(file_path, self._data)", "def saveit(self):\n data = self.settings()\n print(data)\n\n try:\n with open('mutespeak.json', 'w') as fp:\n json.dump(data,fp)\n except IOError:\n print('Could not save settings')", "def saveSettings(self):\n self.userFiles.applyData()\n self.userPersonal.applyData()", "def write_setting_file(deduper, filename='settings'):\n settings_file = filename\n with open(settings_file, 'wb') as sf:\n deduper.writeSettings(sf)\n print(\"Setting file saved\")", "def _saveSettings(self):\n\t\tsettingsfile = path.join(self.workdir, 'proc.settings.yaml')\n\n\t\tprops = self.props.copy()\n\t\tprops.lock = None\n\t\tprops.template = props.template.__name__\n\t\tprops.expect = str(props.expect)\n\t\tprops.expart = [str(ep) for ep in props.expart]\n\t\tprops.depends = [repr(p) for p in self.depends]\n\t\tprops.script = str(props.script)\n\t\tprops.procvars = {}\n\t\tprops.output = OrderedDict([(key, str(val)) for key, val in props.output.items()])\n\t\tprops.to_yaml(filename = settingsfile)\n\n\t\tlogger.debug('Settings saved to: %s' % settingsfile, proc = self.id)", "def writeMeteorSettingsFile(self):\n\n # Construct a dict of all the server URIs by name,\n # taking into account that some of them might have an overridden baseURI\n serverURIs = {}\n for serverName, serverSettings in self.paths.meteor.items():\n if hasattr(serverSettings, \"baseURI\"):\n uri = serverSettings.baseURI + \":\" + serverSettings.webPort\n else:\n uri = serverName + \".\" + self.paths.globals.baseURI\n serverURIs[serverName] = uri\n\n settingsDict = {\n \"public\": {\n \"serverName\": 
self.serverName,\n \"baseURI\": self.paths.globals.baseURI,\n \"serverURIs\": serverURIs,\n \"configName\": self.paths.globals.configName\n },\n \"paths\": self.paths.toDict()\n }\n\n # We hang onto settingsFile so that it does not get gc'ed. When it does, it is deleted.\n self.settingsFile = tempfile.NamedTemporaryFile(mode=\"w\", prefix=\"meteor-settings-\", suffix=\".json\")\n json.dump(settingsDict, self.settingsFile, sort_keys=True, indent=4, separators=(',', ': '))\n self.settingsFile.flush()\n\n self.logger.info(\"Wrote meteor settings file to: \" + self.getMeteorSettingsFilePath())", "def _write_settings(data: dict) -> None:\n loaders.write(__settings_path, data, env=\"plbmng\")", "def save_settings(self, settings=None):\r\n if settings is None:\r\n settings = self.settings\r\n\r\n # general settings\r\n settings.setValue(\"geometry\", self.saveGeometry())\r\n settings.setValue(\"window_state\", self.saveState())\r\n\r\n # layout settings\r\n settings.beginGroup(\"layout\")\r\n settings.setValue(\"main_splitter\", self.main_splitter.saveState())\r\n settings.setValue(\"objects_splitter\", self.objects_splitter.saveState())\r\n settings.setValue(\"properties_splitter\", self.properties_splitter.saveState())\r\n settings.setValue(\"console_splitter\", self.console_splitter.saveState())\r\n settings.setValue(\"objects_hidden\", self.objects_group.isHidden())\r\n settings.setValue(\"properties_hidden\", self.properties_splitter.isHidden())\r\n settings.setValue(\"console_hidden\", self.console.isHidden())\r\n settings.setValue(\"time_slider_hidden\", self.time_slider_toolbar.isHidden())\r\n settings.setValue(\"viewer_hidden\", self.viewer_group.isHidden())\r\n settings.endGroup()\r\n\r\n # save viewer settings\r\n settings.beginGroup(\"viewer\")\r\n settings.setValue(\"geometry\", self.saveGeometry())\r\n\r\n # save timeslider settings\r\n settings.endGroup()", "def write_preferences_file(self):\n user_data_dir = find_pmag_dir.find_user_data_dir(\"thellier_gui\")\n if not os.path.exists(user_data_dir):\n find_pmag_dir.make_user_data_dir(user_data_dir)\n pref_file = os.path.join(user_data_dir, \"thellier_gui_preferences.json\")\n with open(pref_file, \"w+\") as pfile:\n print('-I- writing preferences to {}'.format(pref_file))\n json.dump(self.preferences, pfile)", "def save_settings(dic):\n json.dump(dic, open(\"resources/files/settings.txt\", \"w\"))\n\n # LEGACY\n # with open(\"resources/files/settings.txt\", \"w\", newline=\"\\n\") as w:\n # for sett, val in dic.items():\n # w.write(sett + '\\\\' + val + '\\n')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads in settings from the sticky settings file, then update the UI with the new settings
def LoadStickySettings( gui ):
    global stickySettingWidgets, stickyWidgetLoadFunctions
    configFile = GetStickySettingsFilePath()
    print( "Reading sticky settings from: %s" % configFile )

    stickySettings = None
    try:
        with io.open( configFile, "r", encoding="utf-8" ) as fileHandle:
            stickySettings = json.load( fileHandle, encoding="utf-8" )
    except IOError:
        print( "No sticky settings found. Using default settings." )
    except ValueError:
        print( "Invalid sticky settings. Using default settings." )
        print( traceback.format_exc() )
    except Exception:
        print( "Could not read sticky settings. Using default settings." )
        print( traceback.format_exc() )

    if stickySettings:
        for setting, value in stickySettings.iteritems():
            widgetName = stickySettingWidgets.get(setting)
            if widgetName:
                try:
                    widget = getattr(gui, widgetName)
                    stickyWidgetLoadFunctions[ type( widget ) ]( widget, value )
                except AttributeError:
                    print( traceback.format_exc() )
[ "def update_settings(settings):", "def update_control_widgets(self):\n logger.info(f'Loading settings: {self.settings_dict}')\n for k, section in self.settings_dict.items():\n for setting_name, value in section.items():\n self.set_control_value(setting_name, value)", "def settings(self):\n \n app = MDApp.get_running_app()\n app.open_settings()\n self.dismiss()", "def __load_settings(self):\n\n self.app_settings = sublime.load_settings(self.SETTINGS_FILE)\n self.__refresh_settings(True)\n\n # The settings may change during execution so we need to listen for changes\n self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)", "def on_settings(self):\n\n # Pull the current app state from the relay Observer object\n status, interval, ntfc_status, ntfc_state = settings_state.get_state()\n\n # Pass it to the Observable object in order to render the Settings window\n settings_changed, update_interval, ntfc_changed, ntfc_selected = render_settings_window(\n status, interval, ntfc_status, ntfc_state, settings_state)\n\n # Register any state changes\n settings_state.update_state(settings_changed, update_interval, ntfc_changed, ntfc_selected)\n\n # If the interval has changed, reprogram scheduler to run at the new interval\n if settings_state.intrvl_change_trig:\n modify_scheduler(JOB_ID, settings_state.settings_interval)\n\n if settings_state.notification_change_trig:\n NewsIndicator.notifications = False if not settings_state.notification_state else True", "def on_settings_updated(self):\n\t\t\n\t\tpersist.debug(\"Settings Updated\", persist.settings.settings)\n\n\t\tself.reset()\n\t\tself.initOpen()", "def update_settings(self):\n self.server_settings.update()", "def update_settings(self):\n settings = {\n \"reference\": self,\n \"draw_tangents\": self.cbDrawTangents.isChecked(),\n }\n if self.cbShowSolarAngle.isChecked():\n settings[\"show_solar_angle\"] = self.cbSolarAngleType.currentText(), self.cbSolarBody.currentText()\n else:\n settings[\"show_solar_angle\"] = None\n\n self.view.set_remote_sensing_appearance(settings)", "def WriteStickySettings( gui ):\n global stickySettingWidgets, stickyWidgetSaveFunctions\n print( \"Writing sticky settings...\" )\n\n configFile = GetStickySettingsFilePath()\n\n stickySettings = {}\n\n for setting, widgetName in stickySettingWidgets.iteritems():\n try:\n widget = getattr( gui, widgetName )\n stickySettings[setting] = stickyWidgetSaveFunctions[ type( widget ) ]( widget )\n except AttributeError:\n print( traceback.format_exc() )\n\n try:\n fileContents = json.dumps( stickySettings, encoding=\"utf-8\" )\n with io.open( configFile, \"w\", encoding=\"utf-8\" ) as fileHandle:\n fileHandle.write( fileContents.decode(\"utf-8\") )\n except IOError:\n print( \"Could not write sticky settings\" )\n print( traceback.format_exc() )", "def load_settings(self):\n\n vartypes = {\n 'bool': tk.BooleanVar,\n 'str': tk.StringVar,\n 'int': tk.IntVar,\n 'float': tk.DoubleVar\n }\n\n # create our dict of settings variables from the model's settings.\n self.settings = {}\n for key, data in self.settings_model.variables.items():\n vartype = vartypes.get(data['type'], tk.StringVar)\n self.settings[key] = vartype(value=data['value'])\n\n # put a trace on the variables so they get stored when changed.\n for var in self.settings.values():\n var.trace('w', self.save_settings)", "def set_settings(self):\n #global SETTINGS\n columns = self.cols.get()\n rows = self.rows.get()\n start_player = self.first_to_move_options_list.get()\n start_piece_color = 
self.ulp.get()\n win_mode = self.win.get()\n temp_settings = othello.othello_settings(columns,rows,start_player,start_piece_color,win_mode)\n clean_settings(temp_settings)\n print('Settings: ',SETTINGS)\n self._root_window.quit()\n return", "def read_metview_settings(self, settings_file):\n import configparser\n\n cf = configparser.ConfigParser()\n cf.read(settings_file)\n env_section = cf[\"Environment\"]\n for envar in env_section:\n # print('set ', envar.upper(), ' = ', env_section[envar])\n os.environ[envar.upper()] = env_section[envar]\n self.info_section = cf[\"Info\"]", "def global_settings(self):\n self.settings_window = SettingsWindow(self.setup, self.options, parent=self)\n self.settings_window.show()\n self.settings_window.settingsUpdated.connect(lambda: self.update_globals())", "def restoreSettings(self):\n \n # Check for exsisting settings\n if len(self.settings.allKeys()) == 0:\n return\n \n # Widgets state and geometry\n self.settings.beginGroup(\"widgets\")\n geometry = self.settings.value(\"geometry\").toByteArray()\n splitter1_state = self.settings.value(\"splitter1_state\").toByteArray()\n splitter2_state = self.settings.value(\"splitter2_state\").toByteArray()\n window_state = self.settings.value(\"window_state\").toByteArray()\n self.restoreGeometry(geometry)\n self.restoreState(window_state, _app_version)\n self.ui.splitter.restoreState(splitter1_state)\n self.ui.splitter_2.restoreState(splitter2_state)\n self.settings.endGroup()\n \n # Recent files\n recents_data = self.settings.value(\"recent_file_names\")\n recents_file_names = recents_data.toStringList()\n for recent_file_name in recents_file_names:\n self.recent_file_names.append(str(recent_file_name))\n self._updateRecentsMenu()\n \n # User preferences\n self.settings.beginGroup(\"preferences\")\n annotations_active = self.settings.value(\"annotations\").toBool()\n autoindent = self.settings.value(\"autoindent\").toBool()\n autocomp_thresh = self.settings.value(\"autocomp_thresh\").toInt()[0]\n indent_width = self.settings.value(\"indent_width\").toInt()[0]\n indent_use_tabs = self.settings.value(\"indent_use_tabs\").toBool()\n self._auto_execution = self.settings.value(\"autoexecution\").toBool()\n self.code_check_delay = \\\n self.settings.value(\"code_check_delay\").toFloat()[0]\n self.settings.endGroup()\n \n self.editor.annotations_active = annotations_active\n self.editor.setAutoIndent(autoindent)\n self.editor.setAutoCompletionThreshold(autocomp_thresh)\n self.editor.setIndentationWidth(indent_width)\n self.editor.setIndentationsUseTabs(indent_use_tabs)\n \n # 3D View state\n self.settings.beginGroup(\"view_state\")\n persp = self.settings.value(\"perspective\").toBool()\n self.ui.glPreviewWidget.perspective = persp\n self.settings.endGroup()", "def display_settings(self):\n dialog = SettingsDialog(self)\n dialog.run()\n self.refresh()", "def system_settings(open_files_list: Any, config: Config) -> None:\r\n\r\n try:\r\n if open_files_list.selectedItems()[0].text() != 'No DAT files added yet':\r\n config.system_name = dat_details[open_files_list.selectedItems()[0].text()]['system_name']\r\n main_window.ui.labelSystemSettings.setText(f'These settings are only for <b>{config.system_name}</b>.')\r\n else:\r\n return\r\n except:\r\n return\r\n\r\n main_window.ui.listWidgetSystemAvailableLanguages.clear()\r\n main_window.ui.listWidgetSystemSelectedLanguages.clear()\r\n main_window.ui.listWidgetSystemAvailableRegions.clear()\r\n main_window.ui.listWidgetSystemSelectedRegions.clear()\r\n 
main_window.ui.listWidgetSystemVideoStandards.clear()\r\n\r\n select_checkboxes(system_exclude_checkboxes, False)\r\n select_checkboxes(system_options_checkboxes, False)\r\n\r\n main_window.ui.lineEditSystemOptions1G1RPrefix.clear()\r\n main_window.ui.lineEditSystemOptions1G1RSuffix.clear()\r\n main_window.ui.lineEditSystemOptionsTrace.clear()\r\n\r\n main_window.ui.frameSystemOptions1G1RPrefix.hide()\r\n main_window.ui.frameSystemOptionsTrace.hide()\r\n\r\n # Enable the system settings\r\n main_window.ui.tabWidgetSystemSettings.setEnabled(True)\r\n\r\n # Create the system config file if it's missing\r\n if not pathlib.Path(f'{config.system_settings_path}/{config.system_name}.yaml').is_file():\r\n try:\r\n with open(pathlib.Path(f'{config.system_settings_path}/template.yaml'), 'r', encoding='utf-8') as template_file:\r\n template_str: list[str] = template_file.readlines()\r\n with open(pathlib.Path(f'{config.system_settings_path}/{config.system_name}.yaml'), 'w', encoding='utf-8') as system_config_file:\r\n system_config_file.writelines(template_str)\r\n except OSError as e:\r\n eprint(f'\\n{Font.error_bold}* Error: {Font.end}{str(e)}\\n')\r\n raise\r\n\r\n # Pull the system settings\r\n import_system_settings(\r\n config,\r\n config.system_name,\r\n SYSTEM_LANGUAGE_ORDER_KEY,\r\n SYSTEM_REGION_ORDER_KEY,\r\n SYSTEM_VIDEO_ORDER_KEY,\r\n SYSTEM_LIST_PREFIX_KEY,\r\n SYSTEM_LIST_SUFFIX_KEY,\r\n SYSTEM_OVERRIDE_EXCLUDE_KEY,\r\n SYSTEM_OVERRIDE_INCLUDE_KEY,\r\n SYSTEM_FILTER_KEY,\r\n SYSTEM_EXCLUSIONS_OPTIONS_KEY)\r\n\r\n # Set the system paths UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_user_path_settings:\r\n main_window.ui.checkBoxSystemOverridePaths.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverridePaths.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverridePaths,\r\n [\r\n main_window.ui.buttonChooseSystemOutput,\r\n main_window.ui.labelSelectSystemOutput,\r\n main_window.ui.labelSystemOutputFolder,\r\n main_window.ui.buttonChooseSystemCloneList,\r\n main_window.ui.labelSelectSystemCloneList,\r\n main_window.ui.labelSystemCloneList,\r\n main_window.ui.buttonChooseSystemMetadataFile,\r\n main_window.ui.labelSelectSystemMetadataFile,\r\n main_window.ui.labelSystemMetadataFile,\r\n main_window.ui.buttonClearSystemCloneList,\r\n main_window.ui.buttonClearSystemMetadataFile,\r\n main_window.ui.buttonClearSystemOutput\r\n ]\r\n )\r\n\r\n # Populate the paths\r\n if config.system_output:\r\n main_window.ui.labelSystemOutputFolder.setText(config.system_output)\r\n main_window.system_output_folder = str(config.system_output)\r\n else:\r\n main_window.ui.labelSystemOutputFolder.setText(qtc.QCoreApplication.translate('MainWindow', output_not_found, None)) # type: ignore\r\n main_window.system_output_folder = ''\r\n\r\n if config.system_clone_list:\r\n main_window.ui.labelSystemCloneList.setText(config.system_clone_list)\r\n main_window.system_clone_list = str(config.system_clone_list)\r\n else:\r\n main_window.ui.labelSystemCloneList.setText(qtc.QCoreApplication.translate('MainWindow', clone_list_not_found, None)) # type: ignore\r\n main_window.system_clone_list = ''\r\n\r\n if config.system_metadata_file:\r\n main_window.ui.labelSystemMetadataFile.setText(config.system_metadata_file)\r\n main_window.system_metadata_file = str(config.system_metadata_file)\r\n else:\r\n main_window.ui.labelSystemMetadataFile.setText(qtc.QCoreApplication.translate('MainWindow', metadata_file_not_found, None)) # type: 
ignore\r\n main_window.system_metadata_file = ''\r\n\r\n # Set the system regions UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_region_order_user:\r\n main_window.ui.checkBoxSystemOverrideRegions.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideRegions.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideRegions,\r\n [\r\n main_window.ui.buttonSystemRegionAllLeft,\r\n main_window.ui.buttonSystemRegionAllRight,\r\n main_window.ui.buttonSystemRegionDown,\r\n main_window.ui.buttonSystemRegionLeft,\r\n main_window.ui.buttonSystemRegionRight,\r\n main_window.ui.buttonSystemRegionUp,\r\n main_window.ui.listWidgetSystemAvailableRegions,\r\n main_window.ui.listWidgetSystemSelectedRegions,\r\n main_window.ui.buttonSystemDefaultRegionOrder\r\n ])\r\n\r\n # Populate the system regions\r\n region_order_user: list[str] = [x for x in config.region_order_user if x != 'United Kingdom']\r\n region_order_default: list[str] = [x for x in config.region_order_default if x != 'United Kingdom']\r\n config.system_region_order_user = [x for x in config.system_region_order_user if x != 'United Kingdom']\r\n\r\n if config.system_region_order_user:\r\n main_window.ui.listWidgetSystemSelectedRegions.addItems([str(x) for x in config.system_region_order_user if x != {'override': 'true'} and x != {'override': 'false'}])\r\n main_window.ui.listWidgetSystemAvailableRegions.addItems([x for x in region_order_default if x not in config.system_region_order_user])\r\n else:\r\n main_window.ui.checkBoxSystemOverrideRegions.setChecked(False)\r\n main_window.ui.listWidgetSystemSelectedRegions.addItems([x for x in region_order_user])\r\n main_window.ui.listWidgetSystemAvailableRegions.addItems([x for x in region_order_default if x not in region_order_user])\r\n\r\n # Populate the system languages\r\n system_languages_user: list[str] = []\r\n\r\n # Add languages to the languages lists\r\n if config.system_languages_user_found:\r\n for languages in config.system_language_order_user:\r\n for key, value in config.languages.items():\r\n if languages == value:\r\n system_languages_user.append(key)\r\n\r\n main_window.ui.listWidgetSystemSelectedLanguages.addItems(system_languages_user)\r\n main_window.ui.listWidgetSystemAvailableLanguages.addItems(sorted([x for x in config.languages if x not in system_languages_user]))\r\n else:\r\n main_window.ui.checkBoxSystemOverrideLanguages.setChecked(False)\r\n\r\n main_window.ui.listWidgetSystemAvailableLanguages.addItems(sorted([x for x in config.languages]))\r\n\r\n # Set the system languages UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_language_order_user:\r\n main_window.ui.checkBoxSystemOverrideLanguages.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideLanguages.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideLanguages,\r\n [\r\n main_window.ui.buttonSystemLanguageAllLeft,\r\n main_window.ui.buttonSystemLanguageAllRight,\r\n main_window.ui.buttonSystemLanguageDown,\r\n main_window.ui.buttonSystemLanguageLeft,\r\n main_window.ui.buttonSystemLanguageRight,\r\n main_window.ui.buttonSystemLanguageUp,\r\n main_window.ui.listWidgetSystemAvailableLanguages,\r\n main_window.ui.listWidgetSystemSelectedLanguages,\r\n ])\r\n\r\n # Set the system video standards UI enabled/disabled depending on override state\r\n if {'override': 'true'} in config.system_video_order_user:\r\n 
main_window.ui.checkBoxSystemOverrideVideo.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideVideo.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideVideo,\r\n [\r\n main_window.ui.buttonSystemVideoStandardDown,\r\n main_window.ui.buttonSystemVideoStandardUp,\r\n main_window.ui.listWidgetSystemVideoStandards\r\n ])\r\n\r\n # Populate the system video standards\r\n if config.system_video_order_user:\r\n main_window.ui.listWidgetSystemVideoStandards.addItems([str(x) for x in config.system_video_order_user if x != {'override': 'true'} and x != {'override': 'false'}])\r\n else:\r\n main_window.ui.listWidgetSystemVideoStandards.setEnabled(False)\r\n main_window.ui.listWidgetSystemVideoStandards.addItems([x for x in config.video_order_default])\r\n\r\n # Set the system exclusions and options UI enabled/disabled depending on override state\r\n if {'override exclusions': 'true'} in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOverrideExclusions.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideExclusions.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideExclusions,\r\n system_exclude_checkboxes\r\n + [\r\n main_window.ui.buttonSystemDeselectAllExclude,\r\n main_window.ui.buttonSystemSelectAllExclude\r\n ]\r\n )\r\n\r\n if {'override options': 'true'} in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOverrideOptions.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverrideOptions.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverrideOptions,\r\n [main_window.ui.scrollAreaSystemOptions]\r\n )\r\n\r\n # Populate exclusions and options\r\n if config.system_exclusions_options:\r\n if 'r' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsPreferRegions.setChecked(True)\r\n if 'e' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsIncludeHashless.setChecked(True)\r\n if 'z' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsModernPlatforms.setChecked(True)\r\n if 'y' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsDemoteUnlicensed.setChecked(True)\r\n if 'nooverrides' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsDisableOverrides.setChecked(True)\r\n if 'removesdat' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsRemovesDat.setChecked(True)\r\n if 'log' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsKeepRemove.setChecked(True)\r\n if 'originalheader' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsOriginalHeader.setChecked(True)\r\n if 'warnings' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsReportWarnings.setChecked(True)\r\n if 'warningpause' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsPauseWarnings.setChecked(True)\r\n if 'nodtd' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsBypassDTD.setChecked(True)\r\n if 'singlecpu' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptionsDisableMultiCPU.setChecked(True)\r\n # Show the associated lineEdit later, as it takes a while for the checkbox to be enabled\r\n if 'listnames' in config.system_exclusions_options: main_window.ui.checkBoxSystemOptions1G1RNames.setChecked(True)\r\n if 'd' in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOptionsDisable1G1R.setChecked(True)\r\n 
main_window.ui.checkBoxSystemOptionsLegacy.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsLegacy.setEnabled(False)\r\n if 'legacy' in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOptionsLegacy.setChecked(True)\r\n main_window.ui.checkBoxSystemOptionsSplitRegions.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsSplitRegions.setEnabled(False)\r\n main_window.ui.checkBoxSystemOptionsDisable1G1R.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsDisable1G1R.setEnabled(False)\r\n if 'regionsplit' in config.system_exclusions_options:\r\n main_window.ui.checkBoxSystemOptionsSplitRegions.setChecked(True)\r\n main_window.ui.checkBoxSystemOptionsLegacy.setChecked(False)\r\n main_window.ui.checkBoxSystemOptionsLegacy.setEnabled(False)\r\n\r\n system_excludes = [x for x in config.system_exclusions_options if 'exclude' in x and x != {'exclude': ''}]\r\n\r\n if system_excludes:\r\n system_exclude = system_excludes[0]['exclude']\r\n if 'a' in system_exclude: main_window.ui.checkBoxSystemExcludeApplications.setChecked(True)\r\n if 'A' in system_exclude: main_window.ui.checkBoxSystemExcludeAudio.setChecked(True)\r\n if 'b' in system_exclude: main_window.ui.checkBoxSystemExcludeBadDumps.setChecked(True)\r\n if 'B' in system_exclude: main_window.ui.checkBoxSystemExcludeBIOS.setChecked(True)\r\n if 'c' in system_exclude: main_window.ui.checkBoxSystemExcludeCoverdiscs.setChecked(True)\r\n if 'D' in system_exclude: main_window.ui.checkBoxSystemExcludeAddOns.setChecked(True)\r\n if 'd' in system_exclude: main_window.ui.checkBoxSystemExcludeDemos.setChecked(True)\r\n if 'e' in system_exclude: main_window.ui.checkBoxSystemExcludeEducational.setChecked(True)\r\n if 'g' in system_exclude: main_window.ui.checkBoxSystemExcludeGames.setChecked(True)\r\n if 'k' in system_exclude: main_window.ui.checkBoxSystemExcludeMIA.setChecked(True)\r\n if 'm' in system_exclude: main_window.ui.checkBoxSystemExcludeManuals.setChecked(True)\r\n if 'M' in system_exclude: main_window.ui.checkBoxSystemExcludeMultimedia.setChecked(True)\r\n if 'o' in system_exclude: main_window.ui.checkBoxSystemExcludeBonusDiscs.setChecked(True)\r\n if 'p' in system_exclude: main_window.ui.checkBoxSystemExcludePirate.setChecked(True)\r\n if 'P' in system_exclude: main_window.ui.checkBoxSystemExcludePreproduction.setChecked(True)\r\n if 'r' in system_exclude: main_window.ui.checkBoxSystemExcludePromotional.setChecked(True)\r\n if 'u' in system_exclude: main_window.ui.checkBoxSystemExcludeUnlicensed.setChecked(True)\r\n if 'v' in system_exclude: main_window.ui.checkBoxSystemExcludeVideo.setChecked(True)\r\n\r\n if config.system_user_prefix: main_window.ui.lineEditSystemOptions1G1RPrefix.setText(config.system_user_prefix)\r\n if config.system_user_suffix: main_window.ui.lineEditSystemOptions1G1RSuffix.setText(config.system_user_suffix)\r\n\r\n system_trace = [x for x in config.system_exclusions_options if 'trace' in x]\r\n\r\n if system_trace:\r\n # Show the associated lineEdit later, as it takes a while for the checkbox to be enabled\r\n system_trace_str = system_trace[0]['trace']\r\n main_window.ui.checkBoxSystemOptionsTrace.setChecked(True)\r\n main_window.ui.lineEditSystemOptionsTrace.setText(system_trace_str)\r\n\r\n if config.system_exclude:\r\n main_window.ui.textEditSystemExclude.setText('\\n'.join(config.system_exclude))\r\n else:\r\n main_window.ui.textEditSystemExclude.clear()\r\n\r\n if config.system_include:\r\n main_window.ui.textEditSystemInclude.setText('\\n'.join(config.system_include))\r\n 
else:\r\n main_window.ui.textEditSystemInclude.clear()\r\n\r\n if config.system_filter:\r\n main_window.ui.textEditSystemFilterInclude.setText('\\n'.join([str(x) for x in config.system_filter if x != {'override': 'true'} and x != {'override': 'false'}]))\r\n else:\r\n main_window.ui.textEditSystemFilterInclude.clear()\r\n\r\n # Show lineEdits for certain options if checked\r\n show_hide(main_window.ui.checkBoxSystemOptions1G1RNames, main_window.ui.frameSystemOptions1G1RPrefix)\r\n show_hide(main_window.ui.checkBoxSystemOptionsTrace, main_window.ui.frameSystemOptionsTrace)\r\n\r\n # Set the post filters UI enabled/disabled depending on override state\r\n if config.system_filter:\r\n if {'override': 'true'} in config.system_filter:\r\n main_window.ui.checkBoxSystemOverridePostFilter.setChecked(True)\r\n else:\r\n main_window.ui.checkBoxSystemOverridePostFilter.setChecked(False)\r\n\r\n system_enable(\r\n main_window.ui.checkBoxSystemOverridePostFilter,\r\n [\r\n main_window.ui.textEditSystemFilterInclude\r\n ])\r\n\r\n # Populate the post filters\r\n if config.system_filter:\r\n main_window.ui.textEditSystemFilterInclude.setText('\\n'.join([str(x) for x in config.system_filter if x != {'override': 'true'} and x != {'override': 'false'}]))\r\n else:\r\n main_window.ui.textEditSystemFilterInclude.clear()", "def loadSettings(self):\n _setDict = self.bba.get_settings() #gets dict\n #Write to GUI\n try:\n #Nullpunkt\n _val = int(_setDict['nullpunkt'])\n self.sui.Nullhoehe.setValue(_val)\n #Flammenmitte\n _val = int(_setDict['flammenmitte'])\n self.sui.Flammenmitte.setValue(_val)\n #Grad zwischen den Bildern\n _val = int(_setDict['gradprobild'])\n self.sui.GradZwischenBildern.setValue(_val)\n #Aufloesung\n _val = float(_setDict['aufloesung'])\n self.sui.Aufloesung.setValue(_val)\n #Workspace\n _val = _setDict['workspace']\n self.sui.WorkspaceShow_label.setText(_val)\n except:\n logging.ERROR('Settings konnten nicht gesetzt werden')", "def load(self):\n if not os.path.exists(self.settings_file):\n print('No settings file found. Creating settings file.')\n self.save()\n try:\n with open(self.settings_file, 'r') as fp:\n d = json.load(fp)\n d = {k: d[k] for k in d if d[k] is not None}\n self.d.update(d)\n except Exception as e:\n msg = \"Failed to load settings file. {}\\nDefault settings restored.\".format(e)\n logger.info(msg)\n print(msg)\n self.save()\n self.d['mousemode'] = 'rectangle' # don't change initial mousemode\n self._load_user_information()", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a url patternesque string into a path, given a context dict, and splits the result.
def pathify(urlpattern, **context):
    repl = lambda match: context[match.group(1)]
    path = re.sub(r':([a-z]+)', repl, urlpattern)
    return tuple(path[1:].split('/'))
[ "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def splitpath(self):\n \n pass", "def urlToPath(url):", "def resolveContext(self, context):\n if context is None:\n return context\n elif isinstance(context, tuple):\n return context\n elif isinstance(context, tuple):\n return tuple(context.split('/'))\n else:\n return context.getPhysicalPath()", "def from_path(path_parts, absolute=None, filter_chars=False):\n if filter_chars:\n path_parts = [Url.slugify(part) for part in path_parts]\n url = '/'.join(path_parts)\n url = '/' + url\n\n if absolute:\n url = absolute + url\n return url", "def composeURL(self,splitedURL):\n # 027 With use of SmartURL won't be necessary anymore.\n # 027 was used only in LinklistAdaptor.parse2Queue().parseLine() -> removed (Which actually might jeopardize cll).\n # 027 So actually is NOT used anywhere.\n \n #Could be replaced by string.join() method.\n #Also could be merged with method composePath().\n #Create child of list class with this method. \n \n self.debug.printHeader() \n url=''\n if len(splitedURL)>0:\n for piece in splitedURL:\n if not(piece==splitedURL[0]): url+='/'\n url+=piece\n self.logger.debug(\"Composed url is: %s\" %(url))\n return url\n #return \"/\".join(splitedURL) #026 This will do the same job. But needs to be tested.", "def _parse_path_segment(acc, segment):\n match = _token_re.match(segment)\n if match is not None:\n # XXX: Is it a problem that we're using strings for parameters and\n # strings for literal parts?\n t, token = match.groups()\n # Identifiers are more specific than wildcards.\n constraint, priority = (u'(.*)', 2) if t == u'*' else (u'([^/]+)', 3)\n return acc.transform(\n ['priority'], lambda x: x + priority,\n ['parts'], lambda x: x.append(token),\n ['params'], lambda x: x.append(token),\n ['constraints'], lambda x: x.set(token, constraint))\n else:\n return acc.transform(\n ['priority'], inc,\n ['parts'], lambda x: x.append(segment))", "def url_path_helper(*parts):\n new_parts = []\n for p in parts:\n if hasattr(p, \"__iter__\"):\n #This part is a sequence itself, recurse into it\n p = path_join(*p, **{'sep': \"/\"})\n p = p.strip(\"/\")\n if p in (\"\", \"\\\\\", \"/\"):\n continue\n new_parts.append(p)\n\n if len(new_parts) > 0:\n return \"/\".join(new_parts)\n else:\n return \"/\"", "def split_model_instance_string(self, model_instance):\r\n self.validate_model_instance_string(model_instance)\r\n urls = model_instance.split('/')\r\n return urls[0], urls[1], urls[2], urls[3]", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. 
\"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def context_to_path_string(context: List[str]) -> str:\n if len(context) == 0:\n return \"\"\n elif len(context) == 1:\n return context[0]\n else:\n return f'{context[0]}.{SpreadsheetGenerator.context_to_path_string(context[1:])}'", "def _parse_path(path, parsed=_ParsedRoutePath()):\n return reduce(_parse_path_segment, path, parsed)", "def pathSplit(path):\n path = re.split('/|\\\\\\\\', path)\n return path", "def split_url(url: str):\n if url.endswith(\".git\"):\n url = url[:-4]\n return url[url.find(\"://\") + 3:].split(\"/\")", "def explode(part):\n if isinstance(part, str):\n ans = []\n while len(part) > 0:\n parts = part.partition(\"/\")\n ans.append(parts[0])\n if parts[1] != \"\":\n ans.append(SLASH)\n part = parts[2]\n return ans\n\n return [part]", "def quote_paths(url):\n\n try:\n\n url = py2_enc(url)\n\n if url.startswith('http'):\n\n parsed = urlparse(url)\n processed_path = '/'.join([quote(i) for i in parsed.path.split('/')])\n url = urlunparse(parsed._replace(path=processed_path))\n\n return url\n\n else:\n\n path = '/'.join([quote(i) for i in url.split('/')])\n return path\n\n except Exception:\n\n return url", "def split_es_url(url):\n\n tokens = url.split('/')\n return tokens", "def test_split_fullpath_with_route_domain():\n\n # Expected input must have route specified, otherwise reject\n tests = [\n [\"/Partition/1.2.3.4%0:80\", \"/Partition\", \"1.2.3.4\", 0, 80],\n [\"/Part/Folder/1.2.3.4%1:443\", \"/Part/Folder\", \"1.2.3.4\", 1, 443],\n [\"/Part/::ffff:0:0%2.8080\", \"/Part\", \"::ffff:0:0\", 2, 8080],\n [\"/Part/1.2.3.4:8080\", None, None, None, None],\n [\"/Part/::ffff:0:0.8080\", None, None, None, None]\n ]\n\n for test in tests:\n results = split_fullpath_with_route_domain(test[0])\n assert results[0] == test[1]\n assert results[1] == test[2]\n assert results[2] == test[3]\n assert results[3] == test[4]", "def _get_url(self) -> str:\n if not hasattr(self.template, 'URL'):\n return None\n # Process matched tokens (i.e. named regex capture groups)\n URL = \"\"\"\"\"\"\n for part in self.template.URL:\n for key, value in self.processed_tokens.items():\n if value is None: continue\n part = part.replace('{%s}' % key, value)\n missing_value = re.search('\\{.+\\}', part)\n if not missing_value:\n URL += part\n return URL" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
init cluster_temp for all the center points
def __initCluster(self): data_size, cluster_center = self.data_size, self.cluster_center self.cluster_temp = np.zeros(data_size, dtype=int) self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float) for center in cluster_center: self.cluster_temp[center] = center
[ "def initClusters(self):\n print 'Initializing Cluster Centers'\n numFeatureAddedForCluster = [0]*self.k\n # initialize numclass of -out of-k cluster to be the center of the full sketches\n if self.numclass <= self.k:\n for fidx in self.fullIndex:\n fclass = self.classid[fidx]\n if fclass < self.k:\n # add each full sketch to the corresponding cluster, then divide\n self.clusterCenters[fclass] = map(operator.add,\n self.clusterCenters[fclass],\n self.features[fidx])\n numFeatureAddedForCluster[fclass] += 1\n\n for clusterCenterIdx in range(self.numclass):\n self.clusterCenters[clusterCenterIdx] = [cfloat/numFeatureAddedForCluster[clusterCenterIdx] for cfloat in self.clusterCenters[clusterCenterIdx]]\n\n # for the remaining cluster centers, randomly select from the non-selected features\n numClustSelected = self.numclass\n while numClustSelected < self.k:\n featIdx = randint(0, len(self.features))\n if not self.isFull[featIdx]:\n self.clusterCenters[numClustSelected] = self.features[featIdx]\n numClustSelected += 1", "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1", "def reset_vars(self, k):\n \n # Keep track of which centroid this point is closest to \n # The index represent the point. The val of index represent\n # which centroid it corresponds to \n # Initially all points are assigned to centroid 1 \n self.k = k \n self.clusters = np.array([[float(\"inf\"), 0, 0]]*self.data.shape[0], dtype=object)\n self.prev_centroids = np.array([])", "def initialize(self):\n k = self.k\n df = self.train_df\n rand_centroids = []\n\n for label in range(int(k)):\n rand_location = []\n for column in df:\n rand_location.append(random.choice(df[column]))\n\n # assign new centroid\n rand_centroid = Centroid()\n rand_centroid.label = label\n rand_centroid.location = rand_location\n\n # add the centroid to list of random centroids\n rand_centroids.append(rand_centroid)\n\n self.centroids = rand_centroids", "def __init__(self):\n ## self.clusters[cluster] = list of coordinates\n self.clusters = {}\n ## self.centroids[cluster] = centroid\n self.centroids = {}", "def assign_points(self):\n self.init_clusters()\n index = None\n for k in range(len(self.points)):\n min_cluster_dist = sys.maxsize\n for j in range(len(self.clusters)):\n curr_dist = math.dist(self.points[k], self.clusters[j].center)\n if curr_dist < min_cluster_dist:\n min_cluster_dist = curr_dist\n index = j\n self.clusters[index].c_points.append(self.points[k])\n self.set_radius()", "def _init_centroid(self, data: np.ndarray):\r\n if self.init_pp:\r\n np.random.seed(self.seed)\r\n centroid = [int(np.random.uniform() * len(data))]\r\n for _ in range(1, self.n_cluster):\r\n dist = []\r\n dist = [min([np.inner(data[c] - x, data[c] - x) for c in centroid])\r\n for i, x in enumerate(data)]\r\n dist = np.array(dist)\r\n dist = dist / dist.sum()\r\n cumdist = np.cumsum(dist)\r\n\r\n prob = np.random.rand()\r\n for i, c in enumerate(cumdist):\r\n if prob > c and i not in centroid:\r\n centroid.append(i)\r\n break\r\n centroid = np.array([data[c] for c in centroid])\r\n else:\r\n np.random.seed(self.seed)\r\n idx = np.random.choice(range(len(data)), size=(self.n_cluster))\r\n centroid = data[idx]\r\n # print(centroid)\r\n return centroid", "def updateCenter(self):\n self.centers = []\n for i in xrange(self.K):\n point_sum = 
np.zeros((1,self.feat_dimension+1))\n for j in self.clusters[i]:\n vec = self.__getSampleVec(self.samples[j])\n point_sum += vec\n point_sum /= len(self.clusters[i])\n self.centers.append(point_sum)", "def _initial_clusters(self):\n clusters = []\n for i in range(self.point_count):\n clusters.append(self._create_cluster_from_index(i))\n return clusters", "def improve_centers(self):\n kmeans= KMeans(n_clusters=len(self.neurons), init=np.vstack([n.c for n in self.neurons]), n_init= 1, max_iter= 30, n_jobs= 1)\n kmeans.fit(self.X)\n for i in range(len(self.neurons)):\n self.neurons[i].c= kmeans.cluster_centers_[i]", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def random_init(self, train_data):\n\n centroids=np.zeros((self.n_clusters_, train_data.shape[1]))\n for c in range(self.n_clusters_):\n for f in range(train_data.shape[1]):\n centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))\n\n return centroids", "def kmeans_intialize_centroids(k, n, data, T):\r\n # cast to a list to be fed to a c extension\r\n sp_initial = kpp.kmeans_pp(k, n, T).astype(int).tolist()\r\n\r\n # cast to a list to be fed to a c extension\r\n km_initial = kpp.kmeans_pp(k, n, data).astype(int).tolist()\r\n\r\n return sp_initial, km_initial", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]", "def initialize_centroids(self, dataframe) :\n self.centroids = random.sample(dataframe.to_numpy().tolist(), self.get_n_clusters())", "def compute_new_centroid(self):\n \n # Update previous centroids\n self.prev_centroids = self.centroids\n \n # Get how many clusters there are \n k = self.k\n \n # Generate new sets of centroids\n self.centroids = [0]*k\n \n # Total \n for elm in range(k):\n # Traverse through clusters\n #all_elm = np.array([np.array(xi[1]) for i, xi in enumerate(self.clusters) if self.clusters[i, 2] == elm])\n all_elm = np.extract(self.clusters[:,2] == elm, self.clusters[:,1])\n \n # Total number of elements in this cluster elm\n total = all_elm.shape[0]\n \n # If the cluster is empty, do not compute 0 \n if total == 0:\n # Grab the old cluster, and use that\n self.centroids[elm] = [np.array(self.prev_centroids[elm][0]), 0]\n continue\n \n # The new centroid point for this cluster\n new_point = np.mean(all_elm, axis=0)\n \n # Append this new centroid data to a temp holder\n self.centroids[elm] = [new_point, total]\n\n \n # Convert list into np.array\n self.prev_centroids = np.array(self.prev_centroids)\n self.centroids = np.array(self.centroids)", "def initialize_centroids(X, k):\n m,features = X.shape\n initial_centroids = np.random.choice(range(0,m),k,replace=False)\n centroids = X[initial_centroids]\n cluster_index = np.array(range(0,k))\n \n return centroids, cluster_index", "def __initialise_smart(self, X, args):\n\t\tcentroids = np.zeros((self.K,self.D))\n\t\tif X.shape[0] > 10*self.K:\n\t\t\tdata = X[:10*self.K,:]\n\t\telse:\n\t\t\tdata = X\n\t\tN = data.shape[0]\n\n\t\t\t#choosing centroids\n\t\t\t#points are chosen from dataset with farhtest point clustering\n\t\tran_index = np.random.choice(N)\n\t\tcentroids[0,:] = data[ran_index]\n\n\t\tfor k in range(1,self.K):\n\t\t\tdistances = np.zeros((N,k)) #(N,K)\n\t\t\tfor k_prime in range(k):\n\t\t\t\tdistances[:,k_prime] = np.sum(np.square(data - 
centroids[k_prime,:]), axis =1) #(N,K')\n\t\t\tdistances = np.min(distances, axis = 1) #(N,)\n\t\t\tdistances /= np.sum(distances) #normalizing distances to make it a prob vector\n\t\t\tnext_cl_arg = np.random.choice(range(data.shape[0]), p = distances) #chosen argument for the next cluster center\n\t\t\tcentroids[k,:] = data[next_cl_arg,:]\n\n\t\tvar = np.var(X, axis = 0) #(D,)\n\n\t\t\t#computing initial responsibilities\n\t\tr_0 = np.zeros((X.shape[0],self.K))\n\t\tfor k in range(self.K):\n\t\t\tr_0[:,k] = np.sum(np.divide(np.square(X - centroids[k,:]), var), axis = 1) + 1e-5\n\t\tr_0 = np.divide(r_0.T, np.sum(r_0,axis=1)).T\n\n\t\tself.gating.fit(X,r_0, *args)\n\n\t\treturn r_0", "def _init_homolog_centers(self, method=\"kmeans\", min_spot_num=2, axis_infos=Axis3D_infos):\n if hasattr(self, 'chr_2_homolog_centers') and not self.overwrite:\n if self.verbose:\n print(f\"- directly return chr_2_homolog_centers\")\n return\n if method == 'kmeans':\n from sklearn.cluster import KMeans\n # chr_2_init_centers\n self.chr_2_homolog_centers = {}\n self.chr_2_cand_hzxys = {}\n self.chr_2_cand_ids = {}\n # loop through chrs\n for _chr_name, _exp_num in self.chr_2_copyNum.items():\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n # if not spots exists, skip\n if len(_chr_coords_df) < min_spot_num:\n continue\n # get coordinates\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n # append\n self.chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n self.chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate weights\n _uinds, _uind_counts = np.unique(_chr_ids, return_counts=True)\n _ind_2_weight = {_i:1/_c for _i,_c in zip(_uinds, _uind_counts)}\n _chr_weights = np.array([_ind_2_weight[_i] for _i in _chr_ids])\n # K-means\n if method =='kmeans':\n _model = KMeans(n_clusters=_exp_num, random_state=0)\n _model.fit(_chr_hzxys[:,1:], sample_weight=_chr_weights)\n #_init_labels = _model.labels_\n _init_centers = _model.cluster_centers_\n # save for now\n self.chr_2_homolog_centers[_chr_name] = _init_centers" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate the delta of each vector; save the delta point as its master
def calculate_delta(self): rho_des_index, distance, data_size = self.rho_des_index, self.distance, self.data_size self.result[rho_des_index[0]][1] = -1 for i in range(1, data_size): for j in range(0, i): old_i, old_j = rho_des_index[i], rho_des_index[j] min_pos, max_pos = min(old_j, old_i), max(old_j, old_i) if distance[(min_pos, max_pos)] < self.result[old_i][1]: self.result[old_i][1] = distance[(min_pos, max_pos)] self.master[old_i] = old_j self.result[rho_des_index[0]][1] = max(self.result[:, 1])
[ "def delta(vector):\r\n if (len(vector) <= 1):\r\n return 0\r\n \r\n else:\r\n result_vector = []\r\n for index in range(len(vector)-1):\r\n result_vector.append(vector[index+1] - vector[index])\r\n return result_vector", "def calc_dDelta(self):\n self.dDelta = np.zeros((self.nphon, self.nwann, self.nwann))\n #iq, iv, iR\n iR0 = iRlist[(0, 0, 0)]\n iq0 = iqlist[(0, 0, 0)]\n\n for iv in range(self.nphon):\n for ik, k in enumerate(self.klist):\n self.dDelta[iv] += self.kweight[ik] * (\n self.EPCmat_wann_up[iq0, iv, ik, :, :] -\n self.EPCmat_wann_dn[iq0, iv, ik, :, :])", "def calc_deltaE(self):\n\n if 'vdw_unbound' not in self.columns and 'coul_unbound' not in self.columns:\n logger.error('No unbound energy values for vdw and coul in dataframe')\n return\n\n poses = self.poses\n for pose in poses:\n self['vdw_{0}'.format(pose)] = self['vdw_bound_{0}'.format(pose)].sub(self['vdw_unbound'], axis=0)\n self['coul_{0}'.format(pose)] = self['coul_bound_{0}'.format(pose)].sub(self['coul_unbound'], axis=0)\n\n logger.debug(\"Case {0} calculate delta-delta energy for {0} poses\".format(self.cases[0], len(poses)))", "def calc_delta(self):\n portfolio = self.get_portfolio()\n spot_delta = 0\n mark_delta = 0\n for symbol in portfolio:\n item = portfolio[symbol]\n if item['futureType'] == \"Quanto\":\n spot_delta += item['currentQty'] * item['multiplier'] * item['spot']\n mark_delta += item['currentQty'] * item['multiplier'] * item['markPrice']\n elif item['futureType'] == \"Inverse\":\n spot_delta += (item['multiplier'] / item['spot']) * item['currentQty']\n mark_delta += (item['multiplier'] / item['markPrice']) * item['currentQty']\n elif item['futureType'] == \"Linear\":\n spot_delta += item['multiplier'] * item['currentQty']\n mark_delta += item['multiplier'] * item['currentQty']\n basis_delta = mark_delta - spot_delta\n delta = {\n \"spot\": spot_delta,\n \"mark_price\": mark_delta,\n \"basis\": basis_delta\n }\n return delta", "def _get_vector_data(self) -> Tuple[List, List, List, List]:\n var1_values, var2_values = self._get_variable_values().values()\n var1_diffs = diff(var1_values)\n var2_diffs = diff(var2_values)\n\n return var1_values[:-1], var2_values[:-1], var1_diffs, var2_diffs", "def get_delta(data):\n d_array=np.append([0],np.diff(data))\n return d_array", "def _computeDeltaAndDeltaPrime(self):\r\n k = np.arange(self.n)\r\n self.Delta = lambda s: s**self.n + (s**k).dot(self.a) \\\r\n + np.exp(-s*self.tau) * (s**k).dot(self.alpha)\r\n self.DeltaPrime = lambda s: self.n*s**(self.n-1)\\\r\n + (k[1:]*s**k[:-1]).dot(self.a[1:])\\\r\n + np.exp(-s*self.tau)\\\r\n * ((k[1:]*s**(k[:-1])).dot(self.alpha[1:]) \\\r\n - self.tau*(s**k).dot(self.alpha))", "def compute_gradient_from_deltas(self, delta_vectors):\n gradient_w = np.empty((len(delta_vectors)), dtype=object)\n\n for l in range(1, len(self.layers)):\n tmpL = np.empty((len(self.layers[l].neurons[:-1]),len(self.layers[l-1].neurons)), dtype=float)\n for n in range(len(self.layers[l].neurons[:-1])): # exclude bias neuron\n neuron_input = np.array([neuron.getOutput() for neuron in self.layers[l-1].neurons], dtype=float)\n tmpL[n,:] = neuron_input * delta_vectors[l-1][n]\n gradient_w[l-1] = tmpL\n return gradient_w", "def newDelta(delta, N):\n\n\tu,v = np.zeros((N,N)), np.zeros((N,N))\n\n\tenergies, vectors = eigh(Hamiltonian(delta, N))\n\tvectors = np.transpose(vectors)\n\n\tfor i in range(N):\n\t\tu[i] = vectors[i, ::2]\n\t\tv[i] = vectors[i, 1::2]\t\n\n\tdelta=0\n\tfor i in range(N):\n\t\tfor j in range(N):\n\t\t\tdelta += 
u[i,j]*v[i,j]\n\t\n\tg = 2*0.5/N\n\n\treturn -g* delta, energies", "def Vdiff(D1,D2):\n A=dir2cart([D1[0],D1[1],1.])\n B=dir2cart([D2[0],D2[1],1.])\n C=[]\n for i in range(3):\n C.append(A[i]-B[i])\n return cart2dir(C)", "def delta_vel(p1=database['K+'], p2=database['pi+'], p3=database['p+'], pmin=0, pmax=80):\r\n p_range = np.linspace(pmin, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n m3 = p3.mass\r\n dv2, dv3 = [], []\r\n for p in p_range:\r\n v1 = c*beta(p, m1)\r\n v2 = c*beta(p, m2)\r\n v3 = c*beta(p, m3)\r\n dv2.append(abs(v1-v2))\r\n dv3.append(abs(v1-v3))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n# p1_name = r'K$^+$'\r\n# p2_name = r'$\\pi^+$'\r\n# p3_name = r'p$^+$'\r\n ax.plot(p_range, dv2, 'r', label=r'$\\left|v_{K^+}-v_{\\pi^+}\\right|$')\r\n ax.plot(p_range, dv3, 'b', label=r'$\\left|v_{K^+}-v_{p^+}\\right|$')\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'$\\left|\\Delta v\\right|$ / $ms^{-1}$', fontsize=20)\r\n ax.axvline(75, color='k', label='p = 75 GeV')\r\n ax.set_xticks(np.arange(pmin, pmax+1, 1))\r\n ax.set_xticklabels(np.arange(pmin, pmax+1, 1))\r\n ax.grid()\r\n ax.minorticks_on()\r\n ax.set_xlim(pmin, pmax)\r\n# ax.set_ylim(np.min(v1+v2))\r\n ax.legend(fontsize=20, loc=[0.65, 0.2])\r\n plt.show\r\n return", "def deltaEta(self,v):\n return self.eta()-v.eta()", "def delta(self):\n return (self._stages[EStage.CURRENT] - self._stages[EStage.START]) \\\n / (self._stages[EStage.END] - self._stages[EStage.START])", "def deltapos(rah1,ram1,ras1,decd1,decm1,decs1,rah2,ram2,ras2,decd2,decm2,decs2):\n\t#position 1 in degs\n\tra1 = hmstora(rah1,ram1,ras1)\n\tdec1 = dmstodec(decd1,decm1,decs1)\n\t#position 2 in degs\n\tra2 = hmstora(rah2,ram2,ras2)\n\tdec2 = dmstodec(decd2,decm2,decs2)\n\t#1 calculate the separation in delta in arcsecs\n\tdeltadec = deltasep(dec1,dec2)\n\t#2 calculate separation in ra arcsecs\n\tdeltara = alphasep(ra1,ra2,dec1,dec2)\n\t#3 calculate the positional difference arcsecs\n\tsourcesep = angsep(ra1,dec1,ra2,dec2)\n\treturn deltara,deltadec,sourcesep", "def derivative(vector):\n vector_nm1 = np.concatenate([np.array([vector[-1]]),vector[:-1]])\n return vector - vector_nm1", "def derivadaInterpolada(this):\n o = this.config['ordenAproximacion']\n he = this.elementos[0].he\n numero = this.elementos.size\n def du(x):\n if type(x)==float or type(x)==np.float64:\n for i in range(0,numero):\n xa = this.elementos[i].xa\n if x >= xa and x <= xa + he:\n a = 0\n for j in range(0,o+1):\n a = a + this.elementos[i].Ue[j][0]*FEMSections.dndxfi(x-xa, 1, [j,o,he])\n return a\n else:\n retorno = np.array([])\n for i in range(0,numero):\n xa = this.elementos[i].xa\n for l in range(0,x.size):\n k = x[l]\n if k > xa and k <= xa + he:\n a = 0\n for j in range(0,o+1):\n a = a + this.elementos[i].Ue[j][0]*FEMSections.dndxfi(k-xa, 1, [j,o,he])\n retorno = np.append(retorno, a)\n elif k >= xa and k <= xa + he and xa==0:\n a = 0\n for j in range(0,o+1):\n a = a + this.elementos[i].Ue[j][0]*FEMSections.dndxfi(k-xa, 1, [j,o,he])\n retorno = np.append(retorno, a)\n return retorno\n return du", "def gen_delta(self):\n delta = self.delta.gen_delta(self.mask.good_pix, self.mask.bad_pix,\n self.params.nside, self.params.npix)\n return delta", "def absvec(pmn,delta_vec,fd_vec,kptwt,nkpt,nbnd,nfilled):\n alpha = float(0)\n for ikpt in range(nkpt): \n for ibnd in range(nbnd-nfilled): # empty bands\n fd_ik = fd_vec[ikpt][ibnd+nfilled] \n pmntmp = pmn[ikpt][ibnd]\n deltatmp = delta_vec[ikpt][ibnd]\n for jbnd in range(nfilled): #filled 
bands\n # Fermi-dirac factor\n fd_jk = fd_vec[ikpt][jbnd]\n fd_fac = fd_jk-fd_ik\n \n alpha += kptwt[ikpt] * fd_fac * (pmntmp[jbnd]**2) * deltatmp[jbnd]\n return alpha", "def deltaDMB(uvec, upvec, tg0, tdm0, alp, ax, ay, f, sgnG):\n ux, uy = uvec\n upx, upy = upvec\n psix, psiy = gauss10(*uvec), gauss01(*uvec)\n psixx, psiyy, psixy = gauss20(*uvec), gauss02(*uvec), gauss11(*uvec)\n duxdf = -2*alp*sgnG*(alp*psiy*psixy - psix*(ay**2 + alp*psiyy))/((ax*ay)**2*f)\n duydf = -2*alp*sgnG*(alp*psix*psixy - psiy*(ax**2 + alp*psixx))/((ax*ay)**2*f)\n deltadm = -pi*f**3*(tdm0*(duydf*psiy + duxdf*psix - 2*gauss(*uvec)/f) + 2*tg0*(ax**2*duxdf*(ux-upx) + ay**2*duydf*(uy-upy)))/(c*re*pctocm)\n return deltadm" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
use the product of the normalized rho and delta as gamma to determine the cluster centers
def calculate_gamma(self): result = self.result # scaler = preprocessing.StandardScaler() # train_minmax = scaler.fit_transform(result) # st_rho, st_delta = train_minmax[:, 0], train_minmax[:, 1] # self.gamma = (st_delta + st_rho) / 2 self.gamma = result[:, 0] * result[:, 1] self.gamma_des_index = np.argsort(-self.gamma)
[ "def calculate_cluster_center(self, threshold):\n gamma = self.gamma\n self.cluster_center = np.where(gamma >= threshold)[0]", "def _estimate_gamma_dist(self,mean,std):\n var = std*std\n k = mean*mean/var\n omega = var/mean\n\n assert (k>0) & (omega>0), \"No gamma distribution with mean: \"+ str(mean) + \" and variance: \" + str(var)\n return k,omega", "def get_center_of_mass_allies(self,obs):", "def newCenter(x, vecAlloc, k):\n p = x.shape[1]\n \n kNew = np.unique(vecAlloc).shape[0]\n if kNew < k:\n print(\"Error (lack of developpemet):\\\n the number of clusters has droped as at least one center has no neighbor.\") \n\n center = np.zeros((k, p))\n for ic in range(0, k):\n w = np.where(vecAlloc[:,0] == ic)[0] # [0] because where returns a tuple (array, )\n center[ic,:] = x[w,:].mean(axis = 0)\n\n return(center)", "def test_one_center(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]]])\n atom2rcut=np.array([5.0])\n g = dft.gen_grid.Grids(sv)\n g.level = 1 # precision as implemented in pyscf\n g.radi_method=leggauss_ab\n g.build(atom2rcut=atom2rcut)\n\n #print( max( np.linalg.norm(g.coords, axis=1) ) )\n #print( g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0 )\n self.assertAlmostEqual(max( np.linalg.norm(g.coords, axis=1) ), 4.9955942742763986)\n self.assertAlmostEqual(g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0)\n self.assertEqual(len(g.weights), 6248)", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def _update_cluster_center(self, X, mbeta, w_jl, j, l, a_l, n_l):\n attr_values_freq = np.zeros((n_l), dtype=\"float\")\n for t in range(n_l):\n len_fs = len(self.focalsets[j])\n freq = np.sum(mbeta[np.array(X[:, l]) == a_l[t], j])\n attr_values_freq[t] = len_fs**(self.alpha - 1) * freq\n idx_max_freq = np.argmax(attr_values_freq)\n w_jl[idx_max_freq] = 1\n return w_jl", "def _init_homolog_centers(self, method=\"kmeans\", min_spot_num=2, axis_infos=Axis3D_infos):\n if hasattr(self, 'chr_2_homolog_centers') and not self.overwrite:\n if self.verbose:\n print(f\"- directly return chr_2_homolog_centers\")\n return\n if method == 'kmeans':\n from sklearn.cluster import KMeans\n # chr_2_init_centers\n self.chr_2_homolog_centers = {}\n self.chr_2_cand_hzxys = {}\n self.chr_2_cand_ids = {}\n # loop through chrs\n for _chr_name, _exp_num in self.chr_2_copyNum.items():\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n # if not spots exists, skip\n if len(_chr_coords_df) < min_spot_num:\n continue\n # get coordinates\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n # append\n self.chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n self.chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate weights\n _uinds, _uind_counts = np.unique(_chr_ids, return_counts=True)\n _ind_2_weight = {_i:1/_c for _i,_c in zip(_uinds, _uind_counts)}\n _chr_weights = np.array([_ind_2_weight[_i] for _i in _chr_ids])\n # K-means\n if method =='kmeans':\n _model = KMeans(n_clusters=_exp_num, random_state=0)\n _model.fit(_chr_hzxys[:,1:], sample_weight=_chr_weights)\n #_init_labels = _model.labels_\n _init_centers = _model.cluster_centers_\n # save for now\n self.chr_2_homolog_centers[_chr_name] = _init_centers", "def find_center(centers):\n return np.mean(centers, axis = 0)", "def get_centerofgravity(self):\r\n atoms=self.get_atoms()\r\n AtomicMass=1\r\n XYZ_M=[0,0,0]\r\n MassofAA=0\r\n for i in atoms:\r\n 
XYZ_M[0]+=i.Coordinates[0]*AtomicMass\r\n XYZ_M[1]+=i.Coordinates[1]*AtomicMass\r\n XYZ_M[2]+=i.Coordinates[2]*AtomicMass\r\n MassofAA=MassofAA+AtomicMass\r\n return numpy.array([i/MassofAA for i in XYZ_M])", "def test_get_distribution_centers(self):\n pass", "def k_centres_d_space(K, d, per_cluster):\n centres = []\n for k in xrange(K):\n new_centre = np.random.uniform(low= -5.0, high=5.0, size=(1, d))\n while is_too_close(new_centre, centres, 2.0):\n new_centre = np.random.uniform(low=-5.0, high=5.0, size=(1, d))\n centres.append(new_centre)\n #one_dist = np.random.randn(per_cluster, d)\n # one_dist = sci.truncnorm.rvs(-1.0, 1.0, scale=1.0, size=(per_cluster, d))\n pcs = [per_cluster for i in xrange(K)] # + np.random.randint(-5, 6)\n clusters = [2.0 * np.random.uniform(size=(pc, d)) + c for c, pc in zip(centres, pcs)] #0.5 * np.random.randn(pc, d)\n global_opt = _compute_global_opt(clusters, centres, d, sum(pcs))\n return clusters, centres, global_opt", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def kinematic_viscosity(mu, rho):\n\n nu = mu / rho\n\n return nu", "def computeCenters3d(self, data):\n\n\n for i in range(self.nPoints):\n print(\"Label of point \", i, \" is \", self.labels[i])\n for j in range(3):\n self.centers[self.labels[i]][j] += data[i][j]\n\n for c in range(self.n):\n for j in range(3):\n self.centers[c][j] /= self.tots[c]", "def __init__(self, scale=None):\n super(GammaDist, self).__init__(name='gamma', scale=scale)", "def gamma(x):\n return 0.0", "def _get_gamma(self):\n gamma = None\n if self.is_clayey():\n gamma = 16.8 + 0.15*self._data[SoilProperty.N60]\n else:\n gamma = 16 + 0.1 * self._data[SoilProperty.N60]\n gamma=_clamp(gamma,10,2.8*9.81)#do we need this\n return gamma", "def gen_k_centers(k, dim):\n delta = abs(np.random.normal(0.0, 5.0))\n eps = 0.001\n centers = []\n for i in range(k):\n c = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))\n if len(centers):\n c1 = centers[0]\n x = np.random.multivariate_normal(c1, np.identity(c1.size)) - c1\n direction = x / np.linalg.norm(x)\n centers.append(c1 + 2.0 * i * delta * direction + eps)\n else:\n centers.append(c)\n return centers, delta" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select points with gamma greater than 0.2 as the cluster centers
def calculate_cluster_center(self, threshold): gamma = self.gamma self.cluster_center = np.where(gamma >= threshold)[0]
[ "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def predict_center(point):\n point_cluster_num = predict_cluster(point)\n center = centers[point_cluster_num]\n return center", "def center(x):\n return x - x.mean()", "def orthogonalIntercept(x0, y0, k, b):\n\tx = x0 + (y0*k - b*k - x0*k**2)/(k**2 + 1)\n\ty = b + x0*k + (y0*k**2 - b*k**2 - x0*k**3)/(k**2 + 1)\n\treturn (x, y)", "def newCenter(x, vecAlloc, k):\n p = x.shape[1]\n \n kNew = np.unique(vecAlloc).shape[0]\n if kNew < k:\n print(\"Error (lack of developpemet):\\\n the number of clusters has droped as at least one center has no neighbor.\") \n\n center = np.zeros((k, p))\n for ic in range(0, k):\n w = np.where(vecAlloc[:,0] == ic)[0] # [0] because where returns a tuple (array, )\n center[ic,:] = x[w,:].mean(axis = 0)\n\n return(center)", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def reparameterize(self, mu, logvar):\n\t\tlogvar = torch.exp(logvar/2)\n\t\tif self.cuda_flag:\n\t\t\tepsilon = torch.randn((mu.size())).float().cuda()\n\t\telse:\n\t\t\tepsilon = torch.randn((mu.size())).float()\n\t\tlatent_vector = torch.mul(epsilon, logvar) + mu \n\t\treturn latent_vector", "def xintercept(self):\n if self.slope() == 0:\n return None\n else:\n return self.c/self.a", "def cauchy(self, loc, gamma):\n c = loc + gamma * np.tan(np.pi * (self.random() - 0.5))\n return c if c > 0 else self.cauchy(loc, gamma)", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def invgamma(x):\n k = 1.461632\n c = 0.036534\n L = np.log((x+c)/np.sqrt(2*np.pi))\n W = special.lambertw(L/np.exp(1))\n return L/W + 0.5", "def rbf_classify(self, point):\n sum = self.b\n for i, center in enumerate(self.centers):\n sum += self.g[i] * np.exp(-self.gamma * distance.euclidean(center, point) ** 2)\n if sum > 0:\n return 1.0\n else:\n return -1.0", "def lgamma(x):\n return 0.0", "def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)", "def claret_linear(mu, coeff):\n return 1.0 - coeff * (1.0 - mu)", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def predict_center(self, point):\n point_cluster_num = self.predict_cluster(point)\n center = self.centers[point_cluster_num]\n return center", "def gaussian(k, x):\n return (k[0]/(np.sqrt(2*np.pi)*k[2])) * np.exp(-(x-k[1])**2 /(2*k[2]**2)) + k[3]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initial configuration. Used to specify your username, password and domain. Configuration is stored in ~/.accountable/config.yaml.
def configure(username, password, domain): art = r''' Welcome! __ ___. .__ _____ ____ ____ ____ __ __ _____/ |______ \_ |__ | | ____ \__ \ _/ ___\/ ___\/ _ \| | \/ \ __\__ \ | __ \| | _/ __ \ / __ \\ \__\ \__( <_> ) | / | \ | / __ \| \_\ \ |_\ ___/ (____ /\___ >___ >____/|____/|___| /__| (____ /___ /____/\___ > \/ \/ \/ \/ \/ \/ \/ ''' click.secho(art, fg='blue') Config(username=username, password=password, domain=domain)
[ "def login_with_config(self):\n username = self.cfg.get('user', 'username')\n password = token = None\n\n try:\n password = self.cfg.get('user', 'password')\n except configparser.NoOptionError:\n pass\n try:\n token = self.cfg.get('user', 'token')\n except configparser.NoOptionError:\n pass\n\n if password is None and token is None:\n raise KattisConfigError(\n \"Your .kattisrc seems to be corrupted. Please download a new one.\")\n\n loginurl = self.get_url(self.cfg, 'loginurl', 'login')\n return self.login(loginurl, username, password, token)", "def __init__(self):\r\n self.load_config()\r\n self.login()", "def load_credentials(self):\n if self.rc_file is None:\n return\n config = configparser.ConfigParser()\n rc = os.path.expanduser(self.rc_file)\n if os.path.exists(rc):\n config.read(rc)\n trace(1, \"load credentials from\", rc)\n try:\n self.auth(\n config[\"netatmo\"][\"client_id\"],\n config[\"netatmo\"][\"client_secret\"],\n config[\"netatmo\"][\"username\"],\n config[\"netatmo\"][\"password\"],\n )\n if config.has_option(\"netatmo\", \"default_device_id\"):\n self.default_device_id = config[\"netatmo\"][\"default_device_id\"]\n except:\n self.auth(None, None, None, None)", "def init(args):\n # reading existing config file, convert to configparser object\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n\n cfgfile = open(\".osfcli.config\", \"w\")\n config_.write(cfgfile)\n cfgfile.close()", "def createDefaultCredentials():\n\twith open(\"credentials.txt\", 'w') as cfg:\n\t\t\tcfg.write(\"\"\"\n[offlinemom]\nrepository_port = 8000\nrepository_user = ausername\nrepository_pass = 1234\n\"\"\")", "def setup_user(self):\r\n self.email = 'foo@test.com'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)", "def __init__(self, username=None, password=None, auto_login=True, retain_password=False):\n\n self.SERVER = 'scratch.mit.edu'\n self.API_SERVER = 'api.scratch.mit.edu'\n self.PROJECTS_SERVER = 'projects.scratch.mit.edu'\n self.ASSETS_SERVER = 'assets.scratch.mit.edu'\n self.CDN_SERVER = 
'cdn.scratch.mit.edu'\n self.CLOUD = 'clouddata.scratch.mit.edu'\n\n self.username = username\n self.password = None\n self.retain_password = retain_password\n\n self.http_session = _requests.session()\n\n self._save(username, password)\n if username is not None and password is not None and auto_login:\n self.login(username, password)", "def setup(self):\n messages = [\n \"Please enter you Holberton email: \",\n \"Please enter your Holberton password (don't worry passwd will be encrypted): \",\n \"Please enter full path where you want to save future projects: \"\n ]\n settings_ini_variables = [\"username\", 'password', 'location']\n\n settings_ini = {}\n for msg, var in zip(messages, settings_ini_variables):\n user_input = str(input(msg))\n\n if var == \"location\":\n while not os.path.exists(user_input):\n print(\"[!]: SUPPLIED PATH DOES NOT EXIST.\")\n user_input = str(input(msg))\n settings_ini[var] = encrypted(user_input) if var == \"password\" else user_input\n\n self.write_to_file(**settings_ini)", "def _access_config(self):\n LOG.info('Setup ssh/local user access')\n self.runchroot([\n 'pacman',\n '-Syy',\n '--noconfirm',\n 'openssh'\n ])\n self.runchroot([\n 'systemctl',\n 'enable',\n 'sshd.service'\n ])\n self.runchroot([\n 'systemctl',\n 'enable',\n 'getty@ttyS0.service'\n ])\n if self.domain.password:\n self.runchroot([\n 'usermod',\n '-p',\n self.domain.password,\n 'root'\n ])\n if self.domain.sshkeys:\n authorized_keys = []\n for key, value in self.domain.sshkeys.items():\n authorized_keys.append(\n \"%s %s %s\" % (value['type'], value['key'], key)\n )\n os.mkdir('%s/root/.ssh' % self.target)\n self.writetargetfile(\n '/root/.ssh/authorized_keys',\n authorized_keys\n )", "def init():\n file_name = 'config.json'\n home_directory_path = str(Path.home())\n config_file_directory = home_directory_path+\"/.config/files/\"\n full_path = config_file_directory + file_name\n\n if os.path.isfile(full_path) and os.access(full_path, os.R_OK): # Readable Config file exists and is valid\n try:\n with open(full_path) as file:\n json_file = json.load(file)\n load_json_and_arguments(json_file)\n\n except ValueError as exception:\n raise ValueError(\"Invalid JSON configuration file\")\n\n elif not os.path.isfile(full_path): # Config file doesn't exist yet, create it\n\n if not os.path.exists(config_file_directory): # Make the directory if that doesn't exist as well\n os.makedirs(config_file_directory)\n\n get_account_info(full_path)\n\n else:\n raise IOError(\"Config file: \" + full_path + \" not accessible\")", "def configure():\n\n conf_file = _conf_file()\n\n api_key = ''\n if os.environ.get('CLARIFAI_API_KEY'):\n print('The environment variable CLARIFAI_API_KEY is already set. 
')\n api_key = os.environ['CLARIFAI_API_KEY']\n elif os.path.exists(conf_file):\n parser = ConfigParser()\n parser.optionxform = str\n\n with open(conf_file, 'r') as fdr:\n parser.readfp(fdr)\n\n if parser.has_option('clarifai', 'CLARIFAI_API_KEY'):\n api_key = parser.get('clarifai', 'CLARIFAI_API_KEY')\n\n api_key_input = input('CLARIFAI_API_KEY: [%s]: ' % _masked_api_key(api_key))\n if api_key_input:\n _setup(api_key_input)\n elif api_key:\n _setup(api_key)\n\n try:\n app = ClarifaiApp()\n app.api.get_inputs(1, 1)\n print('\\nCLARIFAI_API_KEY is valid')\n print('Saved it to environment and to ~/.clarifai/config')\n except ApiError:\n print('Invalid CLARIFAI_API_KEY, please try again')", "def __init__(self, domain, email, password, app):\n self.client = EmailSettingsClient(domain=domain)\n self.client.ClientLogin(email=email, password=password,\n source=app)", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def ConfigInit():\n # Initialize the config system from the command line options.\n config_lib.ParseConfigCommandLine()", "def config():\n usage = \"usage: %prog [options] (\" + '|'.join(COMMANDS) + \") [filenames]\"\n parser = OptionParser(usage=usage)\n parser.add_option('-s', '--server', metavar='<hostname>',\n help=\"deploy to this server (default: pageforest.com\")\n parser.add_option('-u', '--username')\n parser.add_option('-p', '--password')\n parser.add_option('-v', '--verbose', action='store_true')\n parser.add_option('-q', '--quiet', action='store_true')\n options, args = parser.parse_args()\n\n if not args:\n parser.error(\"No command specified.\")\n options.command = args.pop(0).lower().strip()\n if not options.command:\n parser.error(\"Empty command.\")\n # Prefix expansion.\n for command in COMMANDS:\n if command.startswith(options.command):\n options.command = command\n if options.command not in COMMANDS:\n parser.error(\"Unsupported command: \" + options.command)\n\n if options.verbose:\n print(\"Found simplejson in %s\" % os.path.dirname(json.__file__))\n\n if not options.server:\n options.server = \"pageforest.com\"\n\n if options.command == 'test':\n options.application = 'pfpytest'\n else:\n options.application = load_application()\n\n if os.path.exists(PASSWORD_FILENAME):\n options.username, options.password = load_credentials()\n\n options.save = False\n if not options.username:\n options.username = raw_input(\"Username: \")\n options.save = True\n if not options.password:\n from getpass import getpass\n options.password = getpass(\"Password: \")\n options.save = True\n return options, args", "def __init__(self):\n super().__init__()\n\n etc_conf_names = ('app.conf', 'app.local.conf')\n conf_paths = [os.path.join(APP_DIR, 'etc', c) for c in etc_conf_names]\n\n user_config_path = os.path.join(\n os.path.expanduser('~'),\n '.config',\n 'url_manager.conf'\n )\n conf_paths.append(user_config_path)\n\n self.read(conf_paths)\n self.set('DEFAULT', 'app_dir', APP_DIR)", "def _auth_default_domain(self, config):\n\n identity_version = config.get('identity_api_version', '')\n auth_type = config.get('auth_type', None)\n\n # TODO(mordred): This is a usability improvement that's broadly useful\n # We should port it back up into os-client-config.\n default_domain = config.get('default_domain', None)\n if (identity_version == '3' and\n not auth_type.startswith('v2') and\n default_domain):\n\n # NOTE(stevemar): If PROJECT_DOMAIN_ID or PROJECT_DOMAIN_NAME is\n # present, then do not change the behaviour. 
Otherwise, set the\n # PROJECT_DOMAIN_ID to 'OS_DEFAULT_DOMAIN' for better usability.\n if (\n auth_type in (\"password\", \"v3password\", \"v3totp\") and\n not config['auth'].get('project_domain_id') and\n not config['auth'].get('project_domain_name')\n ):\n config['auth']['project_domain_id'] = default_domain\n\n # NOTE(stevemar): If USER_DOMAIN_ID or USER_DOMAIN_NAME is present,\n # then do not change the behaviour. Otherwise, set the\n # USER_DOMAIN_ID to 'OS_DEFAULT_DOMAIN' for better usability.\n # NOTE(aloga): this should only be set if there is a username.\n # TODO(dtroyer): Move this to os-client-config after the plugin has\n # been loaded so we can check directly if the options are accepted.\n if (\n auth_type in (\"password\", \"v3password\", \"v3totp\") and\n not config['auth'].get('user_domain_id') and\n not config['auth'].get('user_domain_name')\n ):\n config['auth']['user_domain_id'] = default_domain\n return config", "def config_data():\n return {CONF_USERNAME: \"fake\", CONF_PASSWORD: \"user\"}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all issue types. Optional parameter to list issue types by a given project.
def issuetypes(accountable, project_key): projects = accountable.issue_types(project_key) headers = sorted(['id', 'name', 'description']) rows = [] for key, issue_types in sorted(projects.items()): for issue_type in issue_types: rows.append( [key] + [v for k, v in sorted(issue_type.items()) if k in headers] ) rows.insert(0, ['project_key'] + headers) print_table(SingleTable(rows))
[ "def request_issue_types(cfg):\n url = cjm.request.make_cj_url(cfg, \"issuetype\")\n return cjm.request.make_cj_request(cfg, url).json()", "def issue_types(self):\n return ['issue']", "def get_relevant_issue_classes(project):\n allowed_tags = ['generic'] + [tag.name for tag in project.tags]\n query = {\n 'tags.name': {\n '$in': allowed_tags,\n }\n }\n return backend.filter(IssueClass, query)", "def request_issue_link_types(cfg):\n url = cjm.request.make_cj_url(cfg, \"issueLinkType\")\n return cjm.request.make_cj_request(cfg, url).json()[\"issueLinkTypes\"]", "def list(self, request):\n bug_types = BugType.objects.all()\n\n # Note the additional `many=True` argument to the\n # serializer. It's needed when you are serializing\n # a list of objects instead of a single object.\n serializer = BugTypeSerializer(\n bug_types, many=True, context={'request': request})\n return Response(serializer.data)", "def list_file_types(project_id, host, email, password, api_key):\n ListFileTypes(\n project_id,\n Credentials(email=email, password=password, api_key=api_key),\n Optionals(host=host),\n ).run()", "def get_issues(self, proj):\n self.cur.execute('SELECT * FROM issues WHERE repo_id={}'.format(proj.id))\n issues = [Issues(row) for row in self.cur.fetchall()]\n return issues", "def list_all(self, **kwargs):\n\n session = self.session\n max_pages = 20\n key = \"issues\"\n endpoint = \"/v1/issues\"\n request = HttpHelper(session)\n params = utility.sanitize_url_params(kwargs)\n issues = request.get_paginated(endpoint, key, max_pages, params=params)\n return issues", "def get_all(self, problem_type):\n pass", "def listProject(self, ptype=None):\n if not ptype:\n return self.projects.iteritems()\n else:\n return (\n (name, proj)\n for name, proj in self.projects.iteritems()\n if proj.p_type == ptype\n )", "def get_list(self):\n result = issue_request(method='GET', endpoint='account/projects', token=self.token)\n return result", "def issues_list(self, mar, request):\n if request.additionalProject:\n for project_name in request.additionalProject:\n project = self._services.project.GetProjectByName(\n mar.cnxn, project_name)\n if project and not permissions.UserCanViewProject(\n mar.auth.user_pb, mar.auth.effective_ids, project):\n raise permissions.PermissionException(\n 'The user %s has no permission for project %s' %\n (mar.auth.email, project_name))\n url_params = [(name, mar.GetParam(name)) for name in\n framework_helpers.RECOGNIZED_PARAMS]\n # TODO(jrobbins): This should go through work_env.\n pipeline = frontendsearchpipeline.FrontendSearchPipeline(\n mar.cnxn, self._services, mar.auth, [mar.me_user_id], mar.query,\n mar.query_project_names, mar.num, mar.start, url_params, mar.can,\n mar.group_by_spec, mar.sort_spec, mar.warnings, mar.errors,\n mar.use_cached_searches, mar.profiler, display_mode=mar.mode,\n project=mar.project)\n if not mar.errors.AnyErrors():\n pipeline.SearchForIIDs()\n pipeline.MergeAndSortIssues()\n pipeline.Paginate()\n else:\n raise endpoints.BadRequestException(mar.errors.query)\n\n issue_list = [\n api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssueWrapper, r, mar, self._services)\n for r in pipeline.visible_results]\n return api_pb2_v1.IssuesListResponse(\n kind='monorail#issueList',\n totalResults=pipeline.total_count,\n items=issue_list)", "def get_jira_defects(project):\n return get_jira_issues('project = \"{}\" AND filter = 19589'.format(project))", "def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += 
'\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)", "def report_types():\n return [ReportClass for name, ReportClass in REPORT_REGISTRY.items() if name != \"BaseReport\"]", "def view_all(request, index_call=False):\n closed = request.GET.get('closed', '')\n if closed in ('0', 'false'):\n closed = False\n elif closed in ('1', 'true'):\n closed = True\n elif index_call:\n # for index we display only open issues by default\n closed = False\n else:\n closed = None\n\n nav_parameters = {}\n if closed is not None:\n nav_parameters['closed'] = int(closed)\n\n # This uses eventual consistency and cannot be made strongly consistent.\n query = models.Issue.query(\n models.Issue.private == False).order(-models.Issue.modified)\n if closed is not None:\n # return only opened or closed issues\n query = query.filter(models.Issue.closed == closed)\n\n return _paginate_issues(reverse(view_all),\n request,\n query,\n 'all.html',\n extra_nav_parameters=nav_parameters,\n extra_template_params=dict(closed=closed))", "def view_all(request, index_call=False):\n closed = request.GET.get('closed', '')\n if closed in ('0', 'false'):\n closed = False\n elif closed in ('1', 'true'):\n closed = True\n elif index_call:\n # for index we display only open issues by default\n closed = False\n else:\n closed = None\n\n nav_parameters = {}\n if closed is not None:\n nav_parameters['closed'] = int(closed)\n\n query = models.Issue.query(\n models.Issue.private == False).order(-models.Issue.modified)\n if closed is not None:\n # return only opened or closed issues\n query = query.filter(models.Issue.closed == closed)\n\n return _paginate_issues(reverse(view_all),\n request,\n query,\n 'all.html',\n extra_nav_parameters=nav_parameters,\n extra_template_params=dict(closed=closed))", "def list_types(apiId=None, format=None, nextToken=None, maxResults=None):\n pass", "def filter_issue_types(self, issue_types):\n\n # A list of issue type classes, sorted by the display name. This is used\n # to keep the results in display name order where the order is otherwise\n # not specified.\n sorted_issue_types = [cls for _, cls in sorted(issue_types.items(), key=itemgetter(0))]\n if not self._include_by_default:\n # Preserve the order of issue types given on the command line.\n source_list = []\n for (cls, _) in self.klasses:\n for issue_type in sorted_issue_types:\n if (issue_type == cls or issubclass(issue_type, cls)) and not issue_type in source_list:\n source_list.append(issue_type)\n else:\n source_list = sorted_issue_types\n ret = []\n for klass_a in source_list:\n want_this_class = self._include_by_default\n for (klass_b, want_this_issue_type) in self.klasses:\n if klass_a == klass_b or issubclass(klass_a, klass_b):\n want_this_class = want_this_issue_type\n if want_this_class:\n ret.append(klass_a)\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all comments for a given issue key.
def comments(accountable): comments = accountable.issue_comments() headers = sorted(['author_name', 'body', 'updated']) if comments: rows = [[v for k, v in sorted(c.items()) if k in headers] for c in comments] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho('No comments found for {}'.format( accountable.issue_key ), fg='red')
[ "def request_issue_comments_regexp(cfg, issue_key, comment_re):\n # pylint: disable=too-many-nested-blocks\n\n comments = []\n comments_url = cjm.request.make_cj_url(cfg, \"issue\", issue_key, \"comment\")\n\n start_at = 0\n max_results = 50\n\n while True:\n response = cjm.request.make_cj_request(\n cfg, comments_url,\n params={\"startAt\": start_at, \"maxResults\": max_results})\n response_json = response.json()\n\n for comment in response_json[\"comments\"]:\n for content_l1 in comment[\"body\"][\"content\"]:\n if content_l1[\"type\"] == JIRA_COMMENT_CONTENT_TYPE_PARAGRAPH:\n for content_l2 in content_l1[\"content\"]:\n if content_l2[\"type\"] == JIRA_COMMENT_CONTENT_TYPE_TEXT:\n m = comment_re.match(content_l2[\"text\"])\n if m is not None:\n comments.append(m)\n\n start_at += max_results\n\n if start_at >= response_json[\"total\"]:\n break\n\n return comments", "def get_comments(issue):\n\tcomments = []\n\tkey = issue.get('key')\n\n\t# for each comment save it and see if QA steps\n\tfor index, comment in enumerate(issue.get('renderedFields', {}).get('comment', {}).get('comments', [])):\n\n\t\t# try toget raw comment data\n\t\traw_comments = issue.get('fields', {}).get('comment', {}).get('comments', [])\n\t\tif len(raw_comments) > index-1:\n\t\t\tcomment['raw_comment'] = raw_comments[index].get('body', '')\n\n\t\tcomments.append( format_comment(comment, key) )\n\treturn comments", "def problem_comments(self, identifier):\n return self._get(\"problems/%d/comments\" % identifier).json()", "def get_issue_comments(issue):\n issue_comments = []\n for comment in issue.fields.comment.comments:\n comment_content = comment.body\n comment_author = comment.author.name\n comment_date = datetime.strptime(comment.created, '%Y-%m-%dT%H:%M:%S.%f%z').date()\n issue_comments.append({'body' : comment_content, 'author' : comment_author, 'date' : comment_date})\n return issue_comments", "def GetIssueComments(self, bug_id, project='chromium'):\n if not bug_id or bug_id < 0:\n return None\n response = self._MakeGetCommentsRequest(bug_id, project=project)\n if not response:\n return None\n return [{\n 'id': r['id'],\n 'author': r['author'].get('name'),\n 'content': r['content'],\n 'published': r['published'],\n 'updates': r['updates']\n } for r in response.get('items')]", "def getAllComments():", "def _MakeGetCommentsRequest(self, bug_id, project):\n # TODO (prasadv): By default the max number of comments retrieved in\n # one request is 100. 
Since bisect-fyi jobs may have more then 100\n # comments for now we set this maxResults count as 10000.\n # Remove this max count once we find a way to clear old comments\n # on FYI issues.\n request = self._service.issues().comments().list(\n projectId=project, issueId=bug_id, maxResults=10000)\n return self._ExecuteRequest(request)", "def test_issue_get_comments(self):\n pass", "def view_comments(request, id):\n comments = Comment.objects.filter(ticket=id)\n \n return render (request, \"comments.html\", {'comments': comments})", "def list(self, number, user=None, repo=None):\n return self._get_result(\n self.make_request('pull_requests.comments.list', number=number,\n user=user, repo=repo)\n )", "def get_cohorted_commentables(course_key):\r\n\r\n course = courses.get_course_by_id(course_key)\r\n\r\n if not course.is_cohorted:\r\n # this is the easy case :)\r\n ans = []\r\n else:\r\n ans = course.cohorted_discussions\r\n\r\n return ans", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def get_pr_comments(api, urn, pr_num):\n params = {\n \"per_page\": settings.DEFAULT_PAGINATION\n }\n path = \"/repos/{urn}/issues/{pr}/comments\".format(urn=urn, pr=pr_num)\n comments = api(\"get\", path, params=params)\n for comment in comments:\n yield comment", "def lookup_comment_list(self):\n if self.thread_id is None:\n return None, None\n\n # Just pulling a single issue here so pagination shouldn't be problem\n my_req = self.raw_pull(self.thread_id)\n if my_req.status_code != 200:\n raise GitHubAngry('Bad status code %s because %s' % (\n my_req.status_code, my_req.reason))\n issue_json = my_req.json()\n comments_url = issue_json['comments_url'] + self.url_extras\n kwargs = {} if not self.user else {'auth': (self.user, self.token)}\n comments_json = []\n while comments_url:\n logging.debug('Pulling comments URL: %s', comments_url)\n c_req = requests.get(comments_url, **kwargs)\n my_json = c_req.json()\n assert isinstance(my_json, list)\n comments_json.extend(my_json)\n comments_url = None\n if 'link' in c_req.headers: # need to handle pagination.\n logging.debug('Paginating in lookup_comment_list')\n link = c_req.headers['link'].split(',')\n for thing in link:\n potential_url, part = thing.split('; ')\n if part == 'rel=\"next\"':\n comments_url = potential_url.lstrip(' <').rstrip('> ')\n\n return issue_json, comments_json", "def all_user_comments(username):\n return commentslist", "def _listIssues_Comments_GenericQuery(self):\n return '''\n SELECT\n comments.issue_id,\n comments.comment\n FROM\n comments\n INNER JOIN issues ON\n issues.id = comments.issue_id\n WHERE\n issues.open = 1\n '''", "def comments(self):\r\n return IssueComments(self)", "def get_all_comments(question_id):\n from api.app.models.models import Comment, Question\n response = None\n question_id = int(question_id)\n comments = Comment.query_by_field(\"question\", question_id)\n if not Question.query_by_field(\"id\", question_id):\n response = jsonify({\n \"error\": \"There is no question with that id\",\n \"status\": Status.not_found\n }), Status.not_found\n elif not comments:\n response = jsonify({\n \"error\": \"There are no comments for that question\",\n \"status\": Status.not_found\n }), Status.not_found\n else:\n response = jsonify({\n \"data\": [comment.to_dictionary() for comment in comments],\n \"status\": Status.success\n }), Status.success\n return response", "def 
request_issues_by_keys(cfg, issue_keys):\n if not issue_keys:\n return []\n\n issues = []\n issues_url = cjm.request.make_cj_url(cfg, \"search\")\n\n jql = 'key in ({0:s})'.format(\", \".join(issue_keys))\n start_at = 0\n max_results = 50\n\n while True:\n response = cjm.request.make_cj_post_request(\n cfg, issues_url,\n json={\"jql\": jql, \"startAt\": start_at, \"maxResults\": max_results})\n response_json = response.json()\n\n for issue in response_json[\"issues\"]:\n issues.append(extract_issue_data(cfg, issue))\n\n start_at += max_results\n\n if start_at >= response_json[\"total\"]:\n break\n\n return issues" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a comment to the given issue key. Accepts a body argument to be used as the comment's body.
def addcomment(accountable, body): r = accountable.issue_add_comment(body) headers = sorted(['author_name', 'body', 'updated']) rows = [[v for k, v in sorted(r.items()) if k in headers]] rows.insert(0, headers) print_table(SingleTable(rows))
[ "def add_issue_comment(self, issue_comment):\r\n\t\tself[\"issueComments\"][issue_comment[\"id\"]] = issue_comment", "def create_issue_comment(self, body):\n json = None\n if body:\n owner, repo = self.repository\n owner = owner.split('/')[-1]\n url = self._build_url('repos', owner, repo, 'issues', str(self.number), 'comments')\n json = self._json(self._post(url, data={'body': body}), 200)\n return IssueComment(json, self) if json else None", "def create_comment(self, body):\n return self.client.request(\n \"{}/issues/{}/comments\".format(self.repo.base_path, self.num),\n params={\"body\": body},\n method=\"POST\"\n )", "def add_comment(\n issue_key,\n comment,\n visibility=None,\n is_internal=False,\n server=None,\n username=None,\n password=None,\n):\n jira_ = _get_jira(server=server, username=username, password=password)\n comm = jira_.add_comment(\n issue_key, comment, visibility=visibility, is_internal=is_internal\n )\n return True", "def __add_comment(self, issue_id, comment):\n import httplib2\n http = httplib2.Http() \n response, content = http.request(\n uri=self.__issue_url % int(issue_id),\n method='PUT',\n body=comment,\n headers={\n 'X-Redmine-API-Key': self.__api_key,\n 'Content-type': 'application/json'\n }\n )\n print(response)\n print(content)", "def _add_comment(self, issue_id, comment):\n self.logger.debug(\"Adding comment to issue {}\".format(issue_id))\n self.jira.add_comment(issue_id, comment, visibility={'group': 'jira-users'})", "def add_comment_to_issue(self, issue, comment, visibility=None):\r\n self.jira.add_comment(issue=issue, body=comment)", "def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()", "def add_pr_comment(comment: str):\n token = os.environ['CONTENT_GITHUB_TOKEN']\n branch_name = os.environ['CI_COMMIT_BRANCH']\n sha1 = os.environ['CI_COMMIT_SHA']\n\n query = f'?q={sha1}+repo:demisto/content+is:pr+is:open+head:{branch_name}+is:open'\n url = 'https://api.github.com/search/issues'\n headers = {'Authorization': 'Bearer ' + token}\n try:\n res = requests.get(url + query, headers=headers, verify=False)\n res_json = handle_github_response(res)\n if res_json and res_json.get('total_count', 0) == 1:\n issue_url = res_json['items'][0].get('comments_url') if res_json.get('items', []) else None\n if issue_url:\n res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)\n handle_github_response(res)\n else:\n logging.warning(\n f'Add pull request comment failed: There is more then one open pull request for branch {branch_name}.')\n except Exception:\n logging.exception('Add pull request comment failed.')", "def comment_jira(jira_id, comment):\n\n jira_api = get_jira_api()\n log.info(\"Commenting \\n{} on ticket {}\".format(\n\t\tcomment,\n\t\tjira_id)\n\t)\n \n jira_api.add_comment(jira_id, comment)", "def post_comment(self, entry, body, **args):\n args.update(entry=entry, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def on_issue_comment(self, payload):\n pass", "def addCommentToEmail(self, id, body, owner_user_id):\n data = dict(\n BODY = body,\n OWNER_USER_ID = owner_user_id,\n )\n urldata = json.dumps(data)\n text = self.generateRequest('/v2.1/Emails/' + str(id) + '/Comments', 'POST', urldata)\n return json.loads(text)", "def add_issue(self, issue):\r\n\t\tself[\"issues\"][issue[\"id\"]] = issue", "def comment(self, body, incident_id):\n payload = {\"comment\":{\"body\":body, \"is_private\":\"false\"}}\n response = self.session.post(\n \"{0}/incidents/{1}/comments.json\".format(self.uri, incident_id),\n json=payload\n )\n return response.status_code", "def add_comment(self, task_id: str, comment_body: str) -> str:\n validate_object_id(task_id, \"AsanaClient.add_comment requires a task_id\")\n if comment_body is None or not comment_body:\n raise ValueError(\"AsanaClient.add_comment requires a comment body\")\n response = self.asana_api_client.tasks.add_comment(\n task_id, {\"html_text\": comment_body}\n )\n return response[\"gid\"]", "def _apply_comment(self, iid, comment):\n data = {\"body\" : comment._body}\n resp = self._post(\n self._base + \"/issues/{}/comments\".format(iid),\n data=self._format_data(data))", "def add_comment(jira_id: str, comment_text: str) -> jira.resources.Comment:\n return get_client().add_comment(jira_id, comment_text)", "def edit(self, body, extraParams={}):\n import labstep.entities.comment.repository as commentRepository\n\n return commentRepository.editComment(self, body, extraParams=extraParams)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all worklogs for a given issue key.
def worklog(accountable): worklog = accountable.issue_worklog() headers = ['author_name', 'comment', 'time_spent'] if worklog: rows = [[v for k, v in sorted(w.items()) if k in headers] for w in worklog] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho( 'No worklogs found for {}'.format(accountable.issue_key), fg='red' )
[ "def get_worklog(issue):\n\tworklogs = []\n\tworklog_field = issue.get('fields', {}).get('worklog', False)\n\n\tif worklog_field:\n\t\tworklogs = worklog_field.get('worklogs', [])\n\n\treturn worklogs", "def obtain_worklogs(issues, start_date, end_date, session_data):\n issue_keys = dict()\n all_worklogs = list()\n received_worklogs = list()\n jira_conn = session_data['jira_conn']\n for issue in issues:\n if issue.fields is None:\n continue\n if issue.fields.worklog.total > issue.fields.worklog.maxResults:\n received_worklogs += jira_conn.worklogs(issue.id) # additional request to JIRA API\n else:\n received_worklogs += issue.fields.worklog.worklogs\n\n issue_keys[issue.id] = issue.key\n\n for worklog in received_worklogs:\n worklog_date = pendulum.parse(worklog.started)\n if worklog_date < start_date or worklog_date > end_date:\n continue\n w_data = {\n 'issue_key': issue_keys[worklog.issueId],\n 'author_name': worklog.author.name,\n 'created': pendulum.parse(worklog.created),\n 'started': worklog_date,\n 'time_spent_seconds': worklog.timeSpentSeconds,\n }\n all_worklogs.append(w_data)\n\n return all_worklogs", "def define_user_worklogs(_worklogs, username, name_key):\n return [log for log in _worklogs if log.get(name_key) == username]", "def request_issues_by_keys(cfg, issue_keys):\n if not issue_keys:\n return []\n\n issues = []\n issues_url = cjm.request.make_cj_url(cfg, \"search\")\n\n jql = 'key in ({0:s})'.format(\", \".join(issue_keys))\n start_at = 0\n max_results = 50\n\n while True:\n response = cjm.request.make_cj_post_request(\n cfg, issues_url,\n json={\"jql\": jql, \"startAt\": start_at, \"maxResults\": max_results})\n response_json = response.json()\n\n for issue in response_json[\"issues\"]:\n issues.append(extract_issue_data(cfg, issue))\n\n start_at += max_results\n\n if start_at >= response_json[\"total\"]:\n break\n\n return issues", "def get_job_logs(self, job_id: str) -> str:\n return self._log_client.get_logs(job_id)", "def get_job_logs(self, params, ujs_proxy=None):\n if ujs_proxy is None:\n ujs_proxy = self.__proxy_client()\n return ujs_proxy.get_job_logs(params)", "def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)", "def list_jira_issues(jql):\n\n LOGGER.debug('Received call to listJiraIssues function - %s' % jql)\n\n # asking for 100 issues per page\n npp = 100\n\n # get the first page of matches\n r = get_jira_issues_page(jql, st=0, npp=npp)\n\n # add the issues to a list and get number of total tickets in query\n # and where we started at to get the next page\n out = r['issues']\n nr = r['total']\n start = r['startAt']\n\n # if there are still tickets to get, go get them\n while len(out) != nr:\n\n # make the call for the next page\n r = get_jira_issues_page(jql, st=start + npp, npp=npp)\n\n # update the variables\n start = r['startAt']\n out = out + r['issues']\n\n LOGGER.info('%s records read from JIRA', len(out))\n\n return [x['key'] for x in out]", "def push_worklogs(\n entries: Sequence[TogglEntry], toggl_token: str\n) -> Optional[str]:\n for index, worklog in enumerate(entries):\n logger.info('pushing worklog {}/{}'.format(index + 1, len(entries)))\n\n payload = json.dumps(asdict(worklog), cls=DateTimeEncoder)\n\n response = requests.post(\n 'https://www.toggl.com/api/v8/time_entries',\n data=payload,\n headers={'Content-Type': 'application/json'},\n auth=(toggl_token, 'api_token'),\n )\n\n try:\n response.raise_for_status()\n except HTTPError as err:\n assert isinstance(err.response.text, str)\n return err.response.text\n except RequestException:\n return traceback.format_exc()\n\n return None", "def get_klocwork_issues(self, build, project, project_root):\n common = {}\n common['user'] = self.user\n common['ltoken'] = self.ltoken\n\n # request list of issues\n issues_response = fetch(self.url, {\n 'action': 'search',\n 'project': project,\n 'build': build\n }, common)\n # request issues details\n issues_table = {}\n issue_details_requests = []\n for issue in issues_response:\n issues_table[str(issue['id'])] = issue\n issue_details_requests.append({\n 'action': 'issue_details',\n 'project': project,\n 'id': issue['id']\n })\n issues_details_response = multifetch(self.url, issue_details_requests,\n common)\n for response in issues_details_response:\n for r in response:\n issues_table[r['id']].update(r)\n\n issues_table = issues_table.values()\n for issue in issues_table:\n # get the latest comment from the history or leave empty string\n issue['comment'] = issue.get('history', [{}])[0].get('comment', '')\n # Issue is dispositioned if code is not Analyze and comment is not empty\n if not issue['comment'] or issue['code'] == 'Analyze':\n issue['dispositioned'] = 'no'\n else:\n issue['dispositioned'] = 'yes'\n location = os.path.normpath(issue['location'])\n relpath = location\n if project_root:\n try:\n relpath = os.path.relpath(location, project_root)\n except:\n pass\n\n issue['location'] = relpath.replace('\\\\', '/')\n return issues_table", "def get_logs(job_id: int, db: Session = DB_SESSION):\n return GetLogsResponse(logs=_get_logs(task_id=job_id, session=db))", "def list_entries(logger_name):\n logger = logging_client.logger(logger_name)\n print('Listing entries for logger {}:'.format(logger.name))\n for entry in logger.list_entries():\n timestamp = entry.timestamp.isoformat()\n print('* {}: {}'.format(timestamp, entry.payload))", "def get_logs(self, job_id):\n\n # Get the logstream name\n response = self.batch_client.describe_jobs(jobs=[job_id])\n logstream = response[\"jobs\"][0][\"container\"][\"logStreamName\"]\n\n # Keep a list with the log messages\n logs = []\n\n # Get the logs\n response = self.logs_client.get_log_events(\n logGroupName=\"/aws/batch/job\", logStreamName=logstream\n )\n\n # Add to the list\n logs.extend([l[\"message\"] for l in response[\"events\"]])\n\n # Keep getting more pages\n while response[\"nextForwardToken\"] is not None:\n\n # Keep track of the last token used\n last_token = response[\"nextForwardToken\"]\n\n # Get the next page\n response = self.logs_client.get_log_events(\n logGroupName=\"/aws/batch/job\",\n logStreamName=logstream,\n nextToken=last_token,\n )\n\n # If the token is the same, we're done\n if response[\"nextForwardToken\"] == last_token:\n response[\"nextForwardToken\"] = None\n else:\n # Otherwise keep adding to the logs\n logs.extend([l[\"message\"] for l in response[\"events\"]])\n\n return logs", "def _get_project_user_stories(self, project_key):\n self.logger.info(\"Getting project {} user stories\".format(project_key))\n issues = self.jira.search_issues('project=' + project_key + ' and issuetype=' + config.JIRA_USER_STORY_TYPE,\n maxResults=200)\n return issues", "def list_logs(request):\n sort_order, sort_dir = get_sort_order(request.GET, \"date\")\n search = request.GET.get(\"searchquery\")\n if not request.user.is_superuser:\n domains = Domain.objects.get_for_admin(request.user)\n logs = ml_models.Maillog.objects.filter(\n Q(from_domain__in=domains) | Q(to_domain__in=domains)\n )\n else:\n logs = ml_models.Maillog.objects.all()\n logs = logs.order_by(\"{}{}\".format(sort_dir, sort_order))\n if search:\n logs = logs.filter(\n Q(sender__icontains=search) |\n Q(rcpt__icontains=search) |\n Q(queue_id__icontains=search) |\n Q(status__icontains=search)\n )\n page = get_listing_page(logs, request.GET.get(\"page\", 1))\n context = {\n \"headers\": render_to_string(\n \"admin/domains_log_headers.html\", {}, request\n )\n }\n if page is None:\n context[\"length\"] = 0\n else:\n context[\"rows\"] = render_to_string(\n \"admin/domains_logs.html\", {\"logs\": page}, request\n )\n context[\"pages\"] = [page.number]\n return render_to_json_response(context)", "def get_logs_list():\n # reads the session\n session = request.args.get('session', type=str)\n\n available_keys = []\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n\n all_keys = lh.get_handlers().keys()\n\n for key in all_keys:\n if lh.check_user_log_visibility(user, key):\n available_keys.append(key)\n\n return jsonify({\"logs\": available_keys})", "def group_timelogs(timelogs, logf):\n cache = {}\n for timelog in timelogs:\n key = '{}:{}:{}'.format(timelog.ticket, timelog.description, timelog.date.date())\n logf(\"Found worklog {}: {} ({}) ({})\".format(timelog.ticket, timelog.description, timelog.date, timelog.time))\n if key not in cache:\n cache[key] = timelog\n else:\n cache[key].time += timelog.time\n\n logf('\\n---\\n')\n for timelog in cache.values():\n logf(\"Grouped worklog {}: {} ({}) ({})\".format(timelog.ticket, timelog.description, timelog.date, timelog.time))\n\n return cache.values()", "def list_whylogs_runs(experiment_id: str, dataset_name: str = \"default\"):\n import mlflow\n\n client = mlflow.tracking.MlflowClient()\n run_infos = client.list_run_infos(experiment_id)\n\n res = []\n for run in run_infos:\n if run.status != \"FINISHED\":\n continue\n artifacts = client.list_artifacts(run.run_id, path=f\"whylogs/{dataset_name}\")\n if len(artifacts) == 1 and not artifacts[0].is_dir and artifacts[0].path.endswith(\"/profile.bin\"):\n res.append(run)\n\n return res", "def history(self, employee_id):\n return self.client.get(\n 'people/{employee_id}/work'.format(\n employee_id=employee_id\n )\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all possible transitions for a given issue.
def transitions(accountable): transitions = accountable.issue_transitions().get('transitions') headers = ['id', 'name'] if transitions: rows = [[v for k, v in sorted(t.items()) if k in headers] for t in transitions] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho( 'No transitions found for {}'.format(accountable.issue_key), fg='red' )
[ "def get_transitions(issue):\n\ttransitions = issue.get('transitions', [])\n\treturn [\n\t\t{\n\t\t\t'name': transition.get('name', ''),\n\t\t\t'id': transition.get('id', ''),\n\t\t}\n\t\tfor transition in transitions\n\t]", "def list_transitions_command(args):\n issue_id = args.get('issueId')\n transitions_data_list = list_transitions_data_for_issue(issue_id)\n transitions_names = [transition.get('name') for transition in transitions_data_list.get('transitions')]\n readable_output = tableToMarkdown(\n 'List Transitions:', transitions_names, headers=['Transition Name']\n )\n outputs = {'ticketId': issue_id,\n 'transitions': transitions_names\n }\n return CommandResults(raw_response=transitions_names, readable_output=readable_output,\n outputs_prefix=\"Ticket.Transitions\", outputs_key_field=\"ticketId\", outputs=outputs)", "def list_transitions_data_for_issue(issue_id):\n url = f'rest/api/2/issue/{issue_id}/transitions'\n return jira_req('GET', url, resp_type='json')", "def transitions(self) -> List[Dict]:\n return []", "def transitions(self, from_state=None):\n return list(self.iter_transitions(from_state))", "def setup_transition_list():\n xn_list = []\n\n xn_list.append( Transition(1, 3, 1., 'weathering') ) # rock-sap to sap-sap\n xn_list.append( Transition(2, 3, 1., 'weathering') ) # sap-rock to sap-sap\n xn_list.append( Transition(5, 3, 1., 'weathering') ) # rock/sap to sap/sap\n xn_list.append( Transition(6, 3, 1., 'weathering') ) # sap/rock to sap/sap\n\n if _DEBUG:\n print()\n print('setup_transition_list(): list has',len(xn_list),'transitions:')\n for t in xn_list:\n print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)\n\n return xn_list", "def get_transitions(obj):\n wf_tool = api.portal.get_tool('portal_workflow')\n return [tr[\"id\"] for tr in wf_tool.getTransitionsFor(obj)]", "def execute_jira_list_transitions_command(args: Dict[str, Any]) -> List[Dict[str, Any]]:\n demisto.debug(f'Got the following args {args}')\n res = demisto.executeCommand(\n \"jira-list-transitions\", args\n )\n if res and isinstance(res, list):\n return res\n else:\n raise DemistoException((f'Error occurred while running JiraListStatus, expected a list as response but got:'\n f' {type(res)}. The response is: {res}'))", "def possible_transitions(self):\n return self.get_state_info().possible_transitions()", "def get_to_transitions(self):\n result = []\n state_machine = self.states[0].handler\n for state in state_machine.iter_states():\n if state == self:\n continue\n if not self.matches(state):\n continue\n else:\n for transition in state.get_to_transitions():\n if not transition.from_state == self:\n result.append(transition)\n return result", "def getListOfTransitions(self, *args):\n return _libsbml.QualModelPlugin_getListOfTransitions(self, *args)", "def get_transitions(self):\n transitions = []\n for row in self.states:\n t_row = []\n for column in self.states:\n t_row.append([row, column])\n transitions.append(t_row)\n return sorted(transitions)", "def transitions(self):\n return self.workflow.transitions.available_from(self.state)", "def issue_do_transition(self, issue, transition):", "def transitions_to(self, state_letter):\n if state_letter in self._transitions_to:\n return self._transitions_to[state_letter]\n else:\n return []", "def get_transitions(self, cell_id):\n raise NotImplementedError()", "def make_chord_transitions():\n transitions = []\n for from_chord in Chord:\n for to_chord in Chord:\n transition = ChordTransition(from_chord, to_chord)\n transitions.append(transition)\n return transitions", "def get_all_possible_transitions(maze):\n transitions = []\n\n g = _create_graph(maze)\n\n path_nodes = (node for node, data\n in g.nodes(data=True) if data['type'] == 'path')\n\n for node in path_nodes:\n for neighbour in nx.all_neighbors(g, node):\n direction = Maze.distinguish_direction(node, neighbour)\n action = find_action_by_direction(direction)\n\n transitions.append((node, action, neighbour))\n\n return transitions", "def to_transitions(self) -> 'Transitions':\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Debug breakpoint while in curses mode
def _D(stdscr): curses.nocbreak() stdscr.keypad(0) curses.echo() curses.endwin() import pdb; pdb.set_trace()
[ "def debugger_enable_breakpoint():", "def debugger_break_now():", "def debugger_add_hw_breakpoint():", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def debugger_show_breakpoints():", "def debugger_add_sw_breakpoint():", "def _debug_trace():\n from PyQt4.QtCore import pyqtRemoveInputHook\n from pdb import set_trace\n pyqtRemoveInputHook()\n set_trace()", "def debugger_step_over_line():", "def start_pdb():\r\n import ctypes\r\n ctypes.windll.kernel32.AllocConsole()\r\n import sys\r\n sys.stdout = open('CONOUT$', 'wt')\r\n sys.stdin = open('CONIN$', 'rt')\r\n import pdb\r\n pdb.set_trace()", "def debugger_goto_highlighted():", "def debugger_step_over():", "def debugger_disable_breakpoint():", "def patch_curses(monkeypatch):\n # pylint: disable=import-outside-toplevel\n import curses\n\n monkeypatch.setattr(curses, \"cbreak\", lambda: None)\n monkeypatch.setattr(curses, \"nocbreak\", lambda: None)\n monkeypatch.setattr(curses, \"endwin\", lambda: None)", "def debugger_clear_breakpoint():", "def debugger_goto_clipboard():", "def debugger_continue():", "def skip_breakpoint(self):\n if not self.serial: \n self.statusbar.showMessage(\"Serial not initialized\")\n else:\n self.serial.write(b\"ok\")\n self.on_breakpoint = False", "def breakpoint(self):\n return None", "def debugger_enable_all_breakpoints():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve yaml data from a given path if file not exist, return False
def get_yaml_data(path): yaml_path = "%s%s.yml" % (CONTENT_FILE_DIR, path[:-5]) if os.path.isfile(yaml_path): f = open(yaml_path, 'r') template_data = yaml.load(f) return template_data else: return False
[ "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n return {}", "def load(key, path='farmboy.yaml'):\n try:\n doc = yaml.load(open(path))\n return doc.get(key, None)\n except IOError:\n # log.warn('No file found at %s' % path)\n return None", "def yaml_file_must_exist(cls, v: pathlib.Path):\n if not v.exists():\n raise ValueError(f\"Path object not found in filesystem : {v}\")\n return v", "def _read(path: str): # pragma no cover\n with open(path, \"r\") as f:\n return yaml.full_load(f)", "def load_yaml(self, file_name, path_of_file=None):\n if path_of_file is None:\n path_of_file = self.config_dir\n\n print(path_of_file)\n print(file_name + '.yaml')\n with open(sim_framework_path(path_of_file, file_name + '.yaml'), 'r') as stream:\n data = yaml.safe_load(stream)\n return data", "def tryLoadMeta(self, filePath):\n with open(filePath, 'rb') as f:\n firstLine = f.readline()\n if firstLine and firstLine.rstrip() == b'---':\n header = []\n line = None\n for line in f:\n line = line.rstrip()\n if line == b'---':\n break\n header.append(line)\n if line != b'---':\n return\n header = b'\\n'.join(header)\n try:\n return yaml.load(header)\n except Exception:\n return", "def read_exercise_yaml(path_yaml):\n exer_dict = {}\n with open(path_yaml, 'r') as stream:\n try:\n exer_dict = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return exer_dict", "def yaml_loader(filepath):\n with open(filepath, \"r\") as file_descriptor:\n data = yaml.load(file_descriptor)\n return data", "def parse(self):\n if not self._path or not os.path.isfile(self._path):\n raise YAMLParserError(f\"File {self._path} does not exist!\")\n\n data = yaml.load(open(self._path), Loader=yaml.Loader)\n\n if YAMLParser.check_exists(self._mode, data):\n logging.info(f\"Runner YML file {self._path} loaded, will run in mode:{self._mode}\")\n\n return data", "def pickle_load(path):\n if os.path.isfile(path):\n file = pickle.load(open(path, \"rb\"))\n return file\n else: \n return False", "def _load_yaml_object(self, fPath):\n L = logging.getLogger('lamia.templates')\n with open(fPath) as f:\n try:\n content = yaml.load(f)\n mt = os.path.getmtime(fPath)\n except Exception as e:\n L.exception(e)\n L.error('... while processing \"%s\". File ignored.'%fPath)\n else:\n if type(content) is not dict or 'template' not in content.keys():\n L.warning( 'No \"template\" field found in %s. Ignored.'%fPath )\n return None\n else:\n L.debug( 'template %s loaded.'%fPath )\n return content, mt\n return None", "def __open_yml_file(path_to_yml_file: str):\n\n yaml_content = None\n\n with open(path_to_yml_file, 'r', encoding='utf8') as stream:\n try:\n yaml_content = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(\"could not read yml file '\" + str() + \"'...\\n\" + str(exc) + \"...\")\n\n return yaml_content", "def read_yaml(file_path: Union[str, Path], section: Optional[str]) -> Any:\n with open(file_path, \"r\") as f:\n yaml_content = yaml.safe_load(f)\n if not section:\n return yaml_content\n else:\n try:\n return yaml_content[section]\n except KeyError:\n logging.error(f\"Section {section} not found in config file. Please check.\")\n raise", "def load():\n print(\"Attempting to load data.\")\n\n file_name = get_input()\n loaded = False\n\n while loaded is False:\n try:\n with open(file_name) as file:\n data = yaml.safe_load(file.read())\n loaded = True\n except (IOError, FileNotFoundError, ValueError):\n print(\"ERROR: Invalid file name: \" + file_name)\n print(\"Please try a different file name.\")\n file_name = get_input()\n\n print(\"\")\n print(\"Load successful.\")\n print(\"\")\n\n return data", "def load_local_paths(path):\n\n with open(path, 'r') as f:\n try:\n local_paths = yaml.safe_load(f)\n except yaml.YAMLError as err:\n print(err)\n return None\n\n return local_paths", "def loadDict(yamlfile):\n try:\n with open(yamlfile, 'r') as fichier:\n yamlcontenu = yaml.safe_load(fichier)\n return(yamlcontenu)\n except IOError:\n print(\"Impossible d'ouvrir le fichier \", fichier.name)\n sys.exit(1)", "def test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None", "def load(path=\".travis.yml\"):\n if not path:\n path = \".travis.yml\"\n with open(path, 'r') as stream:\n return yaml.load(stream)", "def parse_yaml(file_path, data):\n path, name = os.path.split(file_path)\n env = Environment(loader=FileSystemLoader(path))\n template = env.get_template(name)\n cont = template.render(data)\n parsed_yaml = yaml.safe_load(cont)\n return parsed_yaml" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try and determine the correct _ (underscore) template matching the files directory structure
def determine_template_by_path(path): path = path.lstrip('/') path_chunks = re.split('\/', path) if len(path_chunks) <= 1: return path else: """ For now be ignorant and just return the first entry of the list as the possible template name, so in fact we only have a 1 level deep structure """ return '_%s.html' % path_chunks[0]
[ "def find_custom_template(args):\n for arg in args:\n if os.path.isdir(arg):\n dirlist = os.listdir(arg)\n if \"custom.html\" in dirlist:\n return os.path.join(arg, \"custom.html\")\n elif \"custom.jinja\" in dirlist:\n return os.path.join(arg, \"custom.jinja\")", "def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)", "def find_template_filename(self, template_name):\n\n def next_file():\n filename = self.path / template_name\n yield filename\n try:\n exts = self.default_file_extensions\n except AttributeError:\n return\n\n strfilename = str(filename)\n for ext in exts:\n yield Path(strfilename + ext)\n\n for filename in next_file():\n if filename.is_file():\n return filename", "def upy_templates():\n PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))\n return os.path.join(PROJECT_PATH, 'templates')", "def upy_tpl():\n PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))\n return os.path.join(PROJECT_PATH, 'contrib/tree/tpl')", "def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)", "def _get_templates():\n SRC = _ARGS['SRC']\n TEMPLATE = _ARGS['TEMPLATE']\n \n templates = []\n files = list_files(SRC)\n for filename in files:\n name, extension = os.path.splitext(filename)\n if extension == TEMPLATE:\n templates.append(name + extension)\n if len(templates) == 0:\n raise Exception('No template \\'%s\\' files found.\\n\\t%s\\n\\t%s' %\n (TEMPLATE, SRC, files))\n return templates", "def match_template_filename(project, filename):\n name, ext = os.path.splitext(os.path.basename(filename))\n #FIXME: is the test for matching extension redundant?\n if ext == os.path.extsep + project.get_template_filtetype():\n if ext != os.path.extsep + project.localfiletype:\n # template extension is distinct, surely file is a template\n return True\n elif not find_lang_postfix(filename):\n # file name can't possibly match any language, assume it is a template\n return True\n return False", "def _get_project_template(self): # suppress(no-self-use)\n parent = os.path.realpath(os.path.join(os.path.dirname(__file__),\n \"..\"))\n assert \"sample\" in os.listdir(parent)\n assert project_type in os.listdir(os.path.join(parent, \"sample\"))\n return os.path.join(parent, \"sample\", project_type)", "def _find_relative(self, spec):\n if spec.template_rel_path is not None:\n return os.path.split(spec.template_rel_path)\n # Otherwise, determine the file name separately.\n\n locator = self.loader._make_locator()\n\n # We do not use the ternary operator for Python 2.4 support.\n if spec.template_name is not None:\n template_name = spec.template_name\n else:\n template_name = locator.make_template_name(spec)\n\n file_name = locator.make_file_name(template_name, spec.template_extension)\n\n return (spec.template_rel_directory, file_name)", "def _on_template_names(self, controller, templates):\n\n controller, prefix, action, ext = self.controller.route.name, self.controller.route.prefix, self.controller.route.action, self.controller.meta.view.template_ext\n\n # Try the prefix template first\n if prefix:\n templates.append('scaffolding/%s_%s.%s' % (prefix, action, ext))\n\n # Then try the non-prefix one.\n templates.append('scaffolding/%s.%s' % (action, ext))", "def template_path(self):\n return super().template_path+[os.path.join(os.path.dirname(__file__), \"templates\")]", "def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'", "def chooseTemplate(templatesPath):\n templates = []\n i = 0\n for subdir, dirs, files in os.walk(templatesPath):\n for file in files:\n if file.endswith('.npy'):\n filepath = os.path.join(subdir, file)\n templates.append([subdir, file])\n i+=1\n print('['+str(i)+'] '+ file)\n if not i:\n print('No template found, closing script...')\n exit()\n choice = intInput('Which template do you want to use? (1-'+str(i)+')', 1, i)\n filepath = os.path.join(templates[choice-1][0], templates[choice-1][1])\n name = templates[choice-1][1]\n return np.load(filepath), name", "def test_filesystem_loader(self):\n\n self.assertEqual(\n list(\n template_finder.templates_for_engine({\n 'BACKEND': 'django.templates.backends.django.Djangotemplate.',\n 'APP_DIRS': False,\n 'DIRS': ['/tmp/project/templates/', '/tmp/project/other_templates/']\n })\n ),\n [\n ('base.html', '/tmp/project/templates/base.html'),\n ('foo/bar.html', '/tmp/project/templates/foo/bar.html'),\n ('baz.html', '/tmp/project/other_templates/baz.html'),\n ]\n )", "def get_dotted_filename(self, template_name, template_extension='.html'):\n try:\n return self.__cache[template_name]\n except KeyError:\n # the template name was not found in our cache\n divider = template_name.rfind('.')\n if divider >= 0:\n package = template_name[:divider]\n basename = template_name[divider + 1:] + template_extension\n try:\n result = resource_filename(package, basename)\n except ImportError as e:\n raise DottedFileLocatorError(str(e) +\". Perhaps you have forgotten an __init__.py in that folder.\")\n else:\n result = template_name\n\n self.__cache[template_name] = result\n\n return result", "def get_output_filename(match_name, template_filename):\n base_name = template_filename.split('.j2')[0]\n return base_name.replace('template', match_name.lower())", "def find_template_file(template_name):\n for loader in _get_template_loaders():\n for origin in loader.get_template_sources(template_name):\n path = getattr(origin, 'name', origin) # Django <1.9 compatibility\n if os.path.exists(path):\n return path\n raise TemplateDoesNotExist(template_name)", "def find_template_file(template_name):\n for loader in _get_template_loaders():\n for origin in loader.get_template_sources(template_name, None):\n path = getattr(origin, 'name', origin) # Django <1.9 compatibility\n if os.path.exists(path):\n return path\n raise TemplateDoesNotExist(template_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
constructor instantiate a Document with a term_list to be converted into dict
def __init__(self, term_list, links=[]): # do type check if not isinstance(term_list, list): raise TypeError('term_list must be of type list') if not isinstance(links, list): raise TypeError('links must be of type list') self.term_dict = {x: term_list.count(x) for x in term_list} self.links = copy.deepcopy(links)
[ "def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indice = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indice = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indice = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_url = doc_as_list[12]\n retweet_quoted_indice = doc_as_list[13]\n\n term_dict = {}\n\n tokenized_text = self.parse_sentence(full_text)\n tokenized_quote = self.parse_sentence(quote_text)\n # tokenized_url = self.handle_url(url)\n\n\n doc_length = len(tokenized_text) # after text operations - length of full_text\n\n new_tokenized_text = tokenized_text + tokenized_quote\n\n # spell checker\n # new_tokenized_text = self.spell.update(new_tokenized_text)\n\n for term in new_tokenized_text:\n if term is not \"\": # or (term.isalpha() and len(term) == 1)\n if term not in term_dict:\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, doc_length)\n return document", "def __init__(self, main_doc):\n\t\tif not isinstance(main_doc, Document):\n\t\t\traise TypeError('term must be of type Document')\n\t\tself.main_doc = main_doc\n\t\tself.env_docs = []", "def _create_doc_dict_from_string(self, list_of_docs_string):\n self.doc_dict = {}\n\n for string in list_of_docs_string:\n string = string.split()\n self.doc_dict[string[0]] = string[1:]", "def __init__(self, description, corpus_documents):\n self.description = description\n self.documents = corpus_documents\n self.doc_map = {}\n for doc in self.documents:\n self.doc_map[doc.identifier] = doc", "def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indices = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indices = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indices = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_urls = doc_as_list[12]\n retweet_quoted_indices = doc_as_list[13]\n\n term_dict = {}\n entities_dict={}\n tokenized_text = []\n \"\"\"--------parse url and quote--------\"\"\"\n if len(url) > 2:\n url_finished = str(self.parse_url(url))\n returned_token = self.check_url(url_finished) # check if the term is url\n check_spec = '2019'\n if check_spec in returned_token:\n returned_token.remove(check_spec)\n\n if len(returned_token) > 0:\n tokenized_text.extend(returned_token)\n else:\n tokenized_text.append(returned_token)\n\n to_insert_list_dict = self.parse_sentence(full_text)\n tokenized_text += to_insert_list_dict[0]\n for key in to_insert_list_dict[1]:\n if key in entities_dict:\n entities_dict[key] = entities_dict[key] + to_insert_list_dict[1][key]\n else:\n entities_dict[key] = to_insert_list_dict[1][key]\n\n if quote_text != None and len(quote_text) > 2:\n to_insert_list_dict = self.parse_sentence(quote_text)\n for term_from_quote in to_insert_list_dict[0]:\n if term_from_quote in tokenized_text:\n to_insert_list_dict[0].remove(term_from_quote)\n tokenized_text += to_insert_list_dict[0]\n for key in to_insert_list_dict[1]:\n if key in entities_dict:\n entities_dict[key] = entities_dict[key] + to_insert_list_dict[1][key]\n else:\n entities_dict[key] = to_insert_list_dict[1][key]\n\n doc_length = len(tokenized_text) # after text operations.\n\n for term in tokenized_text:\n\n if len(term) == 0 or term.lower() in self.stop_words or self.isAscii(term) == False or (term.isdigit() and len(term) > 15):\n continue\n\n\n if len(term) == 0:\n continue\n\n\n term_dict = self.upperCase_handler(term, term_dict)\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, quote_text,\n quote_url, term_dict, entities_dict, doc_length)\n\n return document", "def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n if(document_filenames[id].rfind(\".txt\") == len(document_filenames[id]) - 4):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".docx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".pptx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n unique_terms = set(terms)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n postings[term][id] = terms.count(term) # the value is the\n # frequency of the\n # term in the\n # document", "def parse_doc(self, doc_as_list):\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n tweet_date_obj = datetime.strptime(tweet_date, '%a %b %d %X %z %Y')\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n retweet_url = doc_as_list[6]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_urls = doc_as_list[12]\n term_dict = {}\n\n tokenized_text = []\n # parse all urls\n urls = self.get_urls([url, retweet_url, quote_url, retweet_quoted_urls])\n for (key, value) in urls.items():\n if value:\n domain = self.url_parse(value)\n if domain:\n tokenized_text += domain\n\n all_texts = self.get_texts([full_text, quote_text, retweet_quoted_text])\n # remove urls from the text\n all_texts = self.url_removal_pattern.sub('', all_texts)\n\n tokenized_text, entities_set, small_big = self.parse_sentence(all_texts)\n unique_terms = set(tokenized_text)\n doc_length = len(tokenized_text) # after text operations.\n\n max_tf = 1\n # save only tf for each term in tweet\n for index, term in enumerate(tokenized_text):\n if term not in term_dict:\n term_dict[term] = 1\n\n else:\n term_dict[term] += 1\n if term_dict[term] > max_tf:\n max_tf = term_dict[term]\n\n self.total_len_docs += doc_length\n self.number_of_documents += 1\n # TODO - check if we need to save tokenized_text\n document = Document(tweet_id, max_tf, entities_set, small_big, unique_terms, tweet_date_obj, term_dict, doc_length)\n\n return document", "def __init__(self, *terms, **kwargs):\n self.missing = kwargs.pop('_key_missing_', False)\n if terms and kwargs:\n raise ValueError(\"You must specify terms or kwargs, not both\")\n self.terms = []\n for t in terms:\n self.add_term(t)\n self.add_term(kwargs)", "def create_corpus(self): \n\n\n self.dictionary = gensim.corpora.Dictionary(self.data)\n self.dictionary.filter_extremes( no_below= 15 ,no_above=0.5, keep_n=100000)\n self.bag_of_words = [self.dictionary.doc2bow(document=document) for document in self.data]\n self.tfidf = models.TfidfModel(self.bag_of_words)\n self.corpus_tfidf = self.tfidf[self.bag_of_words]", "def _to_solr_document(document):\n solr_doc = collections.defaultdict(list)\n solr_doc['id'] = document.doc_id\n solr_doc['rank'] = document.rank\n solr_doc['language'] = document.language or ''\n\n for field in document.fields:\n\n lang_suffix = ''\n lang = field.language or document.language\n if lang in SUPPORTED_LANGUAGES:\n lang_suffix = '_{}'.format(lang)\n elif lang is not None:\n logger.warning('Language \"{}\" is not supported'.format(lang))\n\n if field.type == Field.Type.TEXT:\n solr_field_name = '{}_{}{}'.format(field.name, 'txt', lang_suffix)\n solr_doc[solr_field_name].append(field.value)\n elif field.type == Field.Type.HTML:\n raise InvalidRequest('Indexing HTML fields is not supported yet')\n elif field.type == Field.Type.ATOM:\n solr_field_name = '{}_{}'.format(field.name, 'atom')\n solr_doc[solr_field_name].append(field.value)\n elif field.type == Field.Type.NUMBER:\n solr_field_name = '{}_{}'.format(field.name, 'number')\n solr_doc[solr_field_name].append(field.value)\n elif field.type == Field.Type.DATE:\n # A single GAE date field goes as two Solr fields.\n # <field_name>_date is DateRange field which is used for queries\n solr_field_name = '{}_{}'.format(field.name, 'date')\n datetime_str = field.value.strftime('%Y-%m-%dT%H:%M:%SZ')\n solr_doc[solr_field_name].append(datetime_str)\n # <field_name>_date_ms is integer field which is used for sorting\n solr_field_name = '{}_{}'.format(field.name, 'date_ms')\n datetime_ms = int(time.mktime(field.value.timetuple()) * 1000)\n solr_doc[solr_field_name].append(datetime_ms)\n elif field.type == Field.Type.GEO:\n solr_field_name = '{}_{}'.format(field.name, 'geo')\n geo_str = '{},{}'.format(field.value[0], field.value[1])\n solr_doc[solr_field_name].append(geo_str)\n else:\n raise UnknownFieldTypeException(\n \"A document contains a field of unknown type: {}\".format(field.type)\n )\n\n for facet in document.facets:\n if facet.type == Facet.Type.ATOM:\n # A single GAE facet goes as two Solr fields.\n # <field_name>_atom_facet_value stores original value (not indexed).\n solr_field_name = '{}_{}'.format(facet.name, 'atom_facet_value')\n solr_doc[solr_field_name].append(facet.value)\n # <field_name>_atom_facet stores lowercased value (indexed).\n solr_field_name = '{}_{}'.format(facet.name, 'atom_facet')\n solr_doc[solr_field_name].append(facet.value.lower())\n elif facet.type == Facet.Type.NUMBER:\n solr_field_name = '{}_{}'.format(facet.name, 'number_facet')\n solr_doc[solr_field_name].append(facet.value)\n else:\n raise UnknownFacetTypeException(\n \"A document contains a facet of unknown type: {}\".format(facet.type)\n )\n\n return solr_doc", "def from_docdict(cls, docdict, dbinterface):\n ob = cls.__new__(cls)\n ob.__init__(dbi=dbinterface, **docdict)\n return ob", "def _extract_term_index(self):\n terms = {}\n links = {}\n for (uid, doc) in self._docmap.items():\n if uid.is_function():\n if uid.module() is None: continue # ouch.\n link = Link(uid.name(), uid.module())\n elif uid.is_method():\n if uid.cls() is None: continue # ouch.\n link = Link(uid.name(), uid.cls())\n else:\n link = Link(uid.name(), uid)\n\n # Get index items from standard fields.\n for field in self._standard_field_values(doc):\n self._get_index_terms(field, link, terms, links)\n if doc.descr():\n self._get_index_terms(doc.descr(), link, terms, links)\n\n # Get index items from object-specific fields.\n if isinstance(doc, ModuleDoc):\n for var in doc.variables():\n self._get_index_terms(var.descr(), link, terms, links)\n self._get_index_terms(var.type(), link, terms, links)\n elif isinstance(doc, ClassDoc):\n for var in doc.ivariables() + doc.cvariables():\n self._get_index_terms(var.descr(), link, terms, links)\n self._get_index_terms(var.type(), link, terms, links)\n elif isinstance(doc, FuncDoc):\n params = doc.parameter_list()\n if doc.returns(): params.append(doc.returns())\n for param in params:\n self._get_index_terms(param.descr(), link, terms, links)\n self._get_index_terms(param.type(), link, terms, links)\n for fraise in doc.raises():\n self._get_index_terms(fraise.descr(), link, terms, links)\n\n # Combine terms & links into one list\n keys = terms.keys()\n keys.sort()\n return [(k, terms[k], links[k]) for k in keys]", "def index_document(self, document):\r\n\r\n terms = document['text'].split()\r\n appearances_dict = dict()\r\n # Dictionary with each term and the frequency it appears in the text.\r\n #these next 13 lines are heavily based on this website; i have never used python before and this model allowed me to conceptualize how to interact with our index\r\n #appearance objects contain {docId, frequency} for each instance of a documentId key pair\r\n for term in terms:\r\n term_frequency = appearances_dict[term].frequency if term in appearances_dict else 0\r\n appearances_dict[term] = Appearance(document['id'], term_frequency + 1)\r\n\r\n # Update the inverted index\r\n update_dict = {key: [appearance]\r\n if key not in self.index\r\n else self.index[key] + [appearance]\r\n for (key, appearance) in appearances_dict.items()}\r\n self.index.update(update_dict)\r\n # Add the document into the database\r\n self.db.add(document)\r\n return document", "def create(init_document: 'Document') -> 'DocumentArray':", "def __init__(self, *args, **kwargs):\n self.document_links = []\n self.document_date_map = defaultdict(list)\n super().__init__(*args, **kwargs)", "def make_document_term_matrix(token_list):\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n j_indices = []\n \"\"\"Construct an array.array of a type suitable for scipy.sparse indices.\"\"\"\n indptr = array.array(str(\"i\"))\n values = array.array(str(\"i\"))\n indptr.append(0)\n\n for tokens in token_list:\n feature_counter = {}\n for token in tokens:\n feature_idx = vocabulary[token]\n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n vocabulary = dict(vocabulary)\n j_indices = np.asarray(j_indices, dtype=np.intc)\n indptr = np.frombuffer(indptr, dtype=np.intc)\n values = np.frombuffer(values, dtype=np.intc)\n\n X = scipy.sparse.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.int64)\n X.sort_indices()\n return X, vocabulary", "def __init__(self, documents):\n Classifier.__init__(self, documents)\n documents = set(documents)\n term_document_matrix = TermDocumentMatrix(documents, compute_word_vectors=False, compute_document_vectors=False)\n self.vocabulary = set(term_document_matrix.vocabulary)\n self.tree = self.get_tree(documents, self.vocabulary)", "def index(self,docs):\n\n\t\t# invertList is a dict where key is term, and value is a list of InvertListItems\n\t\tindexList = {}\n\t\t# for each document in documents\n\t\tfor doc in docs:\n\t\t\tdocId = doc.getDocId()\n\t\t\tdocLen = doc.getDocLen()\n\t\t\t# get term,tf mapping in this doc\n\t\t\tterms = doc.getTermDict()\n\t\t\t# for each term\n\t\t\tfor t,tf in terms.iteritems():\n\t\t\t\tif not indexList.has_key(t):\n\t\t\t\t\tindexList[t] = []\t\n\t\t\t\tindexList[t].append([docId,docLen,tf])\n\n\t\treturn indexList", "def __init__(self):\n self._word_dict = {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
init Construct a DocumentSet with main document
def __init__(self, main_doc): if not isinstance(main_doc, Document): raise TypeError('term must be of type Document') self.main_doc = main_doc self.env_docs = []
[ "def create(init_document: 'Document') -> 'DocumentArray':", "def __init__(self, description, corpus_documents):\n self.description = description\n self.documents = corpus_documents\n self.doc_map = {}\n for doc in self.documents:\n self.doc_map[doc.identifier] = doc", "def build_document(self):\n pass", "def __init__(self, documents):\n self.documents = documents\n self.classes = self.get_classes()", "def document():\n document = Document()\n return document", "def new_document(self) -> nodes.document:\n document = super().new_document()\n document.__class__ = addnodes.document # replace the class with patched version\n\n # substitute transformer\n document.transformer = SphinxTransformer(document)\n document.transformer.set_environment(self.settings.env)\n\n # substitute reporter\n reporter = document.reporter\n document.reporter = LoggingReporter.from_reporter(reporter)\n\n return document", "def init_document(self, _, value):\n document = IamPolicy.Document(value)\n self.init_default_attr(\"document\", document)", "def __init__(self, document):\n\n self._settemplates(self.onecol, self.twocol)\n assert document.type_key == 'cim.2.designing.Project'\n self.doc = document\n\n # We will populate the \"mip\" variable with the mip era\n self.mips = 'CMIP6'\n\n self.related = []\n for r in self.doc.required_experiments:\n self.related.append(esd.retrieve(r.id))", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def __init__(self, documents):\n Classifier.__init__(self, documents)\n documents = set(documents)\n term_document_matrix = TermDocumentMatrix(documents, compute_word_vectors=False, compute_document_vectors=False)\n self.vocabulary = set(term_document_matrix.vocabulary)\n self.tree = self.get_tree(documents, self.vocabulary)", "def document_set_from_documents(documents: List[Document]) -> DocumentSet:\n return DocumentSet(\n results=documents, metadata=metadata_from_documents(documents)\n )", "def __init__(self, *args, **kwargs):\n self.document_links = []\n self.document_date_map = defaultdict(list)\n super().__init__(*args, **kwargs)", "def setUp(self):\n self.docCounter = 0", "def __init__(self, *args):\n this = _libsbml.new_SBMLDocument(*args)\n try: self.this.append(this)\n except: self.this = this", "def set_doc(self, doc):\n assert self.doc is None\n self._doc = doc", "def __init__(self, docs, **kargs): # [todo]\n self.cohort = kargs.get('cohort', None)\n self.nDoc = len(docs)\n\n return", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount", "def __init__(self, *args):\n this = _libsbml.new_MultiSBMLDocumentPlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def copy(self):\n\n a_subcorpus = subcorpus(self.ontonotes, self.physical_root_dir, cursor=\"a fake cursor\", old_id=self.id)\n\n if self.file_hash:\n for extension in self.file_hash:\n a_subcorpus.file_hash[extension] = self.file_hash[extension][:]\n\n return a_subcorpus" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add Env Page append a new env_page to env_docs
def add_env_page(self, env_page): if not isinstance(env_page, Document): raise TypeError('env_page must be of type Document') self.env_docs.append(env_page)
[ "def addPage(self, name, page, **attrs):\n page.globalConfig = self.globalConfig\n page.pageConfig['pageName'] = name\n self.globalConfig.pageList.append(name)\n self.globalConfig.pageAttributes[name] = dict(attrs)\n setattr(self,name,page) # Link page into page tree (for CherryPy)", "def make_pages():\n\n # Create output folder, copy static files\n if os.path.exists(OUT_FOLDER):\n shutil.rmtree(OUT_FOLDER)\n os.mkdir(OUT_FOLDER)\n os.mkdir(os.path.join(OUT_FOLDER, HTML_FOLDER))\n shutil.copytree(STATIC_FOLDER_ABS, os.path.join(OUT_FOLDER, STATIC_FOLDER))\n\n env = Environment(\n loader=PackageLoader('mod'),\n autoescape=select_autoescape(['html', 'xml']),\n )\n\n env.filters['extract_url'] = extract_url\n\n with urllib.request.urlopen(PROVIDERS_URL) as url_response:\n providers = json.load(url_response)['data']\n\n last_check_time = datetime.datetime.utcnow().strftime(\"%A %B %d, %Y at %H:%M UTC\")\n\n all_provider_data = []\n # Create HTML view for each provider\n for provider in providers:\n if CHECK_PROVIDERS is not None and provider['id'] not in CHECK_PROVIDERS:\n continue\n provider_data = {\n 'id': provider['id'],\n 'last_check_time': last_check_time\n }\n print(\" - {}\".format(provider['id']))\n\n subpage = os.path.join(HTML_FOLDER, get_html_provider_fname(provider['id']))\n subpage_abspath = os.path.join(OUT_FOLDER, subpage)\n\n provider_data['subpage'] = subpage\n provider_data['attributes'] = provider['attributes']\n\n if provider['attributes'].get('base_url') is None:\n provider_data['index_metadb'] = {\n 'state': \"unspecified\",\n 'tooltip_lines': [\"The provider did not specify a base URL for the Index Meta-Database\"],\n 'color': \"dark-gray\"\n }\n else:\n provider_data['index_metadb'] = {}\n try:\n index_metadb_data = get_index_metadb_data(provider['attributes']['base_url'])\n provider_data['index_metadb'] = index_metadb_data\n except Exception:\n provider_data['index_metadb'] = {\n 'state': \"unknown\",\n 'tooltip_lines': \"Generic error while fetching the data:\\n{}\".format(traceback.format_exc()).splitlines(),\n 'color': \"orange\"\n }\n\n # Write provider html\n provider_html = env.get_template(\"singlepage.html\").render(**provider_data)\n with open(subpage_abspath, 'w') as f:\n f.write(provider_html)\n all_provider_data.append(provider_data)\n print(\" - Page {} generated.\".format(subpage))\n\n all_data = {}\n all_data['providers'] = sorted(all_provider_data, key=lambda provider: provider['id'])\n all_data['globalsummary'] = {\n 'with_base_url': len([provider for provider in providers if provider.get('attributes', {}).get('base_url') is not None]),\n 'num_sub_databases': sum([provider_data.get('index_metadb', {}).get('num_non_null_subdbs', 0) for provider_data in all_provider_data])\n }\n\n # Write main overview index\n print(\"[main index]\")\n rendered = env.get_template(\"main_index.html\").render(**all_data)\n outfile = os.path.join(OUT_FOLDER, 'index.html')\n with open(outfile, 'w') as f:\n f.write(rendered)\n print(\" - index.html generated\")", "def add_page(self,**app_names_and_pages):\n \n for app,pages in app_names_and_pages.items():\n if os.path.exists(os.path.join(self._main,app)):\n for page in pages:\n os.makedirs(os.path.join(self._main,app,page))\n self._create_init_routes(self._main,app,page)\n else:\n print(\"that app does not exist\")\n\n self._update_add_app_or_page()", "def add_page(self, page): \n self.pages.append(Page(page))", "def AddPage(self, page):\r\n wx.lib.agw.flatnotebook.FlatNotebook.AddPage(self, page, page.GetTitle())", "def addPage(self, page):\n self._addPage(page, list.append)", "def append_page(self, page: Page) -> \"Document\": # type: ignore [name-defined]\n return self.insert_page(page)", "def register_jinja_env(app):\n app.jinja_env.globals.update({\n 'timeago': lambda x: arrow.get(x).humanize(),\n 'url_for_other_page': url_for_other_page,\n})", "def addPage(self, page):\n #page.buildstream()\n pos = len(self.objects) # work out where added\n\n page.ParentPos = 3 #pages collection\n page.info = {\n 'parentpos': 3,\n 'fontdict': self.fontdict,\n 'contentspos': pos + 2,\n }\n\n self.PageCol.PageList.append(pos + 1)\n self.add('Page%06d' % len(self.PageCol.PageList), page)\n #self.objects.append(page)\n self.add('PageStream%06d' % len(self.PageCol.PageList), page.stream)\n #self.objects.append(page.stream)", "def env_created():", "def create_page(self):", "def add_experience(doc, experience):\n doc = add_title(doc, \"EXPERIENCE PROFESSIONNELLE\")\n for exp in experience :\n # ADD JOB TITLE LINE\n p = doc.add_paragraph(exp['title'])\n p.paragraph_format.space_before = Cm(0.2)\n p.paragraph_format.space_after = Cm(0)\n p.paragraph_format.line_spacing = 1\n for run in p.runs:\n run.bold = True\n \n # ADD COMPANY NAME, ADRESS, DATES\n p = doc.add_paragraph(exp['company'] + ', '+ (9*\"\\t\").join([exp['site'], exp['date']]))\n p.paragraph_format.line_spacing = 1\n p.paragraph_format.space_before = Cm(0) \n p.paragraph_format.space_after = Cm(0) \n for run in p.runs:\n run.italic = True\n \n # ADD CONTENT\n if exp['text'] != \"\" :\n p = doc.add_paragraph(exp['text'])\n p.paragraph_format.line_spacing = 1\n \n # ADD BULLETPOINTS\n for task in exp['tasks'] :\n p = doc.add_paragraph(task, style='List Bullet')\n p.paragraph_format.line_spacing = 1\n p.paragraph_format.space_before = Cm(0)\n p.paragraph_format.space_after = Cm(0)\n \n doc.add_paragraph('Environnement : ' + ', '.join(exp['Environnement']))\n\n return doc", "def addLink(self, name, alias, **attrs):\n self.globalConfig.pageList.append(name)\n self.globalConfig.pageAttributes[name] = dict(attrs)\n self.globalConfig.pageAttributes[name]['alias'] = alias", "def render_env(self):\n\n con = {}\n con[\"env_vars\"] = [field.get_env_line() for field in self.env_profile.envfield_set.all()]\n\n\n for soft_config in [config.get_env_line() for pp in self.used_pips.all() if pp.soft_config for config in pp.soft_config.envfield_set.all()]:\n con[\"env_vars\"].append(soft_config)\n return render_to_string(\"env_example_template.jinja\",con)", "def add_page_arg(args, page):\n if args is None:\n args = {}\n\n args['page'] = page\n return args", "def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')", "def add_to_pr_export(self, exp_template):", "def new_page(self, page):\n pass", "def _make_version_page(self, pages):\n from pesummary._version_helper import install_path\n\n html_file = webpage.open_html(\n web_dir=self.webdir, base_url=self.base_url, html_page=\"Version\"\n )\n html_file = self.setup_page(\n \"Version\", self.navbar[\"home\"], title=\"Version Information\"\n )\n html_file.make_banner(approximant=\"Version\", key=\"Version\")\n contents = __version_string__\n contents += install_path(return_string=True)\n for i in self.labels:\n contents = (\n \"# {} version information\\n\\n{}_version={}\\n\\n\".format(\n i, i, self.file_versions[i]\n )\n ) + contents\n html_file.make_container()\n styles = html_file.make_code_block(language='shell', contents=contents)\n with open('{0:s}/css/Version.css'.format(self.webdir), 'w') as f:\n f.write(styles)\n html_file.end_container()\n packages = self.package_information[\"packages\"]\n style = \"margin-top:{}; margin-bottom:{};\"\n if len(packages):\n html_file.make_table(\n headings=[x.title().replace('_', ' ') for x in packages.dtype.names],\n contents=[[pp.decode(\"utf-8\") for pp in pkg] for pkg in packages],\n accordian=False, style=style.format(\"1em\", \"1em\")\n )\n if self.package_information[\"manager\"] == \"conda\":\n html_file.export(\n \"environment.yml\", margin_top=\"1em\", csv=False,\n conda=True\n )\n else:\n html_file.export(\n \"requirements.txt\", margin_top=\"1em\", csv=False,\n requirements=True\n )\n html_file.make_footer(user=self.user, rundir=self.webdir)\n html_file.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count term in environment calculate idf of a term in main doc
def __count_term_in_env(self, term): # type check if not isinstance(term, str): raise TypeError('term must be of type str') total_cnt = float(len(self.env_docs)) + 1.0 if total_cnt == 1.0: return 1.0 cnt = 1.0 for doc in self.env_docs: if term in doc.term_dict: cnt += 1.0 return math.log(total_cnt / cnt)
[ "def _calculate_idf(self, term):\n term = self._ignore_case(term)\n if term not in self._docs:\n return 0\n else:\n num_occ = len(self._docs[term])\n return math.log(self._num_docs/num_occ)", "def term_idf(self, term):\n idf = math.log(2 + self.count_term_distinct_documents(ANY))\\\n - math.log(1 + self.count_term_distinct_documents(term))\n return idf", "def calculate_idf(self, term):\n # to calc idf\n n = self.num_of_docs\n df = self.inverted_idx[term][0]\n idf = math.log10(n / df)\n return idf", "def _calculate_idf(self, term):\n docs = set()\n term = re.sub(r'\\W+', '', term).lower()\n for k, v in self._inv_index.items():\n if k == term:\n docs.add(doc for doc in v)\n score = len(docs)\n if score == 0:\n return 0\n else:\n return math.log((self._num / score))", "def tfidf_term_in_document(self, term, document):\n tf = self.count_term_in_document(term, document)\\\n / self.count_term_in_document(ANY, document)\n idf = math.log(1 + self.count_term_distinct_documents(ANY))\\\n - math.log(1 + self.count_term_distinct_documents(term))\n return tf * idf", "def count_term_in_document(self, term, document):\n doc = self.get_document(document)\n for docterm, value in doc.get_terms():\n if docterm == term:\n return value\n return 0", "def calc_tf_idf(term, doc, inverted_index, doc_term_count):\n return calc_tf(term, doc, inverted_index, doc_term_count) * calc_idf(term, inverted_index)", "def statistic_tfidf(self):\n\t\t# calculate df-idf for all words\n\t\tcount_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict}\n\t\t# sort them by df and idf\n\t\treturn sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)", "def count_terms(equat_orig):\n\tterms = 0\n\tfor pow_group in equat_orig:\n\t\tif pow_group:\n\t\t\tfor _ in pow_group:\n\t\t\t\tterms += 1\n\tprint(f'\\033[1;95mTerms in the polynom: \\033[0m{terms}')", "def tf(self, term, text):\n return text.count(term) / len(text)", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def tfidf(self):\n vocabulary = {}\n term_count = 0\n for instance in self.text:\n for word, tag in zip(instance.words, instance.pos_tags):\n if word not in string.punctuation: # tag in WANT_TAGS:\n term_count += 1\n if word in vocabulary:\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n # print(vocabulary)\n # print(term_count)\n tf_idf = {}\n for word in vocabulary:\n tf = vocabulary[word] / term_count\n df = 0\n for doc in self.text:\n if word in doc.words:\n df += 1\n tf_idf[word] = (1 + math.log10(tf)) * math.log10(self.document_count / df)\n return tf_idf", "def inv_document_frequency(term, document_list):\n num_docs_with_t = len([d for d in document_list if term in d])\n num_docs = len(document_list)\n return log10(num_docs / (1 + num_docs_with_t))", "def num_terms(clade):\t\n return sum(1 for term in clade.get_terminals())", "def freq(word, document):\n return document.count(word)", "def compute_idfs(self):\n dfs = {}\n for bow in self.bag_of_words:\n for termid in bow:\n dfs[termid] = dfs.get(termid, 0) + 1\n self.idfs = {\n termid: df2idf(df, self.num_docs)\n for termid, df in dfs.items()\n }", "def augmented_term_fequency(term,tokens):\n\tterm = processes_and_tokenize(term)[0] #make sure term is in correct form\n\n\tmax_count = max([tokens.count(t) for t in tokens])\n\treturn tokens.count(term)/max_count", "def get_idf(term_postings, N):\n doc_freq = len(term_postings)\n if doc_freq == 0:\n # query term does not occur in ANY document so it has no weight\n return 0\n else:\n return math.log(N / doc_freq, 10)", "def _calculate_term_freq_field(self, term, field_tokens, avg_field_len):\n count_in_field = float(np.sum([int(term == t) for t in field_tokens]))\n return count_in_field / (1 + self.b * ((len(field_tokens) / avg_field_len) - 1))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Statistic TF calculate and sort terms in main doc by tf
def statistic_tf(self):
    return sorted(self.main_doc.term_dict.items(), key=operator.itemgetter(1), reverse=True)
[ "def statistic_tfidf(self):\n\t\t# calculate df-idf for all words\n\t\tcount_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict}\n\t\t# sort them by df and idf\n\t\treturn sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)", "def tf(word, document):\n return freq(word,document) / wordCount(document)", "def compute_tfs(self):\n for bow in self.bag_of_words:\n total_fs = sum(bow.values())\n self.tfs.append(\n {termid: fs / total_fs\n for termid, fs in bow.items()})", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def computeTF(self):\n for word in self.dictionary:\n self.dictionary[word].setTF(self.getTotalTerms())", "def rankDocuments(terms, docs, index, idf, tf, rt, likes, score):\n \n # init docvectors and queryvector to dict and array of 0, to be filled later\n docVectors=collections.defaultdict(lambda: [0]*len(terms)) \n queryVector=[0]*len(terms) \n\n if score == \"1\":\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. \n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf*idf(normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm * idf[term]\n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex] * idf[term]\n # calculate the score of each doc\n # compute the cosine similarity between queyVector and each docVector:\n docScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n else:\n # as we just want cosine similarity but not use tf-idf, we're using the term frequency as a weight\n # in our custom ranking\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. 
\n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf (normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm \n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex]\n # calculate the score of each doc\n # compute the cosine similarity and add rt and fav score\n # rt brings to more visibility than a like, hence a higher score\n docScores=[ [np.dot(curDocVec, queryVector) + 1.5*rt[doc] + likes[doc], doc] for doc, curDocVec in docVectors.items() ]\n docScores.sort(reverse=True)\n resultDocs=[x[1] for x in docScores]\n if len(resultDocs) == 0:\n print(\"No results found, try again\")\n return None \n return resultDocs", "def tf_score(all_argument_units_tokens: dict):\n words_freq = {}\n for k, tokens in all_argument_units_tokens.items():\n # nlp function returns some weird words like 'educatio' etc which do not exist anywhere in the text\n # and their IDF score cannot be computed\n words = [token.text.lower() for token in tokens\n if token.is_stop is not True and token.is_punct is not True]\n word_count = Counter(words)\n tf_scores = {}\n for w in word_count:\n tf_scores[w] = word_count[w] / len(tokens)\n words_freq[k] = tf_scores\n return words_freq", "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score", "def tfidf(t, h):\n h[0] = h[0].lower()\n t[0] = t[0].lower()\n score = 0\n for word in t:\n word = word.strip()\n if word in h:\n if word in config.doc_freq:\n score += (float(config.total_sentences) - config.word_freq[word]) / config.total_sentences\n else:\n score += 1\n return score", "def rank(weightTerms, termtuples, querylist):\n TOTAL_DOCUMENTS = 55393 # Number of docterm lines\n\n ranklist = {} # { doc: similarity }\n queryfreq = {}\n cosineVals = {} #{ document: [] }\n docset = set()\n\n for x in weightTerms.values(): # determines a set of documents, removing repeats\n for y in x:\n docset.add(y)\n\n for x in querylist: # determines tf for each term in query\n if x not in queryfreq.keys():\n queryfreq[x] = 1\n else:\n queryfreq[x] += 1\n\n for i in set(querylist): # determines tf-idf values for each in the query\n if i not in termtuples.keys():\n termtuples[i] = 1 ####\n if \"MaliaQUERY\" not in cosineVals.keys():\n cosineVals[\"MaliaQUERY\"] = [0]\n pass\n else:\n cosineVals[\"MaliaQUERY\"].append(0)\n pass\n\n elif \"MaliaQUERY\" not in cosineVals.keys():\n cosineVals['MaliaQUERY'] = [queryfreq[i] * math.log( TOTAL_DOCUMENTS/ termtuples[i])]\n else:\n cosineVals[\"MaliaQUERY\"].append(queryfreq[i] * math.log( TOTAL_DOCUMENTS/ termtuples[i]))\n\n for i in set(querylist):\n for x in docset:\n if x not in cosineVals.keys():\n cosineVals[x] = []\n if i not in weightTerms.keys():\n cosineVals[x].append(0)\n elif x not in weightTerms[i].keys():\n cosineVals[x].append(0)\n else:\n cosineVals[x].append(weightTerms[i][x])\n\n\n ### COSINE 
SIMILARITY\n for i in cosineVals.keys():\n if i == \"MaliaQUERY\":\n pass\n else:\n ranklist[i] = dot(cosineVals[i], cosineVals[\"MaliaQUERY\"])/(norm(cosineVals[i])*norm(cosineVals[\"MaliaQUERY\"]))\n\n return ranklist", "def get_tf(word_lst, vocab):\n count_of_each_word = Counter(word_lst)\n doc_word_count = len(word_lst)\n return np.array([count_of_each_word[v] / doc_word_count if v in count_of_each_word else 0 for v in vocab])", "def tf_sents(doc):\n words = set(word for word in tools.word_iter(doc))\n word_pk = {word: pk for pk, word in enumerate(words)}\n\n vecs = []\n for part in doc:\n for sent in part:\n wordcounter = defaultdict(int)\n for word in sent:\n wordcounter[word] += 1\n\n vec = np.zeros(len(words))\n for word, count in wordcounter.items():\n if word in words:\n vec[word_pk[word]] += count\n vecs.append(vec)\n\n return np.array(vecs)", "def featurize(movies):\n ###TODO\n tf = dict()\n # Dictionary for capturing the unique word \n df = dict()\n temp_vocab = []\n vocab = dict()\n for tokens in movies.tokens:\n # Created token set for holding unique terms for a document and show not contain repeated terms\n token_set = set()\n for token in tokens:\n token_set.add(token)\n if token not in temp_vocab:\n temp_vocab.append(token)\n for token in token_set:\n if( token not in df):\n df[token] = 1\n else:\n df[token] += 1 \n #Capturing the sorted vocabulary\n for v in sorted(temp_vocab):\n pos = len(vocab)\n vocab[v] = pos\n # Array for capturing all the csr_matrix for all features\n movies_features = []\n for tokens in movies.tokens:\n col = []\n row = []\n featureVectData = []\n tf.clear()\n for token in tokens:\n if( token not in tf):\n tf[token] = 1\n else:\n tf[token] += 1\n for token in set(tokens):\n if token in vocab:\n col.append(vocab[token])\n row.append(0)\n #tf(i, d) / max_k tf(k, d) * log10(N/df(i))\n tfidf = tf[token] / max(tf.values()) * math.log10(len(movies) / df[token])\n #print(tfidf)\n featureVectData.append(tfidf)\n #print(\"------------\")\n matrix = csr_matrix((featureVectData,(row,col)),shape=(1,len(vocab)))\n movies_features.append(matrix)\n movies['features'] = pd.Series(movies_features, index = movies.index)\n return movies,vocab", "def get_person_tf(self, person_id):\n doc_ids = self.__elastic.search(person_id, self.CONTENT_FIELD, num=10000).keys()\n\n tf_agg = {}\n for doc_id in doc_ids:\n tv = self.__elastic.get_termvector(doc_id, self.CONTENT_FIELD) # , term_stats=True)\n for t, val in tv.items():\n tf_agg[t] = tf_agg.get(t, 0) + val[\"term_freq\"]\n return tf_agg, len(doc_ids)", "def text2tf(text, add_vocabulary = False, add_Docs = False, vocabulary = None, language = None, feature_frequency = None, nDocs = None): \n if nDocs is None:\n nDocs = 0\n\n # When its a new training sample, its necessary to add new terms to the vocabulary\n if add_vocabulary:\n\n # Initialize the vocabulary, the attributes frequency and the number of documents\n if vocabulary is None:\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n\n if feature_frequency is None:\n feature_frequency = np.zeros((1,1)).astype(int)\n\n # Splits the terms of documents \n tokenized_text = [word_tokenize(row, language=language) for row in text] # tokenized docs\n tokens = set([item for sublist in tokenized_text for item in sublist])\n\n #print(nDocs)\n #a = text[0].split(' ')\n #for token in tokens:\n # for i, b in enumerate(a):\n # if (b == token):\n # a[i] = ''\n #for b in a:\n # if b != '':\n # print(b)\n \n # Add to vocabulary the terms of analyzed 
documents\n for word in tokens:\n try:\n feature_idx = vocabulary[word]\n except KeyError:\n continue\n\n # Generates the term-frequency array to texts\n vectorizer = skl.feature_extraction.text.CountVectorizer(analyzer = \"word\", tokenizer = None, preprocessor = None, stop_words = None, lowercase = True, binary=False, dtype=np.int32, vocabulary = vocabulary )\n\n tf = vectorizer.transform(text) \n\n if add_Docs:\n\n # Updates the frequency of terms while the vocabulary changes\n # calculates the quantity of new terms/attributes to add the frequency\n if feature_frequency is None:\n new_terms = len(vocabulary)\n \n else:\n new_terms = len(vocabulary) - feature_frequency.shape[1]\n new_features = np.zeros( (1,new_terms) ).astype(int)\n\n # calculates/updates the frequency of attributes\n if feature_frequency is None:\n feature_frequency = (tf != 0).sum(axis=0)\n \n else:\n feature_frequency = np.hstack((feature_frequency,new_features)) + (tf != 0).sum(axis=0)\n nDocs += len(text)\n\n return tf, vocabulary, feature_frequency, nDocs \n else:\n return tf", "def calc_tf_idf(term, doc, inverted_index, doc_term_count):\n return calc_tf(term, doc, inverted_index, doc_term_count) * calc_idf(term, inverted_index)", "def tfidf(self):\n vocabulary = {}\n term_count = 0\n for instance in self.text:\n for word, tag in zip(instance.words, instance.pos_tags):\n if word not in string.punctuation: # tag in WANT_TAGS:\n term_count += 1\n if word in vocabulary:\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n # print(vocabulary)\n # print(term_count)\n tf_idf = {}\n for word in vocabulary:\n tf = vocabulary[word] / term_count\n df = 0\n for doc in self.text:\n if word in doc.words:\n df += 1\n tf_idf[word] = (1 + math.log10(tf)) * math.log10(self.document_count / df)\n return tf_idf", "def tf_idf(self, dictionary):\n #tf and idf are calls to functions of the class Doc to calculate word frequency and inverse df respectively\n tf = self.count(dictionary)\n idf = self.idf(dictionary)\n \n tf_idf_docs = dict()\n \n for doc in self.docs:\n tf_idf_docs[(doc.pid, doc.year, doc.author) ] = \\\n np.log(tf[(doc.pid, doc.year, doc.author)] + 1) * idf\n \n return(tf_idf_docs)", "def calculate_tf_idf(self,doc_token_number,document_count):\n for term_ in self.inverted_index.keys():\n postingsList=self.inverted_index[term_]\n len_of_posting_list=postingsList.length\n idf=document_count/len_of_posting_list\n if postingsList.start_node is None:\n print(\"List has no element\")\n return\n else:\n n = postingsList.start_node\n # Start traversal from head, and go on till you reach None\n while n is not None:\n freq=n.term_frequency\n tf=freq/doc_token_number[n.value]\n tf_idf_value=tf*idf\n n.tf_idf=tf_idf_value\n n = n.next" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Statistic TFIDF calculate and sort terms in main doc by tfidf
def statistic_tfidf(self):
    # calculate df-idf for all words
    count_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict}
    # sort them by df and idf
    return sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)
[ "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score", "def tfidf(t, h):\n h[0] = h[0].lower()\n t[0] = t[0].lower()\n score = 0\n for word in t:\n word = word.strip()\n if word in h:\n if word in config.doc_freq:\n score += (float(config.total_sentences) - config.word_freq[word]) / config.total_sentences\n else:\n score += 1\n return score", "def tfidf(self):\n vocabulary = {}\n term_count = 0\n for instance in self.text:\n for word, tag in zip(instance.words, instance.pos_tags):\n if word not in string.punctuation: # tag in WANT_TAGS:\n term_count += 1\n if word in vocabulary:\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n # print(vocabulary)\n # print(term_count)\n tf_idf = {}\n for word in vocabulary:\n tf = vocabulary[word] / term_count\n df = 0\n for doc in self.text:\n if word in doc.words:\n df += 1\n tf_idf[word] = (1 + math.log10(tf)) * math.log10(self.document_count / df)\n return tf_idf", "def statistic_tf(self):\n\t\treturn sorted(self.main_doc.term_dict.items(), key=operator.itemgetter(1), reverse=True)", "def tf_idf(self, dictionary):\n #tf and idf are calls to functions of the class Doc to calculate word frequency and inverse df respectively\n tf = self.count(dictionary)\n idf = self.idf(dictionary)\n \n tf_idf_docs = dict()\n \n for doc in self.docs:\n tf_idf_docs[(doc.pid, doc.year, doc.author) ] = \\\n np.log(tf[(doc.pid, doc.year, doc.author)] + 1) * idf\n \n return(tf_idf_docs)", "def computeTFIDF(self):\n for word in self.dictionary:\n numOfAppearance = self.dictionary[word].getDocumentFrequency()\n idf = math.log( (self.MAX_RATING) / (numOfAppearance), 10 )\n self.dictionary[word].setTFIDF(idf)", "def calc_tf_idf(term, doc, inverted_index, doc_term_count):\n return calc_tf(term, doc, inverted_index, doc_term_count) * calc_idf(term, inverted_index)", "def get_tf_idf(term, document, documents):\n\n tf_idf = get_tf(term, document) * get_idf(term, documents)\n\n return round(tf_idf, 5)", "def calculate_tf_idf(self,doc_token_number,document_count):\n for term_ in self.inverted_index.keys():\n postingsList=self.inverted_index[term_]\n len_of_posting_list=postingsList.length\n idf=document_count/len_of_posting_list\n if postingsList.start_node is None:\n print(\"List has no element\")\n return\n else:\n n = postingsList.start_node\n # Start traversal from head, and go on till you reach None\n while n is not None:\n freq=n.term_frequency\n tf=freq/doc_token_number[n.value]\n tf_idf_value=tf*idf\n n.tf_idf=tf_idf_value\n n = n.next", "def create_index_tfidf(lines, numDocuments):\n \n index=collections.defaultdict(list)\n tf=collections.defaultdict(list) #term frequencies of terms in documents (documents in the same order as in the main index)\n df=collections.defaultdict(int) #document frequencies of terms in the corpus\n idf=collections.defaultdict(float)\n with Bar('Creating tf-idf index', max=len(lines)) as bar:\n for key in lines:\n page_id = key \n terms = getTerms(lines[key]) \n\n ## create the index for the **current page** and store it in termdictPage\n ## termdictPage in form ==> { ‘term1’: [currentdoc, [list of positions]], ...,‘termn’: [currentdoc, [list of positions]]}\n\n 
termdictPage={}\n\n for position, term in enumerate(terms): \n try:\n # if the term is already in the dict append the position to the corrisponding list\n termdictPage[term][1].append(position) \n except:\n # Add the new term as dict key and initialize the array of positions and add the position\n termdictPage[term]=[page_id, array('I',[position])] \n\n #normalize term frequencies\n norm=0\n for term, posting in termdictPage.items(): \n # posting ==> [currentdoc, [list of positions]] \n norm+=len(posting[1])**2\n norm=math.sqrt(norm)\n\n\n #calculate the tf(dividing the term frequency by the above computed norm) and df weights\n for term, posting in termdictPage.items(): \n # append the tf for current term (tf = term frequency in current doc/norm)\n tf[term].append(np.round(len(posting[1])/norm,4)) ## SEE formula (1) above\n #increment the document frequency of current term (number of documents containing the current term)\n df[term] += 1 \n\n #merge the current page index with the main index\n for termpage, postingpage in termdictPage.items():\n index[termpage].append(postingpage)\n\n # Compute idf following the formula (3) above. HINT: use np.log\n bar.next()\n for term in df:\n idf[term] = np.round(np.log(float(numDocuments/df[term])),4)\n \n return (index, tf, df, idf)", "def rank_tfidf(self, dictionary):\n \n docs_tfidf = self.tf_idf(dictionary)\n \n doc_rank = [[key, sum(docs_tfidf[key])] for key in docs_tfidf.keys()]\n \n doc_rank.sort(key=lambda x: x[1], reverse = True)\n \n return(doc_rank) \n #return(np.sort(np.array(doc_rank), axis=0)[::-1])", "def tf_idf(arr):\n \n tf = inverse_term_freq(arr)\n tf['tf-idf'] = tf['tf'] * tf['idf']\n return tf", "def _tfidf(term_frequency: int, document_frequency: int, document_count: int) -> float:\n if term_frequency == 0:\n return 0\n else:\n tf = 1 + np.log(term_frequency)\n idf = np.log(document_count / document_frequency)\n return tf * idf", "def rankDocuments(terms, docs, index, idf, tf, rt, likes, score):\n \n # init docvectors and queryvector to dict and array of 0, to be filled later\n docVectors=collections.defaultdict(lambda: [0]*len(terms)) \n queryVector=[0]*len(terms) \n\n if score == \"1\":\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. \n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf*idf(normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm * idf[term]\n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex] * idf[term]\n # calculate the score of each doc\n # compute the cosine similarity between queyVector and each docVector:\n docScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n else:\n # as we just want cosine similarity but not use tf-idf, we're using the term frequency as a weight\n # in our custom ranking\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. 
\n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf (normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm \n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex]\n # calculate the score of each doc\n # compute the cosine similarity and add rt and fav score\n # rt brings to more visibility than a like, hence a higher score\n docScores=[ [np.dot(curDocVec, queryVector) + 1.5*rt[doc] + likes[doc], doc] for doc, curDocVec in docVectors.items() ]\n docScores.sort(reverse=True)\n resultDocs=[x[1] for x in docScores]\n if len(resultDocs) == 0:\n print(\"No results found, try again\")\n return None \n return resultDocs", "def tf_idf(docs, queries, tokenizer):\n\n processed_docs = [d.lower().translate(str.maketrans('','',string.punctuation)) for d in docs]\n tfidf = TfidfVectorizer(stop_words='english', tokenizer=tokenizer)\n tfs = tfidf.fit_transform(processed_docs)\n tfs_query = tfidf.transform(queries)\n return tfs, tfs_query", "def test_tfidf_scorer(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor(tokenizer=tokenizer)\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates)\n self.assertGreater(scores.get('erdogan'), scores.get('damascus'))\n self.assertEqual(scores.get('everywhere'), scores.get('disconnected')) # they appear the same number of times\n self.assertGreater(scores.get('erdogan'), scores.get('threats')) # 'threats' and 'erdogan' appear with the same frequency, but 'threats' has a higher DF", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def nltk_tfidf_vectorize(corpus):\n\n corpus = [\n [token for token in tokenize(doc)]\n for doc in corpus\n ]\n\n # TextCollection - обертывает список документов или корпус. 
Поддерживает подсчет вхождений,\n # вычисление оценки согласованности (конкордантности), определение устойчивых словосочетаний и\n # непосредственно то, что нам надо - вычисляет tf_idf\n texts = TextCollection(corpus)\n\n for doc in corpus:\n yield {\n term: round(texts.tf_idf(term, doc), 3)\n for term in doc\n }", "def get_tf_idf(propressed_data_file, idfs_file):\r\n #get data from pre_computed idfs and pre-processed full data:\r\n with open(idfs_file, encoding='utf-8', errors='ignore') as f:\r\n #with each line: word<fff>idf_of_word\r\n word_idfs = [(line.split('<fff>')[0], float(line.split('<fff>')[1])) for line in f.read().splitlines()]\r\n\r\n word_idfs = dict(word_idfs)\r\n word_IDs = dict([(word_idfs, index) for index, word_idfs in enumerate(word_idfs)])\r\n word = word_idfs.keys()\r\n\r\n with open(propressed_data_file, encoding='utf-8', errors='ignore') as f:\r\n #with each line: label<fff>file_name<>content\r\n docs = [(line.split('<fff>')[0], line.split('<fff>')[1], line.split('<fff>')[2])\\\r\n for line in f.read().splitlines()]\r\n\r\n #compute data_tf_idf:\r\n data_tf_idfs = []\r\n for doc in docs:\r\n label, id, text = doc\r\n #get set of words in doc:\r\n words = [word for word in text.split() if word in word_idfs]\r\n words_set = list(set(words))\r\n\r\n #and find max_freq in doc:\r\n max_freq = max(words.count(word) for word in words_set)\r\n\r\n #compute doc_tf_idfs of words in doc:\r\n doc_tf_idfs = []\r\n sum_squares = 0.0\r\n\r\n for word in words_set:\r\n word_freq = words.count(word)\r\n word_tf_idf = word_freq*1./max_freq * word_idfs[word]\r\n token = (word_IDs[word], word_tf_idf)\r\n doc_tf_idfs.append(token)\r\n sum_squares += word_tf_idf**2\r\n\r\n doc_tf_idfs_normalize = [str(index) + ':' + str(word_tf_idf/np.sqrt(sum_squares))\\\r\n for index, word_tf_idf in doc_tf_idfs]\r\n\r\n sparse_rep = ' '.join(doc_tf_idfs_normalize)\r\n data_tf_idfs.append(label + '<fff>' + id + '<fff>' + sparse_rep)\r\n\r\n #then write data_tf_idfs to file:\r\n with open('data/test_tf_idfs.txt', 'w') as f:\r\n f.write('\\n'.join(data_tf_idfs))\r\n\r\n print(\"Get TF_IDF Done!!!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the menu and return either None (if an exit key was pressed) or FindTweetMenu.BACK_INDEX
def showAndGet(self):
    keywords = TerminalInterface.getSearchKeywords()

    # If user did not enter any keywords, return FindUserMenu.BACK_INDEX
    if keywords is None:
        return FindTweetMenu.BACK_INDEX

    tweetGeneratorMethod = lambda: TweetsTableTools.findTweets(
        self._connection, keywords)

    menu = TweetsMenu(self._connection, self._userID, tweetGeneratorMethod,
                      emptyMessage = FindTweetMenu._EMPTY_MESSAGE)

    choice = menu.showAndGet()

    if choice == TweetsMenu.BACK_INDEX:
        return FindTweetMenu.BACK_INDEX

    return choice
[ "def return_menu(self):\n while True:\n number = pyip.inputNum(\"0. Back to the main menu: \")\n if number == 0:\n # Clean up the console\n self.clear_console()\n # back to the main menu\n self.run()\n else:\n print('Press the number zero to go back')", "def menu_quit():\n return \"Quit\"", "def analysis_menu():\n print()\n print(\"Menu Options:\")\n print(\"-------------\")\n print(\"Enter \\\"B\\\" for book page information\\nEnter \\\"S\\\" for statistical analysis\\nEnter \\\"A\\\" for additional analysis\\nEnter \\\"V\\\" for visualizations\\nEnter \\\"F\\\" to save results to a file\\nEnter \\\"M\\\" to return to main menu\")", "def main_menu():\n print()\n print(\"Menu Options:\")\n print(\"-------------\")\n print(\"Enter \\\"A\\\" for analysis\\nEnter \\\"H\\\" for help\\nEnter \\\"Q\\\" to quit\")", "def action(self,input,session,context):\n index = int(input) - 1\n if index < 0:\n raise IndexError('Menu option can not be less than 1')\n return self.menu_items[index].next_screen", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"n\": (\"Navigate\", self.nav),\n \"d\": (\"Dance\", self.dance),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"s\": (\"Shy\", self.shy),\n \"f\": (\"Follow\", self.follow),\n \"c\": (\"Calibrate\", self.calibrate),\n \"q\": (\"Quit\", self.quit)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def menu(title, msg, options, back=False, back_text=\"Back\", index=True, current=\"\", current_append=\" (current)\", print_func=menu_print, input_func=raw_input):\n\n while True:\n choice = 0\n i = 1\n print_func(\"%s\" % title)\n for m in options:\n append = \"\"\n if current==m:\n append = current_append\n print_func(\"%s. %s%s\" % (i,m,append))\n i += 1\n if back:\n print_func(\"%s. 
[ %s ]\" % (i, back_text))\n try:\n choice = int(input_func(msg))\n except TypeError:\n continue # loop again\n except ValueError:\n continue\n if choice > 0 and choice <= len(options):\n break\n elif choice == len(options)+1:\n if back:\n return -1 if index else None \n else:\n continue\n if not index:\n if choice != -1:\n return options[choice-1]\n else:\n return None\n return choice-1", "def go_menu(self, window, keycode1, keycode2, text, modifiers):\r\n if keycode1 in [27, 1001]:\r\n self.sm.current = \"menu\"\r\n return True\r\n return False", "def show_menu():\n if not GD.gui.menu.item('Tools'):\n create_menu()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values\n # You may change the menu if you'd like to add an experimental method\n menu = {\"n\": (\"Navigate forward\", self.nav),\n \"n2\": (\"Navigate forward\", self.nav_2),\n \"cs\": (\"Cupid Shuffle\", self.cupid_shuffle),\n #\"b\": (\"Break dance\", self.break_dance),\n #\"fc\": (\"Full Count\", self.full_count),\n #\"c\": (\"Calibrate\", self.calibrate),\n #\"s\": (\"Check status\", self.status),\n \"tr\": (\"Test Restore Method\", self.test_restore),\n \"c\": (\"Cruise\", self.cruise),\n \"sc\": (\"Smart Cruise\", self.smart_cruise),\n #\"b\": (\"Left BackUp \", self.back_turn_left),\n \"q\": (\"Quit\", quit_now)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = raw_input(\"\\n----Your selection:----\\n \")\n # activate the item selected\n menu.get(ans, [None, error])[1]()", "def press_enter_to_continue():\n # Wait for user input\n input(f\"{helper.blue_text}Press Enter to go back to the main menu.\\n\")\n ui()", "def exit_menu():\n set_refreshing(False)\n clear_display_text()\n device_state.set_state(STATE_IDLE)", "def go_to_exit(self, _: int = 0) -> None:\n self.current_option = self.last_item_index\n self.draw()", "def go_to_menu(self):\n self.clear_frame()\n Admin(self.frame).admin_main()", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"c\": (\"Calibrate\", self.calibrate),\n \"d\": (\"Dance\", self.dance),\n \"h\": (\"Hold position\", self.hold_position),\n \"n\": (\"Navigate\", self.nav),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"q\": (\"Quit\", self.quit),\n \"v\": (\"Veer\", self.slither)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def return_to_main_menu() -> bool:\n choice = get_user_choice(['Return to main menu', 'Move a book to another shelf'],\n '\\nWould you like to return to the main menu or move a book?')\n return True if choice == '1' else False", "def menu():\n print 'Opções:\\n\\t(1) Entrar\\n\\t(2) Cadastrar\\n\\t(3) Sair'\n return raw_input('Digite a opção desejada: ')", "def search_method_menu(self):\n\n print()\n options = {'1': 'Employee Name', '2': 'Keyword', '3': 'Time Spent',\n '4': 'Date', '5': 'Date Range', '6': 'Exit to main menu'}\n\n while True:\n\n for k, v in options.items():\n print(k + \". \" + v)\n\n user_choice = input('\\nPlease enter the number of choice: ').lower().strip()\n\n if user_choice in options.keys():\n return options.get(user_choice)\n else:\n print('\\nInvalid choice! 
Please try again.\\n')", "def choice_stay_return(self, text, action):\n while True:\n print(\"\"\"\n 0. Back to the main menu\n 1. {}\n \"\"\".format(text))\n choice = pyip.inputNum('Enter a number: ')\n if choice == 0:\n # Clean up the console\n self.clear_console()\n # Gives the options that can be selected in the menu\n self.run()\n elif choice == 1:\n action()\n else:\n print('Please, choose number 0 or 1')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses an index array to obtain indices using an index array along an axis.
def select_indices(arr, index_arr, axis=-1):
    shape_list = (lambda x, y: [1 if dim != x else y for dim in range(len(arr.shape))])
    indices_list = [np.reshape(np.arange(length), shape_list(length_id, length))
                    for length_id, length in enumerate(arr.shape)]
    indices_list[axis] = index_arr
    return arr.ravel()[np.ravel_multi_index(indices_list, dims=arr.shape)]
[ "def index(dims, axis):\n return intrinsic('index', axis, *dims)", "def pndindex(*args):\r\n return np.ndindex(*args)", "def pndindex(*args):\n return np.ndindex(*args)", "def view_along_axis(arr, indices, axis):\n slices = [slice(None)] * arr.ndim\n slices[axis] = sliceify(indices)\n return arr[tuple(slices)]", "def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v", "def _indexShape(index, arrayShape):\n # \"when the selection object is not a tuple, it will be referred to as if it\n # had been promoted to a 1-tuple, which will be called the selection tuple\"\n if not isinstance(index, tuple):\n if isinstance(index, list):\n index = tuple(index)\n else:\n index = (index,)\n\n Nnewaxes = len([element for element in index if element is newaxis])\n desiredRank = len(arrayShape) + Nnewaxes\n\n ellipsislen = 1 + desiredRank - len(index)\n\n # \"Exactly one Ellipsis object will be expanded, any other Ellipsis objects\n # will be treated as full slice (':') objects. The Ellipsis object is replaced\n # with as many full slice (':') objects as needed to make the length of the\n # selection tuple N.\"\n expanded = ()\n for element in index:\n if element is Ellipsis:\n expanded += (slice(None, None, None),) * ellipsislen\n ellipsislen = 1\n else:\n expanded += (element,)\n\n if len(expanded) > desiredRank:\n # \"If the length of the selection tuple is larger than N (=X.ndim) an error\n # is raised.\"\n if len(arrayShape) == 0:\n raise IndexError(\"0-d arrays can't be indexed\")\n else:\n raise IndexError(\"invalid index\")\n else:\n # \"If the selection tuple is smaller than N, then as many ':' objects as\n # needed are added to the end of the selection tuple so that the modified\n # selection tuple has length N.\"\n expanded += (slice(None, None, None),) * (desiredRank - len(expanded))\n\n # \"The shape of all the integer indexing arrays must be broadcastable to the\n # same shape\"\n broadcasted = ()\n arrayIndices = ()\n arrayindex = None\n broadcastshape = None\n i = 0\n j = 0\n while i < len(expanded):\n element = expanded[i]\n if element is newaxis or isinstance(element, slice):\n broadcasted += (element,)\n if isinstance(element, slice):\n arrayIndices += (j,)\n else:\n if broadcastshape is None:\n arrayindex = i\n broadcastshape, skip = _compressIndexSubspaces(index=expanded, i=i)\n else:\n # we're only in this branch if indexing subspaces are separated\n # by slice objects, so indexing subspace is broadcast first\n arrayindex = 0\n broadcastshape, skip = _compressIndexSubspaces(index=expanded, i=i,\n broadcastshape=broadcastshape)\n i += skip\n j += skip\n\n i += 1\n if element is not newaxis:\n j += 1\n\n indexShape = ()\n j = 0\n for element in broadcasted:\n if element is newaxis:\n indexShape += (1,)\n elif isinstance(element, slice):\n start, stop, stride = element.indices(arrayShape[arrayIndices[j]])\n indexShape += ((stop - start) // stride,)\n j += 1\n else:\n raise IndexError(\"invalid index\")\n\n if arrayindex is not None:\n indexShape = indexShape[:arrayindex] + broadcastshape + indexShape[arrayindex:]\n\n return indexShape", "def make_idx(idx: np.ndarray, axis: int) -> Tuple[int, ...]:\n total_idx = np.ogrid[tuple(map(slice, idx.shape))]\n total_idx[axis] = idx\n return tuple(total_idx)", "def _dask_oindex(x, indices):\n axis = 0\n for index in indices:\n x = da.take(x, index, axis=axis)\n # If axis wasn't dropped by a scalar index:\n if 
not isinstance(index, Integral):\n axis += 1\n return x", "def apply_index(data, idx):\n data = numpy.asanyarray(data)\n idx = numpy.asanyarray(idx)\n if len(idx.shape) != 2:\n raise ValueError(\"idx must have dimensions 2, not {0}\".format(\n len(idx.shape)))\n if len(data.shape) < 2:\n raise ValueError(\"data must have at least dimensions 2\")\n if idx.shape[0] != data.shape[0]:\n raise ValueError(\"data and idx must have same size in \"\n \"0th dimension\")\n if not idx.shape[1] in data.shape[1:]:\n raise ValueError(\"Size of idx dimension 1 must match a dimension in \"\n \"data\")\n idx_dim = data.shape[1:].index(idx.shape[1]) + 1\n return numpy.rollaxis(\n numpy.rollaxis(data, idx_dim, 1) #make time and index dim adjacent\n #get a 2d array where every element matches index of first axis\n [numpy.mgrid[0:idx.shape[0], slice(idx.shape[1])][0],\n idx, #2d array, every element is desired index of second axis\n ...] #and the other axes come along for the ride\n , 1, idx_dim + 1) #and put index dim back in place", "def index2d(src, idx):\n broadcast_to = P.BroadcastTo(idx.shape)\n offs = broadcast_to(P.range(Tensor(0, mindspore.int32),\n Tensor(idx.shape[0], mindspore.int32),\n Tensor(1, mindspore.int32))[:, None])\n idx = idx + (offs()) * idx.shape[1]\n\n return src.view(-1)[idx.view(-1)].view(idx.shpe)", "def broadcast_index(values, indices):\r\n assert_array(indices, shape=(...,) + values.shape[:-1])\r\n indexed_values = jp.take_along_axis(\r\n values.reshape((1,) + values.shape),\r\n indices.reshape((-1,) + values.shape[:-1] + (1,)),\r\n axis=-1,\r\n )\r\n flat_result = jp.squeeze(indexed_values, axis=-1)\r\n return flat_result.reshape(indices.shape)", "def _makeIndices(self,inputArray):\r\n inputArray = numpy.asarray(inputArray, 'O')#make sure its an array of objects (can be strings etc)\r\n #get some simple variables for later\r\n dims=inputArray.shape\r\n dimsProd=numpy.product(dims)\r\n dimsN = len(dims)\r\n dimsList = range(dimsN)\r\n listOfLists = []\r\n arrayOfTuples = numpy.ones(dimsProd, 'O')#this creates space for an array of any objects\r\n\r\n #for each dimension create list of its indices (using modulo)\r\n for thisDim in dimsList:\r\n prevDimsProd = numpy.product(dims[:thisDim])\r\n thisDimVals = numpy.arange(dimsProd)/prevDimsProd % dims[thisDim] #NB this means modulus in python\r\n listOfLists.append(thisDimVals)\r\n\r\n #convert to array\r\n indexArr = numpy.asarray(listOfLists)\r\n for n in range(dimsProd):\r\n arrayOfTuples[n] = tuple((indexArr[:,n]))\r\n return (numpy.reshape(arrayOfTuples,dims)).tolist()", "def recompose_index(self, array):\n idx = 0\n for i in range(len(array)):\n idx += array[i] * self.N**i\n return idx", "def indices(self):", "def ndindex(masks, axes):\n joint = {}\n for mask, axis in zip(masks, axes):\n if axis in joint:\n joint[axis] = joint[axis] & mask\n else:\n joint[axis] = mask\n rank = max(joint.keys()) + 1 # find highest dimension\n return axes_pts([joint[i] for i in range(rank)])", "def twoD_gather(array, indices):\n temp = list()\n for row, i in zip(array, indices):\n temp.append(row[i])\n return np.asarray(temp)", "def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx", "def to_nd(self, index, shape):\n return np.unravel_index(index, shape)", "def batched_index_select(input, dim, index):\n views = [input.shape[0]] + [1 if i != dim else -1 for i in range(1, len(input.shape))]\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return 
torch.gather(input, dim, index)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continous loop of inputs and answers
def evaluateCycle(self):
    print("Enter q or quit to exit")
    input_sentence = ''
    while(1):
        # Get input sentence
        input_sentence = input('> ')
        # Check if it is quit case
        if input_sentence == 'q' or input_sentence == 'quit':
            break
        ans = self.evaluateOneInput(input_sentence)
        print('Bot:', ans)
[ "def user_answer(self):\n #for inputs in range(0, 1):\n self.answer.append(input(\"Your Answer: \"))\n return self.answer", "def main():\n min_random = 10 #keeping constant for the min random number range\n max_random = 99 #keeping constant for the max random number range\n count = 0 #creating a counter variable to keep track of user's answers in a row\n\n\n while count != 3: #this loop will keep goin until user get 3 answers correct in a row\n num1 = random.randint(min_random, max_random) #generating a random number each new equations\n num2 = random.randint(min_random, max_random)\n\n print(\"What is \" + str(num1) + \"+\" + str(num2) + \"?\")\n user_input = int(input(\"Your answer is: \")) #takign the user's input and converting it into an integer\n\n total = num1 + num2 #keeping track of the actual answer to compare with the user's response", "def loopselection():\r\n pass", "def confirmDataSimulatorUserInputLoop(self):\r\n userInput = \"\"\r\n try:\r\n while userInput != \"quit\":\r\n self.printConfirmDataSimulationOptions()\r\n userInput = raw_input(\"\\nPlease provide the input : \")\r\n if userInput == \"1\":\r\n # Building the model\r\n tree, model_count_title, model_count_description, count_vectorizer_title, count_vectorizer_description = Video.generatePredictingModel(\r\n self.datamanager.cleaned_data)\r\n print \"\\nNow give it a try and check out our awesome predictions !!!\"\r\n time.sleep(3)\r\n # Perform as many prediction as required by the user\r\n self.performPrediction(tree, model_count_title, model_count_description, count_vectorizer_title,\r\n count_vectorizer_description)\r\n elif userInput == \"4\":\r\n self.InitiateFlow()\r\n elif userInput == \"quit\":\r\n self.ExitProgram()\r\n else:\r\n print \"\\nOops...Incorrect Input...Please enter correct Input !!!\\n\"\r\n self.confirmDataSimulatorUserInputLoop()\r\n except KeyboardInterrupt:\r\n print \"quitting...\"\r\n sys.exit()", "def algorithm_loop(self):", "def DataSimulatorUserInputLoop(self):\r\n userInput = \"\"\r\n try:\r\n while userInput != \"quit\":\r\n self.printDataSimulationOptions()\r\n userInput = raw_input(\"\\nPlease provide the input : \")\r\n if userInput == \"1\":\r\n self.confirmDataSimulatorUserInputLoop()\r\n elif userInput == \"4\":\r\n print \"\\nYou are now in the previous control\"\r\n self.InitiateFlow()\r\n elif userInput == \"quit\":\r\n self.ExitProgram()\r\n else:\r\n print \"\\nOops...Incorrect Input...Please enter correct Input !!!\\n\"\r\n self.DataSimulatorUserInputLoop()\r\n\r\n except KeyboardInterrupt:\r\n print \"quitting...\"\r\n sys.exit()", "def main():\n # correct_counter variable counts the number of correct answers\n correct_counter = 0\n while correct_counter < ANSWERS_TO_PASS:\n # generate first random number\n first_rand = int(rand_num())\n # generate second random number\n second_rand = int(rand_num())\n # generate random operation\n opt_rand = (rand_operation())\n # ask the question\n print(\"What is\", first_rand, opt_rand, str(second_rand) + '?')\n answer = int(input(\"Your answer: \"))\n expected_answer = compute_answer(first_rand, opt_rand, second_rand)\n # compare_flag is True when expected_answer and answer are equal\n compare_flag = compare_values(expected_answer, answer)\n if compare_flag:\n correct_counter += 1\n print(\"Correct! You've gotten\", correct_counter, \"correct in a row.\")\n if correct_counter == 3:\n print(\"Congratulations! You mastered Math.\")\n else:\n correct_counter = 0\n print(\"Incorrect. 
The expected answer is\", str(expected_answer))", "def test(self, speaker, input, answer=None):\n import time\n\n self.in_interaction = True\n self.input(input, speaker)\n while self.in_interaction:\n if self.waiting_for_more_info:\n if answer:\n self._logger.debug(colored_print(\"> Automatically answering: \", 'bold'))\n self._logger.info(colored_print(answer, 'red'))\n self.input(answer, speaker)\n answer = None\n else:\n return [], self.last_sentence\n time.sleep(0.1)\n\n return self.last_stmts_set, self.last_sentence", "def loop(self):\n while True:\n self._print_field()\n try:\n cmd = input(PROMPT)\n self._invoke_cmd(cmd)\n except EOFError: # Allows to exit by pressing ⌃D without error\n break", "def __multiple_answers(self):\n while True:\n quiz_current_progress, quiz_complete_progress = self.__get_quiz_progress()\n self.__sys_out_progress(\n quiz_current_progress, quiz_complete_progress, 4\n )\n #either on the last question, or just completed\n if quiz_current_progress == quiz_complete_progress - 1:\n try:\n time.sleep(self.__WEB_DRIVER_WAIT_SHORT)\n #quiz has been completed\n if len(\n self.driver.find_elements(By.CLASS_NAME, 'headerMessage_Refresh')\n ) > 0:\n self.__sys_out_progress(\n quiz_complete_progress, quiz_complete_progress, 4\n )\n self.__sys_out(\"Quiz complete\", 3, True)\n return True\n\n #just got to last question, time to solve it\n except:\n pass\n\n #within the question, select the correct multiple answers\n try:\n option_index = 0\n question_progress = '0/5'\n question_progresses = [question_progress]\n while True:\n if (\n len(\n self.driver.find_elements(\n By.ID, 'rqAnswerOption{0}'.format(option_index)\n )\n )\n <= 0\n ):\n return False\n #find_element_by_id returns an EventFiringWebElement object, to get the web element, must use wrapped_element attribute\n element = self.driver.find_element(By.ID,\n 'rqAnswerOption{0}'.format(option_index)\n ).wrapped_element\n #must use ActionChains due to error 'element is not clickable at point', for more info see this link:https://stackoverflow.com/questions/11908249/debugging-element-is-not-clickable-at-point-error\n ActionChains(self.driver).move_to_element(element).click(\n element\n ).perform()\n time.sleep(random.uniform(1, 4))\n prev_progress = question_progress\n #returns a string like '1/5' (1 out of 5 answers selected correctly so far)\n question_progress = self.driver.find_element(By.CLASS_NAME,\n 'bt_corOpStat'\n ).text\n #once the last correct answer is clicked, question progress becomes '' or 5/5, tho in the past it became '0/5' sometimes, hence 2nd cond\n if question_progress in ['', '5/5'] or (\n prev_progress != question_progress\n and question_progress in question_progresses\n ):\n #wait for the next question to appear\n time.sleep(self.__WEB_DRIVER_WAIT_SHORT)\n break\n question_progresses.append(question_progress)\n option_index += 1\n except:\n return False", "def inputloop():\n while True:\n for char in raw_input().decode('utf-8'):\n print script(char)", "def maths(): # Maybe rename this?\n say(\"How about a little exercise?\")\n i = 0\n while i < 3:\n result = do_math()\n if result < 1:\n i = 0\n else:\n i += result", "def eval_loop():\n while True:\n prompt = 'Type input to eval:\\n'\n expression = raw_input(prompt)\n print(eval(expression))", "def check(question, solutions):\n good_answer = False\n while not good_answer:\n answer = input(question)\n if answer in solutions:\n good_answer = True\n else:\n print(\"Please try again!\")", "def loop(self):\n pass", "def solutions(self, query):\n \n ans 
= self.ask(query)\n while ans != None:\n yield ans\n ans = self.redo()", "def perform_strategy(self, counter):\r\n ans = \"\"\r\n while ans.lower() not in [\"y\", \"n\"]:\r\n print(f\"Envelope number {counter} contains: \\n ... \\n ... \\n ... \\n{self.envelopes[counter].money}$!!!!\\n \"\r\n f\"Do you CHOOSE this ENVELOPE!?!? y/n\")\r\n ans = input()\r\n return ans.lower() == 'y'", "def Main():\n # coconut_test - the lowest number for checking for solutions\n # coconut_test_limit - the highest number for checking for solutions\n # sailors - the number of sailors put on the forsaken island\n # print_results - the boolean pertaining to whether the user wants to see\n # the results of just the number of how many results there\n # are\n # wall_start - the programs run time at the start of the calculations\n # cpu_start - the time spent computing at the beginning of the \n # calcluations\n # second_solution - the second working solution\n # results - the number of working solutions found\n # increment - the difference between any two working solutions\n # wall_stop - the programs run time at the end of the calculations\n # cpu_stop - the time spent computing at the end of the calculations\n # wall_secs - the elapsed time it took the program to complete the calcs\n # cpu_secs - the total time spent computing during the calculations\n\n Instructions()\n\n\n done = False\n while not done:\n coconut_test, coconut_test_limit, sailors, print_results = Get_Input()\n \n wall_start = time.time()\n cpu_start = time.clock()\n\n results = Calculations(coconut_test, coconut_test_limit, sailors, \n print_results)\n\n wall_stop = time.time()\n cpu_stop = time.clock()\n\n wall_secs = wall_stop - wall_start\n cpu_secs = cpu_stop - cpu_start\n \n Print_Output(results, coconut_test, coconut_test_limit, cpu_secs,\n wall_secs)\n\n answer = input('Would you like to enter numbers again (y/n)? ')\n done = (answer == 'n' or answer == 'N')\n print()", "def loop(self):\n line = self.read()\n while line != \"quit\":\n value = self.eval(line)\n print(value)\n line = self.read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute html_reporter if html flag is exist in sys.argv.
def __execute_reporter(self):
    if not self.__args.report:
        return

    reporter.HTMLReporter().generate_report_from_file(
        self.__lst_json_files)
[ "def do_html(self, subcmd, opts, path):\n mgr = Manager()\n try:\n if opts.browse:\n htmls = []\n buf = mgr.buf_from_path(path, lang=opts.lang)\n html = buf.to_html(True, True, title=path,\n do_trg=opts.do_trg,\n do_eval=opts.do_eval)\n finally:\n mgr.finalize()\n\n if opts.output == '-':\n output_path = None\n output_file = sys.stdout\n else:\n if opts.output:\n output_path = opts.output\n else:\n output_path = path+\".html\"\n if exists(output_path):\n if opts.force:\n os.remove(output_path)\n else:\n raise Error(\"`%s' exists: use -f|--force option to \"\n \"allow overwrite\" % output_path)\n output_file = open(output_path, 'w')\n # else:\n # output_path = None\n # output_file = sys.stdout\n # #XXX Disable writing t\n # output_file = None\n if output_file:\n output_file.write(html)\n if output_path:\n output_file.close()\n\n if opts.browse:\n if not output_path:\n raise Error(\"cannot open in browser if stdout used \"\n \"for output\")\n import webbrowser\n url = _url_from_local_path(output_path)\n webbrowser.open_new(url)", "def check_html_lint(args):\n logging.info('Linting HTML...')\n\n base = shakaBuildHelpers.get_source_base()\n files = ['index.html', os.path.join('demo', 'index.html'), 'support.html']\n file_paths = [os.path.join(base, x) for x in files]\n config_path = os.path.join(base, '.htmlhintrc')\n\n htmllinter = compiler.HtmlLinter(file_paths, config_path)\n return htmllinter.lint(force=args.force)", "def test_htmldir(self):\n self.check_argument('htmldir')", "def file_test_results_html(file_spec, input_data):\n file_test_results(file_spec, input_data, frmt=u\"html\")", "def doHTML(bunch, text, env):\n return \"<html>%s</html>\" % text", "def main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--levels', type=int,\n help='maximum levels to show', default=2)\n parser.add_argument('--report', type=str, default=\"structure_dump\",\n help='Report to run.')\n parser.add_argument('input')\n\n args = parser.parse_args()\n\n data = open(args.input).read()\n if args.input.endswith('.html'):\n root = from_html(data)\n else:\n root = from_markdown(data)\n\n if args.report == 'structure_dump':\n report.structure_dump.report(root, args)\n elif args.report == 'display_cdf':\n report.display_cdf.report(root, args)\n elif args.report == 'enhanced_html':\n assert hasattr(root, \"ast\") # Require the CommonMark AST.\n report.enhanced_html.report(root, args)\n else:\n parser.error(\"Unrecognized report: %d\" % args.report)", "def cmdline():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_file\", help=\"path to input html file\")\n parser.add_argument(\"output_file\", help=\"path to output file\")\n args = parser.parse_args()\n\n file = open(args.input_file, 'r')\n lines = file.readlines()\n file.close()\n html_string = ''.join(lines)\n\n output_string = html2md(html_string)\n\n file = open(args.output_file, 'w')\n file.write(output_string)\n file.close()", "def test_html_documentation(self):\n app = Sphinx(\n self.source_dir,\n self.config_dir,\n self.output_dir,\n self.doctree_dir,\n buildername='html',\n warningiserror=True,\n )\n app.build(force_all=self.all_files)", "def main(*args):\n local_args = pywikibot.handle_args(args)\n\n # This factory is responsible for processing command line arguments\n # that are also used by other scripts and that determine on which pages\n # to work on.\n gen_factory = pagegenerators.GeneratorFactory()\n # The program to pipe stuff through\n filters = []\n options = {}\n\n # Parse command line 
arguments\n for arg in local_args:\n option, sep, value = arg.partition(':')\n if option == '-filter':\n filters.append(value)\n elif option == '-always':\n options['always'] = True\n else:\n # check if a standard argument like\n # -start:XYZ or -ref:Asdf was given.\n gen_factory.handleArg(arg)\n\n options['filters'] = filters\n\n gen = gen_factory.getCombinedGenerator(preload=True)\n if gen:\n # The preloading generator is responsible for downloading multiple\n # pages from the wiki simultaneously.\n bot = PiperBot(gen, **options)\n bot.run()\n return True\n else:\n pywikibot.bot.suggest_help(missing_generator=True)\n return False", "def html():\n\n\n local('ls -la')\n local('sphinx-build -b html . _build/html')\n\n print \"Build finished; see _build/html/index.html\"", "def run_report_generation(**kwargs):\n out = run_python_script_helper(\n os.path.dirname(__file__), \"report_generation_example.py\", **kwargs\n )\n return out", "def main(argv):\n\n location = None\n output_file = None\n try:\n opts, _args = getopt.getopt(argv, \"hl:o:\", [\"rlocation=\", \"ooutput_file=\"])\n except getopt.GetoptError:\n print('pylint_md.py -l <location> -o <output_file>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('pylint_md -l <location> -o <output_file>')\n sys.exit()\n elif opt in (\"-l\", \"--llocation\"):\n location = arg\n elif opt in (\"-o\", \"--oooutput_file\"):\n output_file = arg\n print('Location is {}'.format(location))\n print('Output file is {}'.format(output_file))\n\n if location is not None and output_file is not None:\n pylint_markdown(location, output_file)", "def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n\n urls = scrape_urls(parsed_args.webpage)\n emails = scrape_emails(parsed_args.webpage)\n phones = scrape_phones(parsed_args.webpage)\n\n if urls:\n print(\"\\nURLS:\\n\\n\", '\\n'.join(urls))\n else:\n print(\"\\nURLS:\\n\\nNone\")\n\n if emails:\n print(\"\\nEMAILS:\\n\\n\", '\\n'.join(emails))\n else:\n print(\"\\nEMAILS:\\n\\nNone\")\n\n if phones:\n print(\"\\nPHONE NUMBERS:\\n\\n\", '\\n'.join(phones))\n else:\n print(\"\\nPHONE NUMBERS:\\n\\nNone\")", "def _process_html(self):\n pass", "def main():\n args = parse_args()\n\n if args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format=\"[%(levelname)s]: %(message)s\")\n else:\n logging.basicConfig(format=\"%(levelname)s: %(message)s\")\n\n logging.debug(\"Args: %s\", args)\n logging.debug(\"Input file: %s\", args.file)\n logging.debug(\"Output file: %s\", args.output_file)\n logging.debug(\"Output path: %s\", args.output_path)\n\n color_red = \"\\033[91m\"\n color_reset = \"\\033[0m\"\n try:\n DocstringExtractor(args.file, args.output_file, args.output_path)\n return True\n except Error as error:\n logging.error(\"%s%s%s\", color_red, error, color_reset)\n except Exception as error: # pylint: disable=broad-except\n logging.exception(\"%s%s%s\", color_red, error, color_reset)\n return False", "def auto_reporter(**opts):\r\n if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():\r\n try:\r\n return FancyReporter(**opts)\r\n except ImportError:\r\n pass\r\n return PlainReporter()", "def run():\r\n\r\n # Parse options and adjust logging level if necessary\r\n options, logging_level = parse_options()\r\n if not options: sys.exit(2)\r\n logger.setLevel(logging_level)\r\n logger.addHandler(logging.StreamHandler())\r\n\r\n # Run\r\n markdown.markdownFromFile(**options)", "def HTML(text, parser=None, 
base_url=None): # real signature unknown; restored from __doc__\n pass", "def main():\n\n exporter = HTMLExporter()\n json_as_string = sys.stdin.read().decode(\"utf-8\")\n\n try:\n notebook_node = reads_json(json_as_string)\n except Exception:\n logging.exception(\"Unable to parse JSON.\")\n \n html, _ = exporter.from_notebook_node(notebook_node)\n \n sys.stderr.write(\"JSON was {:,} byte(s); html is {:,} byte(s).\\n\".format(\n len(json_as_string), len(html)\n ))\n \n sys.stdout.write(html.encode(\"utf-8\"))\n sys.stderr.flush()\n sys.stdout.flush()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all scenarios in a folder. Recurse into sub folders if the "rd" argument appears in sys.argv.
def __get_list_scenarios_in_folder(self): # If both directory and recur_directory are exist # then show "Invalid command" and exit. if self.__args.directory is not "" \ and self.__args.recur_directory is not "": utils.print_error("\n{}\n".format(constant.ERR_COMMAND_ERROR)) exit(1) recursive = False start_directory = "" if self.__args.directory is not "": start_directory = self.__args.directory elif self.__args.recur_directory is not "": start_directory = self.__args.recur_directory recursive = True if not start_directory: start_directory = TestRunner.__test_script_dir if not os.path.exists(start_directory): utils.print_error( "\n{}\n".format(constant.ERR_PATH_DOES_NOT_EXIST. format(start_directory))) exit(1) list_files = [] if start_directory.endswith(".py"): list_files = [start_directory] else: try: if recursive: for directory, _, _ in os.walk(start_directory): list_files.extend(glob.glob(os.path.join(directory, "*.py"))) else: list_files.extend(glob.glob(os.path.join(start_directory, "*.py"))) except OSError: pass list_test_scenarios = [] for file in list_files: sys.path.append(os.path.dirname(os.path.abspath(file))) test_module = \ importlib.import_module(os.path.basename(file).replace(".py", "")) for name, cls in inspect.getmembers(test_module, inspect.isclass): if cls is not TestScenarioBase \ and issubclass(cls, TestScenarioBase): list_test_scenarios.append(cls) return list_test_scenarios
[ "def getImmediateSubdirectories(dir):", "def get_run_subdirs(dirpath):\n patt = '^' + dirpath + r'/run'\n return get_subdirs(dirpath, patt)", "def open_run_list(base_path, filter=None):\n dir_list = listdir(base_path)\n if not dir_list:\n return []\n if filter is not None:\n filter_list = glob(path.join(base_path, filter))\n filter_list = [path.basename(x) for x in filter_list]\n dir_list = [x for x in dir_list if x in filter_list]\n if not dir_list:\n return []\n dir_list.sort(key=human_order_key)\n return [Run(x) for x in [path.join(base_path, y) for y in dir_list]]", "def iter_tests():\n import sys\n\n if sys.argv[1:]:\n lSearchDirs = list()\n for rArg in sys.argv[1:]:\n rDir = os.path.abspath( rArg )\n if not os.path.exists( rDir ):\n print \"WARNING: Ignoring non-existant directory %r\" % rDir\n continue\n lSearchDirs.append( rDir )\n else:\n lSearchDirs = [grBaseDir]\n\n for rSearchDir in lSearchDirs:\n for rDir, lDirs, lFiles in os.walk(rSearchDir):\n if \"EXPECTED_RESULT\" in lFiles:\n yield rDir", "def fixture_sets(*args):\n return [os.path.join(*args, dir)\n for dir in os.listdir(os.path.join(FIXTURE_DATA, *args))\n if os.path.isdir(os.path.join(FIXTURE_DATA, *args, dir))\n ]", "def getdirs(args):\n rundir = args.rundir\n if not os.path.exists(rundir):\n logger.fatal(\"rundir '%s' does not exist under Run directory.\\n\", rundir)\n sys.exit(1)\n\n runinfo = os.path.join(rundir + '/RunInfo.xml')\n if not os.path.exists(runinfo):\n logger.fatal(\"RunInfo '%s' does not exist under Run directory.\\n\", runinfo)\n sys.exit(1)\n\n outdir = args.outdir\n if not os.path.exists(outdir):\n logger.fatal(\"output directory '%s' does not exist.\\n\", outdir)\n sys.exit(1)\n\n return(rundir, outdir, runinfo)", "def read_sate_run_folder(directory, rar_fn = \"runs_and_results-it*.csv\",inst_fn = \"instances.txt\" , feat_fn = \"instance-features.txt\" , ps_fn = \"paramstrings-it*.txt\"):\n print((\"reading {}\".format(directory)))\n configs = read_paramstrings_file(find_largest_file(os.path.join(directory,ps_fn)))\n instance_names = read_instances_file(find_largest_file(os.path.join(directory,inst_fn)))\n runs_and_results = read_runs_and_results_file(find_largest_file(os.path.join(directory, rar_fn)))\n\n full_feat_fn = glob.glob(os.path.join(directory,feat_fn))\n if len(full_feat_fn) == 1: \n instance_features = read_instance_features_file(full_feat_fn[0])\n else:\n instance_features = None\n\n return (configs, instance_names, instance_features, runs_and_results)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def get_cases(cases_root, arm, event, case=None):\n match = 'NCANDA_S*'\n if case:\n match = case\n\n case_list = list()\n for cpath in glob.glob(os.path.join(cases_root, match)):\n if os.path.isdir(os.path.join(cpath,arm,event)) : \n case_list.append(cpath)\n \n return case_list", "def run_dir(dirpath):\n items = [os.path.join(dirpath, entry) for entry in os.listdir(args.sample)]\n samples = [sample for sample in items if os.path.isfile(sample)]\n count = len(samples)\n log.info(\"Found \" + str(count) + \" samples\")\n for sample in samples:\n run_sample(sample)\n count -= 1\n log.info(str(count) + ' samples remaining')", "def traverse_directory(args) :\n siteRGX = re.compile('DPH.'+args.site.upper())\n s = []\n\n # report non-unique residuals\n for root, dirs, files in os.walk(args.traverse):\n path = root.split('/')\n for gamitFile in files:\n if siteRGX.search(gamitFile):\n gamitFile = root+'/'+gamitFile\n #check for potential duplicates in 
the same path, only want to use one of the DOH files\n if len(path[-1]) > 4:\n regex = re.compile(root[:-2])\n else:\n regex = re.compile(root)\n\n\n # only check for duplicates when there is more than one network\n # being processed...\n if args.network == 'yyyy_dddnN':\n if len(s) == 0:\n s.append(gamitFile)\n else:\n # for each element in s, check to see if the root path does not match\n # any of the files already stored in the list\n m = 0\n for item in s:\n if regex.search(item) :\n m = 1\n if not m :\n s.append(gamitFile)\n else:\n s.append(gamitFile)\n\n s.sort()\n lines = ''\n # Now loop through each file and consolidate the residuals\n for dfile in s :\n dphs = res.parseDPH(dfile)\n\n # check if the dph files are being searched are from\n #a GAMIT network of type yyyy/dddn?/\n root, filename = os.path.split(dfile)\n if args.network == 'yyyy_dddnN':\n ddd = root[-5:-2]\n year = int(root[-10:-6])\n startDT = dt.datetime(year,01,01)\n startDT = startDT + dt.timedelta(days=(int(ddd) -1))\n elif args.network == 'ddd':\n ddd = root[-3:]\n year = root[-8:-4] \n startDT = dt.datetime(int(year),01,01)\n startDT = startDT + dt.timedelta(days=(int(ddd) -1))\n\n line = res.consolidate(dphs,startDT)\n lines = lines + line\n\n # if its larger than 1GB dump it to a file\n # this is designed to keep the load n the file system lighter\n if sys.getsizeof(lines) > 1073741824 :\n f = gzip.open(args.save_file,'a',9)\n f.write(lines)\n f.close()\n lines = ''\n #print(lines)\n\n # dump any remaining memory to file\n f = gzip.open(args.save_file,'a',9)\n f.write(lines)\n f.close()\n lines = ''\n\n return", "def readDirectory():\n tagdir = \"tagreplacements\"\n data = os.listdir(tagdir)\n for d in data:\n processFile(os.path.join(tagdir,d))\n \n #print(repd)", "def find_tests():\n for dir_name in os.listdir():\n if os.path.isdir(dir_name):\n game = get_game(dir_name)\n TEST_GAMEBUILDS.append(game)", "def navigate_to_cases():\r\n current_dir = os.getcwd() # Get current directory\r\n dir_steps = \"//foamfiles//counterFlowFlame2D//\"\r\n cases_path = current_dir + dir_steps # full path with case folders\r\n case_directory_list = [directory for directory in os.listdir(cases_path) if os.path.isdir(cases_path)]\r\n length_case_directory_list = len(case_directory_list) # length of directory\r\n\r\n print(\"case directory length:\")\r\n print(\"\\n\")\r\n print(case_directory_list)\r\n\r\n return cases_path, length_case_directory_list, case_directory_list", "def run_with_recursive_look_up(starting_id):\n todo_stack = [starting_id]\n total_all_files = 0\n total_folders = 0\n while len(todo_stack) > 0:\n next_parent = todo_stack.pop()\n q_string = \"'\" + next_parent + \"'\" + \" in parents and trashed=false\"\n query = {'maxResults': 1000, 'q': q_string}\n for file_list in drive.ListFile(query):\n total_all_files += len(file_list)\n print(total_all_files)\n for file in file_list:\n if intense_debug:\n all_file_set.append(file)\n all_folders.log_item(file)\n if SafeFile.is_folder(file):\n todo_stack.append(SafeFile.safe_get(file, 'id'))\n total_folders += 1\n\n print(\"Parsed %d files\\t %d folders\" % (total_all_files, total_folders))", "def get_run_folders():\n return [os.path.join(f, sf) for f in get_date_folders() for sf in os.listdir(f)]", "def load_slurm_folder(p):\n filter_function = lambda f: True if \".out\" in f else False\n slurm_dict = {\"runs\": []}\n for f in filter(filter_function, os.listdir(p)):\n slurm_dict[\"runs\"].append(load_slurm_data(os.path.join(p, f)))\n exit(\"Success!\")", "def walk(top, 
func, arg):\r\n\r\n try:\r\n names = os.listdir(top)\r\n except os.error:\r\n return\r\n func(arg, top, names)\r\n for name in names:\r\n name = join(top, name)\r\n try:\r\n st = os.lstat(name)\r\n except os.error:\r\n continue\r\n if stat.S_ISDIR(st.st_mode):\r\n walk(name, func, arg)", "def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a tuple representing a circle as (x,y,radius) and returns a tuple with the x,y coordinates and width,height (x,y,w,h)
def circle_2_tuple(circle): assign_coord = lambda x,y: x - y if x > y else 0 x = assign_coord(circle[0],circle[2]) y = assign_coord(circle[1],circle[2]) assign_size = lambda x,y : y*2 if x > y else y*2 - (y-x) w = assign_size(circle[0],circle[2]) h = assign_size(circle[1],circle[2]) return (x,y,w,h)
[ "def circle_2_bbox(circle):\n x,y,w,h = circle_2_tuple(circle)\n return ((x,y),(x+w,y+h))", "def circle(r):\r\n pi=3.14\r\n area=pi*r*r\r\n perimeter=2*pi*r\r\n return(area,perimeter)", "def circle(radius=2, exclude_center: bool=False) -> tuple:\n if radius < 0:\n return\n rr = (radius + 1) * (radius + 1) - (radius >> 1)\n for x in range(-radius, radius + 1):\n rxx = x * x\n for y in range(-radius, radius + 1):\n ryy = y * y\n if rxx + ryy < rr:\n if exclude_center and x == 0 and y== 0:\n continue\n yield x, y", "def get_radius(size):\n return (size * 10) - 5", "def makePointsSquareInCircle(x, y, radius):\n # Precondition: x, y, and radius should be non-negative real numbers.\n points = []\n points.append((x, y))\n points.append((x + radius, y + radius))\n points.append((x, y + (2 * radius)))\n points.append((x - radius, y + radius))\n points.append((x, y))\n return points", "def getPatchFromCoords( x, y, width, height, size ):\n x1 = max( 0, x-size/2 )\n y1 = max( 0, y-size/2 )\n x2 = min( width, x+size/2+1 )\n y2 = min( height, y+size/2+1 )\n return ( x1, y1, x2, y2 )", "def get_circle_coords(self, radius, divider, count,center_x, center_y):\n\n angle_deg = (360/divider)*count\n angle = radians(angle_deg-(90 + (360/divider)))\n x = radius*cos(angle) + center_x;\n y = radius*sin(angle) + center_y;\n return (int(x), int(y))", "def circle(radius : int) -> list:\n xcenter = radius\n ycenter = radius\n circle = []\n for x in range(-radius, radius+1):\n for y in range(-radius, radius+1):\n if x**2 + y**2 <= radius**2:\n circle.append(\"*\")\n else:\n circle.append(\" \")\n circle.append(\"\\n\")\n return circle", "def circle_square(r):\n\n\t# Pi = 3.14\n\n\treturn float(pi * pow(r, 2))", "def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)", "def square_img(image):\n print(\"Squaring the image...\", end=\" \")\n height, width = image.shape[0], image.shape[1]\n length = min(height, width)\n center = Point(image.shape[0] / 2, image.shape[1] / 2)\n radius = length / 2 - 1 / 2\n print(colored(\"DONE\", \"green\"))\n return center, radius, width, height, length", "def GetCircle(pos):\r\n if pos[0] == 0:\r\n pos[0] = 1\r\n a, b = float(pos[0]), 0.0\r\n c, d = 0.0, iphone_dims[1]/2.0\r\n e, f = float(pos[0]), float(iphone_dims[1])\r\n k = (0.5)*((a**2+b**2)*(e-c) + (c**2+d**2)*(a-e) + (e**2+f**2)*(c-a)) / (b*(e-c)+d*(a-e)+f*(c-a))\r\n h = (0.5)*((a**2+b**2)*(f-d) + (c**2+d**2)*(b-f) + (e**2+f**2)*(d-b)) / (a*(f-d)+c*(b-f)+e*(d-b))\r\n rsqr = (a-h)**2 + (b-k)**2\r\n\r\n theta = math.acos(((a-h)*(e-h) + (b-k)*(f-k)) / rsqr)\r\n return ((h, k), math.sqrt(rsqr), theta)", "def drawSquareInCircle(x, y, radius, screen, color):\n # Precondition: x, y, and radius should be non-negative real numbers, screen should be pygame screen object,\n # color should be an rgb color tuple in range (0,0,0) to (255,255,255)\n lineThickness = 2\n pygame.draw.circle(screen, color, (x, y + radius), radius, 1)\n pygame.draw.lines(screen, color, False, makePointsSquareInCircle(x, y, radius), lineThickness)", "def getSizeTuple(self):\n\t\treturn (self.gridSizeX, self.gridSizeY)", "def random_shape(height, width):\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width 
- buffer - 1)\n # Size\n s = random.randint(buffer, height // 4)\n return shape, color, (x, y, s)", "def createCircle(self, x, y, radius):\n # TODO (#2398) fix this to be top left coordinates, width, height\n return QtCore.QRectF(\n int(x - radius), int(y - radius), int(radius * 2), int(radius * 2)\n )", "def _create_circle(self, x, y, r, **kwargs):\r\n return self.create_oval(x-r, y-r, x+r, y+r, **kwargs)", "def circle_radius(point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)", "def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a tuple representing a circle as (x,y,radius) and returns a tuple representing a bbox ((x,y),(x',y'))
def circle_2_bbox(circle): x,y,w,h = circle_2_tuple(circle) return ((x,y),(x+w,y+h))
[ "def circle_2_tuple(circle):\n assign_coord = lambda x,y: x - y if x > y else 0\n x = assign_coord(circle[0],circle[2])\n y = assign_coord(circle[1],circle[2])\n\n assign_size = lambda x,y : y*2 if x > y else y*2 - (y-x) \n w = assign_size(circle[0],circle[2])\n h = assign_size(circle[1],circle[2])\n return (x,y,w,h)", "def bbox_from_circle(img, circles):\n seg_imgs = []\n bboxes = []\n aux = img.copy()\n for i,el in enumerate(circles):\n bbox = circle_2_bbox(el['coord'])\n bbox = fix_bbox(bbox,aux.shape)\n cv.rectangle(aux,bbox[0],bbox[1],(0,255,0))\n bboxes.append(bbox)\n return bboxes", "def bounding_box(primitive):\n\n if primitive[\"shape\"] == \"circle\":\n bbox = [[primitive[\"center\"][0] - primitive[\"radius\"],\n primitive[\"center\"][1] - primitive[\"radius\"]],\n [primitive[\"center\"][0] + primitive[\"radius\"],\n primitive[\"center\"][1] + primitive[\"radius\"]]]\n else:\n x_coords, y_coords = zip(*primitive[\"vertices\"])\n bbox = [[min(x_coords), min(y_coords)],\n [max(x_coords), max(y_coords)]]\n\n primitive[\"bounding_box\"] = bbox\n return primitive", "def get_center_point(bbox):\n x_middle = 42\n y_middle = 42\n\n # HINT: bbox.xmin, bbox,xmax, bbox.ymin, bbox.ymax\n return (x_middle, y_middle)", "def bbox(self, node):\n node_id = node.get('id')\n #inkex.utils.debug(\"Check if \" + str(node_id) + \" is in \" + str(self.node_info))\n info = self.node_info[node_id] \n \n x = info.x\n y = info.y\n width = info.width\n height = info.height\n\n return Box(Point(x, y),\n Point(x + width, y),\n Point(x + width, y + height),\n Point(x, y + height))", "def get_bbox_center(self) -> tuple:\n return tuple([0.5 * (l + u) for l, u in zip(self.lower_left, self.upper_right)])", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)", "def bounding_box(self) -> tuple[Point, Point]:\n result = (\n Point(min(self.p1.x, self.p2.x), min(self.p1.y, self.p2.y)),\n Point(max(self.p1.x, self.p2.x), max(self.p1.y, self.p2.y)),\n )\n return result", "def circle(radius=2, exclude_center: bool=False) -> tuple:\n if radius < 0:\n return\n rr = (radius + 1) * (radius + 1) - (radius >> 1)\n for x in range(-radius, radius + 1):\n rxx = x * x\n for y in range(-radius, radius + 1):\n ryy = y * y\n if rxx + ryy < rr:\n if exclude_center and x == 0 and y== 0:\n continue\n yield x, y", "def pointgraph_from_circle(fitting):\n diameter = fitting.diameter\n radius = diameter / 2.0\n y, x = fitting.center\n y -= radius\n x -= radius\n return bounding_box((y, x), (y + diameter, x + diameter))", "def coord_center2corner(bbox):\n\n x, y = bbox.new([bbox[0]]), bbox.new([bbox[1]])\n w, h = bbox.new([bbox[2]]), bbox.new([bbox[3]])\n x1 = x - torch.floor(w / 2)\n y1 = y - torch.floor(h / 2)\n x2 = x + torch.floor(w / 2)\n y2 = y + torch.floor(h / 2)\n\n return x1, y1, x2, y2", "def get_bbox(x,y, buffer=0.):\n return dict(left=np.min(x), \n right=np.max(x), \n bottom=np.min(y), \n top=np.max(y))", "def bbox_from_middle_point(middle_point, width, height):\n (x, y) = middle_point\n return (\n x - width / 2,\n y - height / 2,\n width,\n height\n )", "def get_bbox(bbox):\n xmin, ymin, w, h = bbox\n xmin = round(xmin)\n ymin = round(ymin)\n xmax = round(xmin + w) - 1\n ymax = round(ymin + h) - 1\n return [xmin, ymin, xmax, ymax]", "def bounding_box(self):\n if self._owcs.pixel_bounds is None:\n if self._owcs.pixel_shape is not None:\n nx, ny = 
self._owcs.pixel_shape\n elif self._owcs.array_shape is not None:\n ny, nx = self._owcs.array_shape\n else:\n return None\n\n return ((-0.5, nx - 0.5), (-0.5, ny - 0.5))\n\n else:\n return self._owcs.pixel_bounds", "def bbox(x):\n if ispoint(x):\n return pointbbox(x)\n elif isline(x):\n return linebbox(x)\n elif isarc(x):\n return arcbbox(x)\n elif ispoly(x):\n return polybbox(x)\n elif isgeomlist(x):\n return geomlistbbox(x)\n else:\n raise ValueError(\"inappropriate type for bbox(): \",format(x))", "def cv_bbox2bbox(self, cv_bbox: iter) -> tuple:\n\n c_x = cv_bbox[0] + cv_bbox[2] / 2\n c_y = cv_bbox[1] + cv_bbox[3] / 2\n return (c_x, c_y, cv_bbox[2], cv_bbox[3])", "def boundingCircle(self):\n\n try:\n import cv2\n except:\n logger.warning(\"Unable to import cv2\")\n return None\n\n # contour of the blob in image\n contour = self.contour()\n\n points = []\n # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()\n for pair in contour:\n points.append([[pair[0], pair[1]]])\n\n points = np.array(points)\n\n (cen, rad) = cv2.minEnclosingCircle(points);\n\n return (cen[0], cen[1], rad)", "def fix_bbox(bbox,img_shape):\n x = min(bbox[1][0],img_shape[1])\n y = min(bbox[1][1],img_shape[0])\n return ((bbox[0]),(x,y))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a tuple of tuples representing a bbox ((x,y),(x',y')) and returns the bbox clipped to the image shape
def fix_bbox(bbox,img_shape): x = min(bbox[1][0],img_shape[1]) y = min(bbox[1][1],img_shape[0]) return ((bbox[0]),(x,y))
[ "def circle_2_bbox(circle):\n x,y,w,h = circle_2_tuple(circle)\n return ((x,y),(x+w,y+h))", "def get_bbox(bbox):\n xmin, ymin, w, h = bbox\n xmin = round(xmin)\n ymin = round(ymin)\n xmax = round(xmin + w) - 1\n ymax = round(ymin + h) - 1\n return [xmin, ymin, xmax, ymax]", "def points_to_bbox(p):\n llx = urx = p[0][0]\n lly = ury = p[0][1]\n for x in p[1:]:\n if x[0] < llx: llx = x[0]\n elif x[0] > urx: urx = x[0]\n if x[1] < lly: lly = x[1]\n elif x[1] > ury: ury = x[1]\n return (llx, lly, urx, ury)", "def bbox_transform(bbox):\n cx, cy, w, h = bbox\n out_box = [[]]*4\n out_box[0] = cx-w/2\n out_box[1] = cy-h/2\n out_box[2] = cx+w/2\n out_box[3] = cy+h/2\n\n return out_box", "def get_bbox(x,y, buffer=0.):\n return dict(left=np.min(x), \n right=np.max(x), \n bottom=np.min(y), \n top=np.max(y))", "def bounding_box(self) -> tuple[Point, Point]:\n result = (\n Point(min(self.p1.x, self.p2.x), min(self.p1.y, self.p2.y)),\n Point(max(self.p1.x, self.p2.x), max(self.p1.y, self.p2.y)),\n )\n return result", "def cv_bbox2bbox(self, cv_bbox: iter) -> tuple:\n\n c_x = cv_bbox[0] + cv_bbox[2] / 2\n c_y = cv_bbox[1] + cv_bbox[3] / 2\n return (c_x, c_y, cv_bbox[2], cv_bbox[3])", "def parse_bbox(bbox):\n bbox = force_text(bbox)\n sep = (';' if ';' in bbox else ',')\n try:\n lon1, lat1, lon2, lat2 = [float(c) for c in bbox.split(sep)]\n except IndexError:\n raise ValueError('bbox %r is not exactly 4 coordinates' % bbox)\n except ValueError as exc:\n raise ValueError('bbox %r has invalidly formed numbers (%s)' % (bbox, exc))\n if lat1 > lat2:\n lat2, lat1 = lat1, lat2\n if lon1 > lon2:\n lon2, lon1 = lon1, lon2\n\n for lat in (lat1, lat2):\n if not (-90 < lat < +90):\n raise ValueError('latitude %f is out of range (-90..+90)' % lat)\n\n for lon in (lon1, lon2):\n if not (-180 < lon < +180):\n raise ValueError('longitude %f is out of range (-180..+180)' % lon)\n\n return ((lon1, lat1), (lon2, lat2))", "def _box2d_to_bbox(pg_box2d: str) -> Tuple[float, float, float, float]:\n m = _BOX2D_PATTERN.match(pg_box2d)\n if m is None:\n raise RuntimeError(f\"Unexpected postgis box syntax {pg_box2d!r}\")\n\n # We know there's exactly four groups, but type checker doesn't...\n # noinspection PyTypeChecker\n return tuple(float(m) for m in m.groups())", "def polybbox(a):\n if len(a) == 0:\n return False\n elif len(a) == 1:\n return pointbbox(a[0])\n else:\n minx = maxx = a[0][0]\n miny = maxy = a[0][1]\n for i in range(1,len(a)):\n x=a[i][0]\n y=a[i][1]\n if x < minx:\n minx =x\n elif x > maxx:\n maxx = x\n if y < miny:\n miny = y\n elif y > maxy:\n maxy = y\n return [ point(minx,miny),point(maxx,maxy)]", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)", "def bounding_box2D(pts):\n dim = len(pts[0]) # should be 2\n bb_min = [min([t[i] for t in pts]) for i in range(dim)]\n bb_max = [max([t[i] for t in pts]) for i in range(dim)]\n return bb_min[0], bb_min[1], bb_max[0] - bb_min[0], bb_max[1] - bb_min[1]", "def rect(coords : Tuple[int, int]) -> Tuple[int, int, int, int]:\n min_x = min([x for x, _ in coords])\n max_x = max([x for x, _ in coords])\n min_y = min([y for _, y in coords])\n max_y = max([y for _, y in coords])\n\n return (min_x, max_x, min_y, max_y)", "def middle_point_from_bbox(bbox):\n (x, y, width, height) = bbox\n return (\n x + width / 2,\n y + height / 2\n )", "def pptx_to_bbox(left, top, width, height):\n\n return top-height, left, 
width, height", "def rect_to_bounding_box(rect):\n x = rect.left()\n y = rect.top()\n w = rect.right() - x\n h = rect.bottom() - y\n\n return x, y, w, h", "def bbox_transform(bbox):\n with tf.variable_scope('bbox_transform') as scope:\n cx, cy, w, h = bbox\n out_box = [[]]*4\n out_box[0] = cx-w/2\n out_box[1] = cy-h/2\n out_box[2] = cx+w/2\n out_box[3] = cy+h/2\n\n return out_box", "def bounding_box(alpha):\n assert alpha.ndim == 2\n\n # Take the bounding box of the support, with a certain threshold.\n #print(\"Using alpha\", self.use_alpha, \"support\", self.support)\n supp_axs = [alpha.max(axis=1-i) for i in range(2)]\n\n th = 0.5 \n # Check first and last value of that threshold\n bb = [np.where(supp_axs[i] > th)[0][[0,-1]] for i in range(2)]\n\n # This bb looks like [(x0, x1), (y0, y1)], when we want it as (x0, y0, x1, y1)\n #psize = self.settings['subsample_size']\n #ret = (bb[0][0]/psize[0], bb[1][0]/psize[1], bb[0][1]/psize[0], bb[1][1]/psize[1])\n\n return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])", "def bbox_to_geom(bbox: Tuple[float, float, float, float]) -> Dict:\n # TODO: Handle dateline crossing geometry\n return {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [bbox[0], bbox[3]],\n [bbox[0], bbox[1]],\n [bbox[2], bbox[1]],\n [bbox[2], bbox[3]],\n [bbox[0], bbox[3]],\n ]\n ],\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draws bboxes in an image given an array of circles [(x,y,radius)]
def bbox_from_circle(img, circles): seg_imgs = [] bboxes = [] aux = img.copy() for i,el in enumerate(circles): bbox = circle_2_bbox(el['coord']) bbox = fix_bbox(bbox,aux.shape) cv.rectangle(aux,bbox[0],bbox[1],(0,255,0)) bboxes.append(bbox) return bboxes
[ "def draw_bboxes(img, bboxes, c='r'):\n plt.imshow(img)\n for bbox in bboxes:\n draw_bbox(bbox, c)", "def show_bboxes(img, bounding_boxes, facial_landmarks=[]):\n\n img_copy = np.copy(img)\n\n for b in bounding_boxes:\n x1, y1, x2, y2 = int(b[0]), int(b[1]), int(b[2]), int(b[3])\n cv2.rectangle(img_copy,(x1, y1),(x2, y2),(255,255,255),2)\n\n for p in facial_landmarks:\n for i in range(5):\n x, y = p[i], p[i + 5]\n cv2.circle(img_copy, (x,y), 2, (0,0,255), -1)\n\n return img_copy", "def find_circ_BB(image_path, prnt=False):\r\n\r\n boxes = []\r\n\r\n # load the image, clone it for output, and then convert it to RGB\r\n image = cv2.imread(image_path)\r\n h,w = image.shape[:2]\r\n if prnt:\r\n output = image.copy()\r\n output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)\r\n fig, ax = plt.subplots(1)\r\n # smooth image\r\n blur = cv2.GaussianBlur(image, (5, 5), 0)\r\n # produce binary image with a threshold, use only Red channel (the circles are red)\r\n th = 60\r\n ret, threshold = cv2.threshold(blur[:, :, 2], th, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\r\n\r\n\r\n circles = cv2.HoughCircles(threshold, cv2.HOUGH_GRADIENT, 1.2, 200, param1=150,param2=50)\r\n # ensure at least some circles were found\r\n if circles is not None:\r\n # convert the (x, y) coordinates and radius of the circles to integers\r\n circles = np.round(circles[0, :]).astype(\"int\")\r\n # loop over the (x, y) coordinates and radius of the circles\r\n for (x, y, r) in circles:\r\n # draw a rectangle in the output image\r\n # corresponding to the center of the circle\r\n int(r)\r\n if prnt:\r\n rect = patches.Rectangle((x-r, y-r), 2*r, 2*r, linewidth=1, edgecolor='cyan', facecolor='none')\r\n ax.add_patch(rect)\r\n min_corner = [max(x-r, 0), max(y-r, 0)]\r\n max_corner = [min(x+r, w), min(y+r, h)]\r\n boxes.append([min_corner,max_corner])\r\n\r\n if prnt:\r\n ax.imshow(output)\r\n return boxes", "def show_centre_of_bbox(self, image, objects):\n for obj in objects:\n image = cv2.circle(image, \n (int(obj.centre_cords[0] * self.x), int(obj.centre_cords[1] * self.y)), \n radius=5, \n color=AXE_COLOR, \n thickness=-1)\n \n return image", "def draw_box(rects, img):\n for x1, y1, x2, y2 in rects:\n cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)", "def draw_circles(circles, img):\n\n if circles is not None:\n circles = np.round(circles[0, :]).astype(\"int\")\n print(circles)\n for (x, y, r) in circles:\n # draw the circles over img\n cv2.circle(img, (x, y), r, (0, 0, 255), 2)\n cv2.circle(img, (x,y),2,(0,0,255),2)\n return img", "def draw_labeled_bboxes(self, img):", "def draw_bboxes(imgs, bboxes):\n colors = tf.constant([[1, 0, 0, 1]], dtype=tf.float32)\n imgs_with_bb = tf.image.draw_bounding_boxes(imgs, bboxes, colors)\n plt.figure()\n for img_with_bb in imgs_with_bb:\n plt.imshow(img_with_bb)\n plt.show()", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img", "def draw_bounding_boxes_on_image_array(image, boxes, color=[], thickness=5):\n\n draw_bounding_boxes_on_image(image, boxes, color, thickness)\n \n return image", "def show_bboxes(img, bounding_boxes=None, facial_landmarks=[]):\n\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n# for b in bounding_boxes:\n# draw.rectangle([\n# (b[0], b[1]), (b[2], b[3])\n# 
], outline='white')\n\n for p in facial_landmarks:\n for i in range(106):\n draw.ellipse([\n (p[i*2] - 1.0, p[2*i + 1] - 1.0),\n (p[i*2] + 1.0, p[2*i+1] + 1.0)\n ], outline='blue')\n font = ImageFont.truetype(\"arial.ttf\", 10)\n draw.text([p[2*i], p[2*i+1]], str(i), font=font)\n\n return img_copy", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color, 3)", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color)", "def draw_bbox(box,img):\n cv2.rectangle(img, box[0], box[1], color=(0, 255, 0),thickness=3)", "def draw_box(image, boxes, box_color=(255, 255, 255)):\n\n for box in boxes:\n cv2.rectangle(image,\n (box[0], box[1]),\n (box[2], box[3]), box_color)", "def draw_fruits_box(img_bgr, fruits):\n if len(fruits) == 0:\n return img_bgr\n\n img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)\n wight, height, _ = img_rgb.shape\n # img_pil = Image.fromarray(img_rgb)\n\n # for fruit in fruits:\n # if fruit.box is not None:\n # xmin, ymin, xmax, ymax = fruit.box\n # draw_bounding_box_on_image_array(img_rgb, ymin, xmin, ymax, xmax, color='white', thickness=2,\n # display_str_list=[\n # ' Type: {} Distance:{:.2f}cm'.format(fruit.cls, fruit.distance)],\n # use_normalized_coordinates=False)\n for fruit in fruits:\n if fruit.box is not None:\n xmin, ymin, xmax, ymax = fruit.box\n draw_bounding_box_on_image_array(img_rgb, ymin, xmin, ymax, xmax, color='white', thickness=2,\n display_str_list=[\n ' Type:{}|XYZ=({:.2f},{:.2f},{:.2f})cm|Size:{:.2f}cm'.format(\n fruit.cls,\n fruit.distance,\n (xmin + xmax - wight) * 0.5 * (28 / 210),\n (height - (ymin + ymax)) * 0.5 * (28 / 210),\n fruit.size)],\n use_normalized_coordinates=False)\n\n return cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)", "def display_bboxes_center_xywh(img, bboxes_center_xywh):\n \n # Create figure and axes\n fig, ax = plt.subplots(1, figsize=(9, 6.9))\n \n # Display the image\n ax.imshow(img)\n \n bboxes_topleft_xywh = convert_bbox_center_xywh_tensor_to_topleft_xywh(bboxes_center_xywh)\n \n for bbox_topleft_xywh in bboxes_topleft_xywh:\n \n # Create a Rectangle patch\n rect = patches.Rectangle(bbox_topleft_xywh[:2],\n bbox_topleft_xywh[2],\n bbox_topleft_xywh[3],\n linewidth=2,\n edgecolor='b',\n facecolor='none')\n # Add the patch to the Axes\n ax.add_patch(rect)\n \n plt.show()", "def drawbox(img,bbox):\n return img", "def draw_bounding_boxes_on_image_array(image, boxes, color=[], thickness=5):\n\n draw_bounding_boxes_on_image(image, boxes, color, thickness)\n\n return image" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate heterozygosity. samples = list of sample names; vcf = VCF file
def calHet( inFile, varType ): names = [] print("Sample\tfracHet\thetCt\thomCt") # print header with open( inFile, 'r') as files: # open sample name file for i in files: i = i.rstrip() vcf = i + "." + varType + ".vcf" with open( vcf, 'r' ) as data: hom = 0.0 # count homozygous sites het = 0.0 # count heterozygous sites fractionHet = 0.0 # fraction heterozygous for var in data: if var.startswith("#"): # skip header continue else: var = var.rstrip() line = var.split("\t") stats = line[9].split(':') # alleles = list( map( int, stats[1].split(',') ) ) # create list of allele counts check = [ i for i in alleles if i > 0] # put any counts > 0 into a list if not check: # if all allele counts == 0 continue # all alleles are set to zero wtf? Result of a quality score that is low. elif len(check) > 1: # multiple allele counts , must be heterozygous het += 1 # more than one allele elif len(check) == 1: # only one allele has a count hom += 1 #print("%s\t%s\t%s\t%s\t%s\t%s" %(i, line[0], line[1], stats[0], stats[1], check ) ) if hom == 0: fractionHet = 100 else: fractionHet = het/(hom + het) # calculate fraction heterozygous print("%s\t%f\t%f\t%f" %(i, fractionHet, het,hom )) files.close()
[ "def get_samples(self):\n\t\twith gzip.open(self.vcf, 'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith(\"##\"):\n\t\t\t\t\tself.header = self.header + line\n\t\t\t\tif line.startswith(\"#C\"):\n\t\t\t\t\tself.samples = line.strip().split('\\t')[9:]\n\t\t\t\t\tself.samples = ('|').join(self.samples)\n\t\t\t\t\tbreak\n\t\treturn", "def vcf_samples(vcffile):\n try:\n vcf_reader = vcf.Reader(open(vcffile, 'r'))\n return vcf_reader.samples\n except Exception as error:\n print(f\"Could not read vcffile {vcffile}: continuing without vcf data: {str(error)}\")\n\n return []", "def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. 
\n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))", "def available_samples(vcf_path):\n return _header_from_vcf(vcf_path)[9:]", "def init_vcf(self):\n\n self.vcf = pysam.VariantFile(self.vcf_fn)\n self.samples = [s for s in self.vcf.header.samples]", "def load_variants( vcf_file, sig_cutoff, pool1_indices, pool2_indices ):\n\t\n\tvariants = {}\n\tcounter_00 = 0\t\t# positions without sufficient coverage\n\tcounter = 0\t\t\t# all variants\n\tcounter_empty = 0\t# positions without sufficient information to compute test\n\t\n\twith open( vcf_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] != '#':\n\t\t\t\tparts = line.strip().split('\\t')\n\t\t\t\t\n\t\t\t\t# --- prepare variables to save information per row (variant) --- #\n\t\t\t\t\n\t\t\t\tstatus_pool1 = False\n\t\t\t\tstatus_pool2 = False\n\t\t\t\tref1 = 0\n\t\t\t\talt1 = 0\n\t\t\t\tref2 = 0\n\t\t\t\talt2 = 0\n\t\t\t\t\n\t\t\t\t# --- collecting values of pool1 --- #\n\t\t\t\t\n\t\t\t\tfor idx in pool1_indices:\n\t\t\t\t\tif parts[ idx ] not in [ './.', \"./._L\", \"./._J\" ]:\n\t\t\t\t\t\tstatus_pool1 = True\n\t\t\t\t\t\tx, y = map( int, parts[ idx ].split(':')[1].split(',') )\n\t\t\t\t\t\tref1 += x\n\t\t\t\t\t\talt1 += y\n\t\t\t\t\n\t\t\t\t# --- collect values of pool2 --- #\n\t\t\t\t\n\t\t\t\tfor idx in pool2_indices:\n\t\t\t\t\tif parts[ idx ] not in [ './.', \"./._L\", \"./._J\" ]:\n\t\t\t\t\t\tstatus_pool2 = True\n\t\t\t\t\t\tx, y = map( int, parts[ idx ].split(':')[1].split(',') )\n\t\t\t\t\t\tref2 += x\n\t\t\t\t\t\talt2 += y\n\t\t\t\t\n\t\t\t\tif status_pool1 + status_pool2 == 2:\n\t\t\t\t\tif ref1+alt1 > 0 and ref2+alt2 > 0:\n\t\t\t\t\t\toddsratio, pvalue = stats.fisher_exact( ( ( ref1, alt1 ), ( ref2, alt2 ) ) )\n\t\t\t\t\t\tif pvalue <= sig_cutoff:\n\t\t\t\t\t\t\tvariants.update( { parts[0]+\"_%_\"+parts[1].zfill( 9 ): parts[0:7] + [ str( pvalue ) ] + parts[8:] } )\n\t\t\t\t\telse:\n\t\t\t\t\t\tcounter_00 += 1\n\t\t\t\telse:\n\t\t\t\t\tcounter_empty += 1\n\t\t\t\tcounter += 1\n\t\t\tline = f.readline()\n\n\t\tcounter = counter - counter_00 - counter_empty # total nr of SNPs without the no coverage cases\n\t\t\n\t\tprint 'There are ' + str(counter_00) + ' SNPs with no coverage for at least one pool, which will not be included in the analysis.\\n'\n\t\tprint 'There are ' + str(counter_empty) + ' SNPs with empty line for at least one pool, which will not be included in the analysis.\\n'\n\t\tprint 'There are ' + str( len( variants.keys() ) ) + ' SNPs which are significant - adjusted alpha:(' + str( sig_cutoff ) + ')\\n'\n\t\tprint str( 1 - float( len( variants.keys() ) ) / float( counter ) ) + ' percent of SNPs ('+ str( counter - len( variants.keys() ) ) +') are filtered out, because they are not significant.\\n'\n\t\n\treturn variants", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = 
[info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def extract_print_new_vcf_file(parameter_stuff, samples_to_extract_list):\r\n\r\n input_vcf_file = parameter_stuff['vcf_input_file']\r\n\r\n output_vcf_file = parameter_stuff['vcf_output_file']\r\n\r\n # VCF Input File\r\n vcf_input_file = open(input_vcf_file, 'r')\r\n # VCF Output File\r\n vcf_output_file = open(output_vcf_file, 'w')\r\n\r\n # Get indices of samples of interest\r\n samples_list_indices = []\r\n\r\n for line in vcf_input_file:\r\n \r\n #Get Header from file\r\n \r\n if line.startswith((\"##\", \"#\", \" #\", \"'#\", '\"##')) and not line.startswith('#CHROM'):\r\n\r\n vcf_output_file.write(line)\r\n continue\r\n \r\n if line.startswith('#CHROM'):\r\n \r\n # Strip newline from line\r\n line = line.rstrip('\\n')\r\n\r\n # Split data on the tab\r\n parsed_line = line.split('\\t')\r\n\r\n # Get header for variant stuff\r\n header_variant_info = parsed_line[:9]\r\n\r\n # Convert header to a string for writing to file\r\n header_variant_info = ('\\t'.join(map(str,header_variant_info)))\r\n\r\n # Write to new vcf file\r\n vcf_output_file.write(header_variant_info + '\\t')\r\n\r\n # Getting the sample names from file\r\n header_sample_list = parsed_line[9:]\r\n \r\n #setting initial movement value (find end of sample extraction list)\r\n x = 0\r\n\r\n print (samples_to_extract_list)\r\n end_of_list_index = len(samples_to_extract_list) - 1\r\n\r\n # Need sample extraction list first to control flow better\r\n # Ending of the line problematic otherwise\r\n for sample in samples_to_extract_list:\r\n \r\n # Index of actual file list\r\n y = 0\r\n\r\n for header_sample in header_sample_list:\r\n # Setting up loop to change how output occurs\r\n # Last sample put a newline instead of tab\r\n if end_of_list_index == x:\r\n \r\n # Finding matching samples and get the index location\r\n if sample == header_sample:\r\n \r\n # Write sample name to header of new vcf file\r\n # Last sample being extracted add new line\r\n vcf_output_file.write(str(header_sample) + 
\"\\n\")\r\n\r\n # Get the index of the sample\r\n samples_list_indices.append(y)\r\n\r\n # Just keep moving through lists\r\n else:\r\n pass\r\n\r\n # All samples except for last sample get a tab\r\n else:\r\n\r\n # Finding matching samples and get the index location\r\n if sample == header_sample:\r\n \r\n # Write sample name to header of new vcf file\r\n vcf_output_file.write(str(header_sample) + \"\\t\")\r\n\r\n # Get the index of the sample\r\n samples_list_indices.append(y)\r\n\r\n # Just keep moving through lists\r\n else:\r\n pass\r\n\r\n #Move the counter for next iteration of file head sample list\r\n y += 1\r\n\r\n #Move the counter for next iteration till end of sample extraction list\r\n x += 1\r\n\r\n \r\n print (\"Printing the indices of samples\")\r\n print (samples_list_indices) \r\n \r\n \r\n # Start Parsing Actual Variant Data of File\r\n else:\r\n line = line.rstrip('\\n')\r\n parsed_line = line.split('\\t')\r\n\r\n # Get header for variant stuff\r\n variant_info = parsed_line[:9]\r\n\r\n # Get samples data\r\n samples_data = parsed_line[9:]\r\n\r\n # Convert header to a string for writing to file\r\n variant_info = ('\\t'.join(map(str,variant_info)))\r\n\r\n # Write to new vcf file\r\n vcf_output_file.write(variant_info + '\\t')\r\n \r\n # Find end of list index\r\n # Built in a way to put a newline at end of list\r\n end_of_list_index = len(samples_list_indices) - 1\r\n x = 0\r\n\r\n # Start cycling through variant sample information\r\n for value in samples_list_indices:\r\n\r\n if end_of_list_index == x:\r\n vcf_output_file.write(str(samples_data[value]) + '\\n')\r\n\r\n else:\r\n vcf_output_file.write(str(samples_data[value]) + '\\t')\r\n\r\n # Move indice counter (till it hits end of samples list)\r\n x +=1\r\n\r\n \r\n vcf_input_file.close()\r\n vcf_output_file.close()\r\n \r\n return()", "def test_gff_to_vcf(self):\n if self.vcf_file is None:\n self.fail(\"Can't find VCF file\")\n n_records = 0\n with open(self.vcf_file) as vcf:\n for line in vcf.readlines():\n if not line.startswith(\"#\"):\n n_records += 1\n self.assertEqual(n_records, self.metric_dict[\"number_of_variants\"])", "def count_variants(vcf_list, sample_list):\n\n df_lst = []\n\n sample_vcf_dct = dict(zip(sample_list,vcf_list))\n\n for s in sample_vcf_dct.keys():\n\n vcf_in = sample_vcf_dct[s]\n vcf = VariantFile(vcf_in)\n\n snv = 0\n indel = 0\n\n for rec in vcf:\n\n ref_len = len(rec.ref)\n\n for a in rec.alts:\n if len(a) > 1 or ref_len > 1:\n indel +=1\n else:\n snv +=1\n\n df_lst.append([s,snv,indel])\n\n out_df = pd.DataFrame(df_lst, columns=['sample','snvs','indels'])\n\n return out_df", "def readAndFilterVcf(path, contig, samples, numbers, ploidy, qualflt=30, missingfltprop=0.6, verbose=False):\n \n print(f\"\\n-------------- Reading VCF for chromosome {contig} --------------\")\n vcf = allel.read_vcf(path, \n numbers=numbers,\n fields=['calldata/*', 'variants/*', 'samples'])\n \n #get sample names and indices\n samplenames = vcf['samples']\n ind = defaultdict(list)\n for s,names in enumerate(samplenames):\n idx = np.where(np.isin(samples['sampleID'],names))[0][0]\n t = samples.treatment[idx]\n ind[t].append(s)\n subpops = dict(ind)\n\n if verbose: print(subpops, \"\\n\")\n \n print(f\"------- Filtering VCF at QUAL={qualflt} and missingness proportion of {missingfltprop} -------\")\n #apply quality filters\n qual = vcf['variants/QUAL']\n passfilter = qual >= qualflt\n print(f\"QUAL filter will retain {passfilter.sum()} SNPs retained out of {passfilter.shape[0]} for chromosome 
{contig}\")\n\n #missingness filters \n ac = allel.GenotypeArray(vcf['calldata/GT']).count_alleles()\n snpcounts = ac.sum(axis=1)\n missingflt = snpcounts.max()*missingfltprop # must have at least 1/p alleles present\n missingness_flt = snpcounts >= missingflt\n print(f\"Missingness filter will retain {missingness_flt.sum()} SNPs out of {missingness_flt.shape[0]} for chromosome {contig}\")\n\n passfilter = np.logical_and(passfilter, missingness_flt)\n print(f\"The combined filter will retain {passfilter.sum()} SNPs out of {passfilter.shape[0]} for chromosome {contig}\")\n \n if ploidy == 1:\n geno = allel.HaplotypeArray(vcf['calldata/GT'].compress(passfilter, axis=0))\n else: \n geno = allel.GenotypeArray(vcf['calldata/GT'].compress(passfilter, axis=0))\n\n pos = allel.SortedIndex(vcf['variants/POS'].compress(passfilter, axis=0)) \n depth = vcf['variants/DP'].compress(passfilter, axis=0)\n #extract snpeff info and filter \n snpeff = pd.DataFrame(vcf['variants/ANN'])[0].str.split(\"|\", expand=True)[passfilter]\n \n ac_subpops = geno.count_alleles_subpops(subpops)\n \n return(vcf, geno, ac_subpops, pos, depth, snpeff, subpops, samplenames)", "def extract_surf_samples(filename, samples, upright = False):\n\tif samples.ndim != 2 or samples.shape[1] != 4 : raise ValueError(\"Bad shape for 'samples' array\")\n\tnsamples = samples.shape[0]\n\tdescriptors = np.zeros((nsamples,64),'float64')\n\t_lib.extract_surf_samples(filename,nsamples,samples,upright,descriptors)\n\treturn descriptors", "def genotypeGVCFs():\n return outDiscovery + \"/multisample.genotyped.vcf\"", "def getQVsForComphetModel(comphetVariantsFilename, caseNames, controlNames):\r\n\r\n\t# If we have a sample file, then we have everyone's names:\r\n\tif len(caseNames) != 0 or len(controlNames) != 0:\r\n\r\n\t\tcaseCounts = {name: set() for name in caseNames}\r\n\t\tcontrolCounts = {name: set() for name in controlNames}\r\n\r\n\t\tvariantIDs = {\"case\": caseCounts, \"ctrl\": controlCounts}\r\n\r\n\t# Otherwise, we work just from the genotypes file and get names from there as we go.\r\n\telse:\r\n\t\tvariantIDs = {\"case\": defaultdict(set), \"ctrl\": defaultdict(set)}\r\n\r\n\treader = csv.reader(open(comphetVariantsFilename, \"r\"))\r\n\theader = next(reader)\r\n\r\n\tfor line in reader:\r\n\r\n\t\tline = dict(zip(header, line))\r\n\r\n\t\tcaseOrControl = line[\"Sample Phenotype (#1)\"]\r\n\t\tname = line[\"Sample Name (#1)\"]\r\n\t\tvariantID1 = line[\"Variant ID (#1)\"]\r\n\t\tvariantIDs[caseOrControl][name].add(variantID1)\r\n\t\t# The comphet file also includes homozygous mutations, in which case there is no Variant #2.\r\n\t\tif line[\"Sample Phenotype (#1)\"] == \"het\":\r\n\t\t\tvariantID2 = line[\"Variant ID (#2)\"]\r\n\t\t\tvariantIDs[caseOrControl][name].add(variantID2)\r\n\r\n\tcaseCounts = {name: len(variants) for name, variants in variantIDs[\"case\"].items()}\r\n\tcontrolCounts = {name: len(variants) for name, variants in variantIDs[\"ctrl\"].items()}\r\n\treturn caseCounts, controlCounts", "def read_vcf(vcf, chromosomes):\n # A message that we are going through the VCF\n print('Reading ' + vcf + ' ...')\n # Start an empty list to hold the data\n parsed_vcf_data = []\n # Open the VCF, and read through it\n with open(vcf, 'r') as f:\n for line in f:\n # We want to skip the lines that start with ##\n if line.startswith('##'):\n continue\n # VCF has eight fixed fields, the first of which is #CHROM\n # These are _always_ in the same order\n # This is where the data starts\n elif line.startswith('#CHROM'):\n # We want to 
save the samples, which are listed after FORMAT\n # First, break it up into a list\n data_header = line.split()\n # Get the index of the 'FORMAT' field\n # Sometimes, VCF files won't have this - this means there is\n # no genotype data in the file, and we cannot use it\n if 'FORMAT' in data_header:\n # list.index(value) returns the index of value, but it\n # has to exist, else it raises an error, which is why we\n # put it into an if block\n format_field = data_header.index('FORMAT')\n else:\n # If not, we want to die with a message\n print('This VCF file does not have any genotype data!')\n exit(1)\n # Take everything after FORMAT to the end - the samples\n samples = data_header[format_field+1:]\n # We also don't want to mess with indels, since those are tricky\n elif 'INDEL' in line:\n continue\n # Information is now mined out of the header\n # Time to start accumulating genotype data\n else:\n # Break the line up by entries\n variant_data = line.split()\n # We want to make sure that the chromosomes listed in the VCF\n # are actually the same as those in the reference sequence.\n # If not, then we don't a proper combination of files.\n if variant_data[0] not in chromosomes:\n print('The chromosomes in the VCF do not match those in the reference!')\n exit(1)\n # Save the relevant parts of the file as a tuple\n # We need the chromosome (#CHROM), the position (POS), the \n # reference allele (REF), the alternate allele (ALT), and any\n # sample information. We don't have to save the FORMAT field\n # since we are only interested in the genotype calls, and VCF\n # specifications are such that the first thing that must be \n # listed in the sample fields is the genotype information.\n # We want to manipulate the REF and ALT alleles a little\n # since they are listed as 0, 1, 2, and so on.\n # The REF allele is always listed as 0 in genotype calls,\n # and the ALT allels are 1, 2, ... comma-delimited\n # If they are part of a list, then we can just use the index\n # to get them, and it is much easier. 
split() gives a list,\n # and + is a list concatenation operator.\n alleles = variant_data[3].split() + variant_data[4].split(',')\n relevant_data = (variant_data[0],\n variant_data[1],\n alleles,\n variant_data[format_field+1:]\n )\n # Append the chromosomes, if they are not already in the list\n # Append it to the beginning list\n parsed_vcf_data.append(relevant_data)\n print('Done!')\n # And return the sample names and the huge list\n return(samples, parsed_vcf_data)", "def extract_variants(vcf):\n\n varListAll = []\n outvcf = open('AllVars.vcf', 'w')\n \n with open(vcf) as f:\n for line in f:\n if len(line.split('\\t')) > 8:\n vcffields = line.split('\\t')[7].split(';')\n \n if len(vcffields[0].split('=')) > 1:\n \n if vcffields[0].split('=')[0] == 'AF':\n maf = vcffields[0].split('=')[1]\n \n else:\n #if maf not present calculate maf=AO/DP\n maf = float(vcffields[0].split('=')[1])/float(vcffields[1].split('=')[1])\n \n if len(vcffields[0].split('=')[1].split(',')) == 1 and float(maf)>0: \n outvcf.write(line)\n varListAll.append(line)\n #elif len(vcffields[0].split('=')[1].split(',')) > 1:\n #multiple variants in the same locus: NEED TO IMPLEMENT THIS\n # outvcf.write(line)\n # varListAll.append(line)\n \n outvcf.close()\n\n return varListAll", "def load_vcf_data(vcf_file):\n \n if(vcf_file[-3:]==\".gz\"):\n vcf_data=gzip.open(vcf_file, \"r\")\n else:\n vcf_data=open(vcf_file, \"r\")\n \n snp_names=[]\n snp_pos=[]\n genotype_data=[]\n\n missing=0\n \n for line in vcf_data:\n\n if line[0:2] == '##':\n continue\n elif line[0:1] == '#':\n data=line[1:-1]\n data=data.split(\"\\t\")\n if data[0:9]==[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\"]:\n sample_names=data[9:]\n else:\n print data[0:9]\n raise Exception(\"Bad vcf header line\")\n else:\n data=line[:-1]\n data=data.split(\"\\t\")\n\n if len(data[4].split(\",\"))>1: \n print \"Warning: ignoring multi alleleic site at \" + data[0]+\":\"+data[1] \n continue # multi-allelic sites. 
\n\n if data[2] != \".\":\n snp_names.append(data[2])\n else:\n snp_names.append(data[0]+\":\"+data[1])\n\n snp_pos.append(int(data[1]))\n\n if not all([(x[0]==\".\" and x[2]==\".\") or (x[0] in [\"0\", \"1\"] and x[2] in [\"0\", \"1\"]) for x in data[9:]]):\n raise Exception(\"Could not read line: \" + line) \n \n genotype_data.append([ 3 if x[0]==\".\" and x[2]==\".\" else int(x[0])+int(x[2]) for x in data[9:] ])\n\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def test_evaluate_hevc(self):\n path_to_before_hevc = 'hevc/temp/luminance_before_hevc.yuv'\n path_to_after_hevc = 'hevc/temp/luminance_after_hevc.yuv'\n path_to_cfg = 'hevc/configuration/intra.cfg'\n path_to_bitstream = 'hevc/temp/bitstream.bin'\n qps = numpy.array([22, 42], dtype=numpy.int32)\n path_to_hevc_vis = 'hevc/pseudo_visualization/evaluate_hevc/'\n list_rotation = [0, 11, 4]\n positions_top_left = numpy.array([[300], [200]], dtype=numpy.int32)\n \n rgb_uint8 = tls.read_image_mode('hevc/pseudo_data/rgb_nightshot.jpg',\n 'RGB')\n (height_initial, width_initial, _) = rgb_uint8.shape\n height_surplus = height_initial % 8\n width_surplus = width_initial % 8\n luminances_uint8 = numpy.expand_dims(tls.rgb_to_ycbcr(rgb_uint8)[0:height_initial - height_surplus, 0:width_initial - width_surplus, 0],\n axis=0)\n (rate, psnr) = hevc.hevc.evaluate_hevc(luminances_uint8,\n path_to_before_hevc,\n path_to_after_hevc,\n path_to_cfg,\n path_to_bitstream,\n qps,\n path_to_hevc_vis,\n list_rotation,\n positions_top_left)\n print('1st quantization parameter: {}'.format(qps[0]))\n print('Rate for the 1st quantization parameter: {}'.format(rate[0, 0]))\n print('PSNR for the 1st quantization parameter: {}'.format(psnr[0, 0]))\n print('2nd quantization parameter: {}'.format(qps[1]))\n print('Rate for the 2nd quantization parameter: {}'.format(rate[1, 0]))\n print('PSNR for the 2nd quantization parameter: {}'.format(psnr[1, 0]))", "def evaluate_hevc(luminances_uint8, path_to_before_hevc, path_to_after_hevc, path_to_cfg,\n path_to_bitstream, qps, path_to_hevc_vis, list_rotation, positions_top_left):\n nb_images = luminances_uint8.shape[0]\n nb_qps = qps.size\n rate = numpy.zeros((nb_qps, nb_images))\n psnr = numpy.zeros((nb_qps, nb_images))\n for i in range(nb_qps):\n qp = qps[i].item()\n path_to_storage = os.path.join(path_to_hevc_vis,\n 'qp_{}'.format(qp))\n \n # The directory containing the reconstructed images\n # is created if it does not exist.\n if not os.path.isdir(path_to_storage):\n os.makedirs(path_to_storage)\n (rate[i, :], psnr[i, :]) = compute_rate_psnr(luminances_uint8,\n path_to_before_hevc,\n path_to_after_hevc,\n path_to_cfg,\n path_to_bitstream,\n qp,\n path_to_storage,\n list_rotation,\n positions_top_left)\n return (rate, psnr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A convenience function for getting a single suggestion.
def get_suggestion():
    global _suggestions_iterator
    while True:
        try:
            return next(_suggestions_iterator)
        except StopIteration:
            _suggestions_iterator = iter(suggestions)
[ "def suggestion(self):\n return self._suggestion", "def suggestion(self, suggestion_id):\r\n return suggestions.Suggestion(self, suggestion_id)", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "def get_suggestion(self):\n if len(self._trial_queue) != 0:\n return self._trial_queue.popleft()\n \n p = self.algorithm.get_suggestion(self.parameters, self.results,\n self.lower_is_better)\n if isinstance(p, dict):\n self.num_trials += 1\n t = Trial(id=self.num_trials, parameters=p)\n return t\n else:\n return p", "def get_room(self):\n\n return self.suggestion_set[0]", "def fetchSuggestion(self, keyword, seed_keyword, meta_keyword):\n # user agent is an HTTP browser request header that gives servers information regarding the client device and/or operating system on which the browser is running\n user_agent_list = [\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',\n ]\n url = \"http://suggestqueries.google.com/complete/search?client=chrome&hl={}&gl={}&callback=?&q={}\".format(\n self.language, self.country, keyword)\n user_agent = random.choice(user_agent_list)\n headers = {\"user-agent\": user_agent, \"dataType\": \"jsonp\"}\n response = requests.get(url, headers=headers, verify=True)\n if response.status_code == 200:\n suggestions = json.loads(response.text)\n sugg = []\n index = 0\n relevancies = []\n suggesttypes = []\n suggestsubtypes = []\n verbatimrelevance = \"\"\n if \"google:suggestrelevance\" in suggestions[4].keys():\n relevancies = suggestions[4]['google:suggestrelevance']\n if \"google:suggesttype\" in suggestions[4].keys():\n suggesttypes = suggestions[4]['google:suggesttype']\n if \"google:verbatimrelevance\" in suggestions[4].keys():\n verbatimrelevance = suggestions[4]['google:verbatimrelevance']\n if \"google:suggestsubtypes\" in suggestions[4].keys():\n suggestsubtypes = suggestions[4]['google:suggestsubtypes']\n for word in suggestions[1]:\n if self.checkSeedKeywordExists(word, meta_keyword):\n sugg.append({\n 'keyword': word,\n 'relevancy_score': relevancies[index] if len(relevancies) > 0 else None,\n 'suggesttype':suggesttypes[index] if len(suggesttypes) > 0 else None,\n 'verbatimrelevance' : verbatimrelevance,\n 'seed_keyword': seed_keyword,\n 'meta_keyword': meta_keyword,\n 'suggestsubtype' : suggestsubtypes[index] if len(suggestsubtypes) > 0 else None,\n })\n else:\n continue\n index += 1\n return sugg\n # returning false when google blocks an ip for some time \n return False", "def get_suggestion(response):\n\n\tlabel_type = response['itemDataType']\n\tif label_type.lower() == 'image':\n\t\tresponse = __image_classifier(response)\n\t# elif label_type.lower() == 'text':\n\t# \tresponse = __text_sentiment(response)\n\telse:\n\t\treturn response\n\t\n\treturn 
response", "def suggestion(self, suggestion_id):\r\n return suggestions.ForumSuggestion(self, suggestion_id)", "def get_suggestion(query):\n suggestions = []\n if query:\n if isinstance(query, unicode):\n query = query.encode('utf-8')\n query = quote(query)\n # url = \"http://clients1.google.%s/complete/search?q=%s&json=t&ds=&client=serp\" % (tld, query)\n req = urllib2.urlopen(\"http://clients1.google.%s/complete/search?q=%s&json=t&ds=&client=serp\" % (tld, query))\n encoding = req.headers['content-type'].split('charset=')[-1]\n content = unicode(req.read(), encoding)\n result = json.loads(content)\n suggestions = [i for i in result[1]]\n return suggestions", "def get_suggestion(self, parameters, results, lower_is_better):\n raise NotImplementedError(\"Algorithm class is not usable itself.\")", "def __suggest(self, text):\n if not text:\n return None\n\n try:\n url_or_request = self.caller.getSuggestionsUrl(text)\n if not url_or_request:\n # Abstract method can return invalid URL on purpose to avoid suggestions\n return None\n assert isinstance(url_or_request, basestring) or isinstance(url_or_request, urllib2.Request)\n except Exception as e:\n logging.error(\"Getting the suggestion url/request failed: %s\", e)\n return None\n\n # First try to get cached result. This should always return hit on backtracking (user deleting characters)\n if cache_manager:\n suggestions = cache_manager.get_data(text, self.cache_id, session_id=str(self.quasimodeId))\n if suggestions:\n #print \"Returning %d cached results for query '%s'\" % (len(suggestions), text)\n return suggestions\n\n suggestions = []\n data = None\n\n if isinstance(url_or_request, basestring):\n url = url_or_request\n else:\n url = url_or_request.get_full_url()\n\n try:\n # Lazily open persistent connection to the webservice\n if self._connection_manager is None:\n self._connection_manager = PersistentHTTPConnectionManager(url_or_request)\n conn = self._connection_manager.get_connection()\n resp = conn.urlopen('GET', url, release_conn=True)\n data = resp.data\n except Exception as e:\n logging.error(\"Suggest query failed: %s; url: %s\", e, url_or_request)\n self.caller.onSuggestQueryError(url_or_request, e)\n else:\n if data:\n try:\n suggestions = self.caller.decodeSuggestions(data, resp.headers)\n except Exception as e:\n logging.error(\"Suggest response parsing failed: %s\", e)\n\n # Cache suggestions even when empty is returned to avoid re-query the web service with invalid input\n if cache_manager:\n #print \"Cached %d results for query '%s'\" % (len(suggestions), text)\n cache_manager.set_data(text, suggestions, self.cache_id, session_id=str(self.quasimodeId))\n\n return suggestions", "def get_character(self):\n\n return self.suggestion_set[2]", "def get_search_suggestions(Resource=None, SuggestionQuery=None):\n pass", "def _load_suggestion(self):\n curItem = self.tree.focus()\n parent = self.tree.parent(curItem)\n\n categories = ['approved', 'conflicts', 'suggestions', 'unknown', \\\n 'cldr',]\n if parent is '':\n #skip it\n pass\n else:\n if parent not in categories:\n curTerm = parent\n category = self.tree.parent(parent)\n else:\n curTerm = curItem\n category = parent\n if CurItem != CurTerm:\n self.preferred.set(self.tree.item(curItem)['values'][1])", "def suggest(word, cutoff=0.77):\n if word in LOOKUP_TABLE:\n return LOOKUP_TABLE[word]\n\n guess = difflib.get_close_matches(word, MOST_COMMON_DOMAINS, n=1, cutoff=cutoff)\n if guess and len(guess) > 0:\n return guess[0]\n return word", "def autocomplete_suggestion(request):\n 
# Get the condition from the user's experiment context.\n # This will yield us access to the autocomplete trie!\n ec = get_experiment_context(request)\n condition = ec['condition']\n\n if time_search_experiment_out(request):\n log_event(event=\"EXPERIMENT_TIMEOUT\", request=request)\n return HttpResponseBadRequest(json.dumps({'timeout': True}), content_type='application/json')\n\n if request.GET.get('suggest'):\n results = []\n\n if experiment_setups[condition].autocomplete:\n chars = unicode(request.GET.get('suggest'))\n\n # See if the cache has what we are looking for.\n # If it does, pull it out and use that.\n # If it doesn't, query the trie and store the results in the cache before returning.\n autocomplete_cache = cache.get_cache('autocomplete')\n results = autocomplete_cache.get(chars)\n\n if not results:\n suggestion_trie = experiment_setups[condition].get_trie()\n results = suggestion_trie.suggest(chars)\n cache_time = 300\n\n autocomplete_cache.set(chars, results, cache_time)\n\n response_data = {\n 'count': len(results),\n 'results': results,\n }\n\n return HttpResponse(json.dumps(response_data), content_type='application/json')\n\n return HttpResponseBadRequest(json.dumps({'error': True}), content_type='application/json')", "def suggestion(self, suggestion_id):\n return suggestions.ForumSuggestion(self, suggestion_id)", "def suggest(self, query):\n res, suggest = self.search(query, results=1, suggestion=True)\n try:\n title = res[0] or suggest\n except IndexError: # page doesn't exist\n title = None\n return title", "def google_suggest(self, callback, who, arg, store=True):\n\t\t\n sugs = self.get_xml('http://google.com/complete/search', {'output':'toolbar', 'q': arg})\n\n if sugs is not None:\n try:\n sugs = [x[0].get('data') for x in sugs]\n except Exception, e:\n print \"XML error with Google Suggest: %s\" % e\n\t\t\t\n suggestions = self.remove_lyrics(sugs)\n random_sug = choice(suggestions)\n\t\t\t\n # Same string as we started with - roll again\n if random_sug == arg:\n try:\n suggestions.pop(suggestions.index(random_sug))\n except:\n pass\n random_sug = choice(suggestions)\n\t\t\t\t\n if random_sug is not None:\n if store:\n self.store_suggestion(who, arg)\n random_sug.strip('')\n random_sug.strip('\\r')\n w = random_sug.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if '?' not in w[-1:]:\n random_sug = random_sug + '?'\n return random_sug" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds game board by retrieving a sudoku puzzle preset from a sudoku dataset and then sets up the game board. Also calls a backtracking algorithm to derive a solution for the sudoku puzzle.
def build_game_board(self):
    # retrieves new sudoku puzzle from dataset
    sudoku_set = self.data.get_sudoku_set()
    sudoku_problem, sudoku_solution = sudoku_set[0], sudoku_set[1]
    # removes old game boards
    self.board = []
    self.puzzle = []
    self.alg_solution = []
    self.data_solution = []
    # sets up sudoku puzzle to array format
    segment = []
    for num in sudoku_problem:
        segment.append(int(num))
        if len(segment) == 9:
            self.board.append(segment)
            self.puzzle.append(segment[:])
            segment = []
    self.alg_solution = alg.solve_sudoku(self.puzzle)  # uses sudoku backtracking algorithm to solve puzzle
    # sets up the provided sudoku puzzle solution from dataset to array format
    for num in sudoku_solution:
        segment.append(int(num))
        if len(segment) == 9:
            self.data_solution.append(segment)
            segment = []
    self.game_state = "Not Solved, Keep Trying!"
[ "def new_sudoku(self):\n self.sudokus = [generate()]\n self.index = 0\n self.solution = next(bruteforce(self.sudokus[0]))\n self.hints = None\n self.hinted_sudoku = None\n\n print(\"Generating new sudoku. Rating: %d/10\" % rate(self.sudokus[0]))\n if self.settings.get(\"autocandidates\", False):\n init_candidates(self.sudokus[0])\n self.autofill(self.sudokus[0], verbose=False)\n else:\n init_candidates(self.sudokus[0], filled_only=True)", "def puzzleGen(board):\n\n\tpuzzle = copy(board)\n\tcells = [num for num in range(81)]\n\trandom.shuffle(cells)\n\n\tfor index in cells:\n\t\tx, y = index // 9, index % 9\n\t\tcell_backup = puzzle[x][y]\n\t\tpuzzle[x][y] = cell(False)\n\t\tfor puzzle[x][y].index in range(9):\n\t\t\tif puzzle[x][y].value == cell_backup.value:\n\t\t\t\tcontinue\n\t\t\tif test_cell(puzzle, (x, y)) and solve(puzzle):\n\t\t\t\tpuzzle[x][y] = cell_backup\n\t\t\t\tbreak\n\t\telse:\t# No break\n\t\t\tpuzzle[x][y] = None\n\n\treturn puzzle", "def generate_board():\n return Sudoku(3).difficulty(0.5).board", "def generate_sudoku(self):\n\n # randomly generate the first row \n random_order_number = [x for x in range(1, 10)]\n random.shuffle(random_order_number)\n for x in range(9):\n value = random_order_number[x]\n this_cell = self.grid[0][x]\n this_cell.value = value\n self.remove_value(this_cell, 0, x, value)\n\n row = 1\n column = 0\n while row <9 and column < 9:\n time.sleep(0.05)\n # search for options\n # should only be done once for each cell\n this_cell = self.grid[row][column]\n if this_cell.options == None:\n this_cell.options = self.find_options(row, column, this_cell.grid)\n\n if not this_cell.options:\n # backtrace should only happen when there is no options for this cell\n row, column = self.backtrace(this_cell, row, column)\n\n else:\n # case 3: the number has options and the number returned from the cell is valid\n if this_cell.value != None:\n self.add_value(this_cell, row, column)\n this_cell.get_value_from_options()\n # when you switch the value for a value from the option, put the current value back into the row\n self.remove_value(this_cell, row, column, this_cell.value)\n if column == 8:\n row += 1\n column = 0\n else:\n column += 1\n try:\n self.print_detail(this_cell, row, column)\n except IndexError:\n pass", "def generate_sudoku(self):\n minimum_limit_of_dots = int(self.current_settings[\"min\"])\n maximum_limit_of_dots = int(self.current_settings[\"max\"])\n generator = SudokuGenerator(minimum_limit_of_dots, maximum_limit_of_dots)\n sudoku_generated = generator.generate_sudoku()\n self.writer.save_to_file(sudoku_generated, \"../custom_games/sudoku_generated\")\n self.menu.status = self.go_main_menu_option", "def solve_board(self):\n\n\t\tself.make_lower_case(self.game_board)\n\t\tfor i in range(self.game_board_size):\n\t\t\tfor j in range(self.game_board_size):\n\t\t\t\tself.game_solutions.update(self.words_from_start(self.game_board, i, j, self.trie))\n\t\treturn", "def create_from_web_sudoku(level):\n board = SudokuBoard()\n url = \"http://backup.websudoku.com/?level={}\".format(level)\n soup = BeautifulSoup.BeautifulSoup(urllib2.urlopen(url).read())\n \n puzzle_cells = soup.findAll(\"input\", attrs={\"id\":\"cheat\"})[0][\"value\"]\n mask_cells = soup.findAll(\"input\", attrs={\"id\":\"editmask\"})[0][\"value\"]\n seq = [c if m == \"0\" else None for c, m in zip(puzzle_cells, mask_cells)] \n if len(seq) != 81:\n raise ValueError(\"invalid puzzle data\")\n\n for i in range(0, 9):\n row = [SudokuCell(c, (i, j)) for j, c in enumerate(seq[i * 9:(i + 1) * 
9])]\n board._matrix.append(row)\n board._init_cells()\n return board", "def solve_puzzle(grid):\n solutions = []\n if not grid.valid():\n return solutions\n # Backtracking, iterating over (first) smallest list of candidates for empty vertices\n candidates = grid.candidate_map()\n min_number_of_candidates = min([9] + [len(candidates[ln][rw]) for ln in range(9) for rw in range(9) if grid.grid[ln][rw] is None])\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if grid.grid[line][row] is None and len(candidates[line][row]) == min_number_of_candidates:\n for guess in candidates[line][row]:\n grid.grid[line][row] = guess\n for solution in solve_puzzle(grid):\n solutions.append(solution)\n grid.grid[line][row] = None\n break\n else:\n solutions.append(Sudoku(grid.__str__()))\n return solutions", "def main() -> None:\n print(\"Welcome to the sudoku solver!\")\n\n # prompt users to tell the square size of the sudoku\n # Example: A 12 * 12 soduoku board has SQUARE_DIM=3, BOARD_DIM=4\n while True:\n try:\n square_dim = int(input(\"Please enter the square dimension of\"\n \" your sudoku board: \"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n while True:\n try:\n board_dim = int(input(\"Please enter the board dimension of \"\n \"your sudoku board: \"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n\n # get the user input sudoku board\n print(\"Now it is time to enter your sudoku board for solving.\")\n\n to_solve_board = read_a_board(square_dim, board_dim)\n print_board(square_dim, board_dim, to_solve_board)\n solve_board(square_dim, to_solve_board)\n print(\"Solution\")\n print_board(square_dim, board_dim, to_solve_board)", "def generate_sudoku(self, n_empty):\n empty_squares = 0\n selection = choice(range(self.rows))\n values = sample(range(1,10),9)\n for sq in self.squares[selection]:\n sq.set(values.pop())\n self.update_state()\n if self.solve_state():\n for row in range(self.rows):\n for col in range(self.cols):\n self.squares[row][col].set(self.state[row][col])\n while(n_empty != empty_squares):\n row = choice(range(9))\n col = choice(range(9))\n self.squares[row][col].set(0)\n empty_squares += 1\n self.update_state()", "def search(values):\n \n # First, reduce the puzzle using the previous function to iteratively apply the following\n # constraints (CONSTRAINT PROPOGATION)\n # i) Elimination - Eliminate all potential box values not allowed by constraints\n #ii) Only Choice - Set potential box values to single value where only possible choice\n values = reduce_puzzle(values)\n\n #If this returns nothing then we hit a problem so return false\n if values is False:\n return False ## Failed earlier\n \n #If the returned suduko's boxes all contain a single value then its done so return the\n #completed suduko\n if all(len(values[s]) == 1 for s in boxes): \n return values ## Solved!\n \n #If the Sudoku is still unsolved resort to brute force and apply SEARCH technique\n #Start by choosing one of the uncompleted boxes with the fewest possibilities\n #s is returned as the box number, n is returned as length of the box value\n #print('Sudoku after inital elimination and only choice applied...')\n #display(values)\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n #print('Find out what n and s look like...')\n #print(n)\n #print(s)\n \n #Now use recurrence to solve each one of the resulting sudokus possibilites\n #By this we mean we take a box (one with the fewest possibile values) and we create\n #a 
new sudoku with the vlaue of this box set to only one of its possible values.\n #We do this iteratively for each possible value by calling this function again from within itself\n #passing our new sudoku in as parameter (recursion)\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt", "def generate_boards():\n\n print \"Generating data, please hold on...\"\n # a list for turns, each which is a list of boards, which are unique layouts\n # a completely blank layout is always the start of the game, counting for turn 0\n game = [[Board(' ' * 9, 1)]]\n\n # there are at most 9 turns in a game of tic tac toe\n for turnNum in range(1, 10):\n # list of layouts for the current turn\n turn = []\n upperLayouts = game[-1]\n\n if turnNum % 2 == 1: player = 'X'\n else: player = 'O'\n\n # every turns' unique layouts are numbered to seperate them more easily\n pattern = 1\n # goes through every layout from the previous turn\n for ul in upperLayouts:\n # game does not continue after a winning move, and using a won board is only possible after turn 5\n if turnNum <= 5 or not ul.check_win()[0]:\n # 9 positions on every board\n for pos in range(9):\n if ul[pos] == ' ':\n newLayout = Board(ul[0:pos] + player + ul[pos+1:])\n # if it is a unique layout\n unique = True\n # goes through every existing layout for this turn\n for item in turn:\n if newLayout.matches(item): \n unique = False\n # the upper layout leads to an existing layout\n ul.paths.append(item.pattern)\n break\n if unique:\n turn.append(Board(newLayout, pattern))\n # the current upper layout leads to the new layout\n ul.paths.append(pattern)\n pattern += 1\n else:\n # adds a zero for paths because a played character is taking up that space\n ul.paths.append(0)\n game.append(turn)\n return game", "def brute_force_solve(sudoku):\n for y in range(9):\n for x in range(9):\n if sudoku[x, y] == 0:\n potential_values = [True] * 9\n\n for i in range(9):\n # Check row\n if sudoku[i, y] != 0:\n potential_values[sudoku[i, y]-1] = False\n # Check col\n if sudoku[x, i] != 0:\n potential_values[sudoku[x, i]-1] = False\n # Check box\n val = sudoku[3 * (x // 3) + i // 3, 3 * (y // 3) + i % 3]\n if val != 0:\n potential_values[val-1] = False\n\n for i in range(9):\n if potential_values[i]:\n sudoku[x, y] = i + 1\n solution = brute_force_solve(sudoku)\n if solution is not None:\n return solution\n sudoku[x, y] = 0\n\n return None\n # If you get to the end, there are no zeros, so it's already solved\n return sudoku", "def start():\r\n global sudoku\r\n sudoku.fill(0)\r\n \r\n #scan all entries and collect data\r\n init = [] #list with the coordinates for the initial values\r\n for k,v in ent_dict.items():\r\n txt = v.get()\r\n if txt:\r\n if check_digit(txt):\r\n sudoku[k[0],k[1]]=int(txt)\r\n init.append((k[0],k[1]))\r\n else:\r\n #Wrong input\r\n v.config({\"background\": \"Red\"})\r\n print(\"Error, all inputs must be between 1-9\")\r\n print(\"Invalid input is: \",txt,\" in position: \",k)\r\n txt_var0 = \"Error, all inputs must be between 1-9\\n\"+\"Press Clear\\n\"+\"Enter new data\"\r\n txt_res.insert(tk.INSERT,txt_var0)\r\n return False\r\n #All initial data collected\r\n #Set the Start button to disabled\r\n st.config(state=tk.DISABLED)\r\n #start the game\r\n if sudoku_validate(sudoku):\r\n start_sudoku(sudoku)\r\n gen_output(fr_result,sudoku,set(init))\r\n time_stamp = \"Game at \"+ datetime.now().strftime('%Y-%m-%d %H:%M:%S') + \"\\n\" \r\n 
txt_res.insert(tk.INSERT,time_stamp)\r\n backtrack,exec_time = get_stats()\r\n txt_var1 = \"the number of recursion steps are: \" + str(backtrack) + \"\\n\" \r\n txt_res.insert(tk.INSERT,txt_var1)\r\n exec_time = round(exec_time,2)\r\n if exec_time < 0.5:\r\n exec_time = \" less than 0.5 seconds \"\r\n txt_var2 = \"the approximated execution time in seconds : \" + str(exec_time) + \"\\n\" + \"\\n\" \r\n txt_res.insert(tk.INSERT,txt_var2)\r\n \r\n else:\r\n print(\">>>>>>>>Invalid initial data>>>>>>>>>\")\r\n txt_var3 = \"Invalid Initial data configuration.\\nPress Clear \"+ \"\\n\"+ \"Enter new correct configuration\"\r\n txt_res.insert(tk.INSERT,txt_var3)", "def initialize_board(self):\n for row_number, row in enumerate(self.puzzle):\n for column_number, item in enumerate(row):\n self.solve_puzzle.append(\n Cell(column_number, row_number, item, self))", "def main():\n game = SudokuGame(3)\n game.print_grid()\n choice = input(\"Would you like to play the board or would you like a \"\n \"solution to be generated for you? Enter 'Play' or 'Generate' \")\n choice.lower()\n if choice == \"play\":\n while not game.is_full():\n x = int(input(\"What x-coordinate to place your number? \"))\n y = int(input(\"What y-coordinate to place your number? \"))\n num = int(input(\"What number would you like to place?\"))\n game.make_move(x, y, num)\n game.print_grid()\n if not game.verify_solution():\n print(\"WRONG SOLUTION\")\n else:\n print(\"YOU WIN\")\n else:\n game.find_solution()\n game.print_grid()", "def create(self, show=False):\n # First create empty Sudoku object, and set of indices of empty squares\n puzzle = Sudoku(\"0 \"*(self.sl**2))\n indices = [i for i in range(self.sl**2)]\n deleted = []\n\n # First add pseudorandom squares into puzzle, try 1/2 of total squares\n num_squares_to_add = (self.sl**2) // 2\n self.random_insertion(puzzle, num_squares_to_add, indices, deleted)\n\n # Repeat steps of deleting/inserting until one solution puzzle created\n while True:\n if show:\n print(render(puzzle.get_puzzle()))\n # Now check if one solution exists, and return Sudoku object if it does\n s = time.time()\n if puzzle.is_one_sol():\n return puzzle\n t = time.time()\n\n # If solving takes too much time, \"revamp\" process by deleting and inserting \n # multiple squares\n if t-s > 0.5:\n dels, ins = 1, 0\n while dels > ins:\n dels = self.random_deletion(puzzle, self.sl*2, indices, deleted)\n ins = self.random_insertion(puzzle, self.sl*10, indices, deleted) \n\n # If not one solution exists and it's solvable, more than one solution exists\n elif puzzle.is_solvable():\n dels, ins = 1, 0\n while dels > ins:\n dels = self.random_deletion(puzzle, self.sl*2, indices, deleted)\n ins = self.random_insertion(puzzle, self.sl*10, indices, deleted)\n\n # Else, there are no solutions, so must delete a square\n else:\n self.random_deletion(puzzle, 1, indices, deleted)\n\n return puzzle", "def get_sudoku_board(self, x, y, w, h, open_cv_image=None):\r\n self.clear_for_new_board()\r\n self.take_screenshot(x, y, w, h, open_cv_image)\r\n self.find_original_contours()\r\n self.fix_straight_lines()\r\n self.sort_filtered_contours()\r\n self.read_board_values()\r\n self.convert_to_numbers()", "def solve(self) -> None:\n sudoku = Sudoku(self.get_data())\n solver = SudokuSolver(sudoku)\n validation = solver.validate_sudoku()\n if validation == 1:\n solver.main_sequence()\n self.get_result(solver)\n elif validation == -1:\n self.status_bar.config(text='This sudoku array contains invalid digits.', fg='red')\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Requests user input for the row, column, and number they would like to enter as the next entry to the Sudoku puzzle. Has some lightweight data validation through a try / except block and asks for another input attempt if invalid inputs were provided.
def request_number_input(self):
    try:
        self.print_board(self.board)
        row = int(input("Please enter row to add number to (0-8): "))
        col = int(input("Please enter column to add number to (0-8): "))
        num = int(input("Please enter number you wish to add (1-9): "))
        response = self.set_number(col, row, num)
        print(response)  # verifies if move was valid or if invalid inputs were provided.
    except:
        print("Invalid input, try again!")
        self.request_number_input()
[ "def get_input(self):\n while True:\n try:\n self.rows = int(input(\"Number of rows: \"))\n while self.rows < 2 or self.rows > 30:\n self.rows = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.cols = int(input(\"Number of columns: \"))\n while self.cols < 2 or self.cols > 30:\n self.cols = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.mines = int(input(\"Number of mines: \"))\n while self.mines < 1 or (self.mines >= self.rows * self.cols):\n tile_count = self.rows * self.cols\n self.mines = int(input(\"Please enter a number between 1 and \" + str(tile_count - 1) + \": \"))\n break\n except ValueError:\n print(\"Please enter a number!\")", "def r_req():\r\n valid_input = False\r\n while valid_input is False:\r\n r_str = input(\"Please enter row number: \")\r\n if r_str == \"ANSWER\":\r\n valid_input = True\r\n game_end(answer_grid) # Passes the already completed board to the checker function.\r\n r_req()\r\n if len(r_str) > 1:\r\n print(\"Invalid input\")\r\n elif ord(r_str) < 49:\r\n print(\"Invalid input\")\r\n elif ord(r_str) > 57:\r\n print(\"Invalid input\")\r\n else:\r\n valid_input = True\r\n return int(r_str)", "def input_sudoku(self):\r\n print(\"Input Sudoku:\")\r\n for a,row in enumerate(self._original_sudoku):\r\n self._original_sudoku[a] = [int(i) for i in input(\"Row {0} (using \\\r\n' ' as separators):\\n\".format(a+1)).strip().split()]", "def get_user_move(self):\n while True:\n user_input = input(\"Enter the coordinates: > \")\n try:\n col, row = map(int, user_input.split())\n if col not in [1, 2, 3] or row not in [1, 2, 3]:\n raise CoordinateError\n idx = self.board_coords[(col, row)]\n if self.game_board[idx] != ' ':\n raise CellOccupyError\n return idx\n except ValueError:\n print(\"You should enter numbers!\")\n except CoordinateError:\n print(\"Coordinates should be from 1 to 3!\")\n except CellOccupyError:\n print('This cell is occupied! 
Choose another one!')", "def read_input(ncr):\n print('No input file specified; entering interactive mode.')\n print('Please paste sudoku below.')\n clues = []\n try:\n for _ in range(ncr):\n clue = input().rstrip()\n validate_clue_line(clue)\n clues.append(clue)\n except IndexError:\n print('Input error: insufficient lines in input.\\nPlease provide {} lines of data.'.format(ncr))\n quit()\n return clues", "def input_check(self):\n self.row = self.rowLine.text()\n if self.row == '':\n self.errLabel.setText(\"Row box cannot be empty\")\n return\n \n self.col = self.colLine.text() \n if self.col == '':\n self.errLabel.setText(\"Col box cannot be empty\") \n return\n \n self.numRows = self.numRowsLine.text()\n if self.numRows == '':\n self.errLabel.setText(\"Row count box cannot be empty\")\n return \n \n self.numCols = self.numColsLine.text()\n if self.numCols == '':\n self.errLabel.setText(\"Column count box cannot be empty\")\n return\n \n self.row = int(self.row)\n self.col = int(self.col)\n self.numRows = int(self.numRows)\n self.numCols = int(self.numCols)\n \n if self.row > self.actRows:\n self.errLabel.setText(\"Starting row is outside the picture\") \n elif self.col > self.actCols:\n self.errLabel.setText(\"Starting column is outside the picture\")\n elif (self.numRows + self.row) > self.actRows:\n self.errLabel.setText(\"Row size out of bounds\")\n elif (self.numCols + self.col) > self.actCols:\n self.errLabel.setText(\"Column size out of bounds\") \n elif self.numRows < 1:\n self.errLabel.setText(\"Row size too low\")\n elif self.numCols < 1:\n self.errLabel.setText(\"Column size too low\") \n else:\n self.crop_adjust()", "def user_guess():\n valid_input = False\n while valid_input is False:\n try:\n guess_col = int(input(\"\\nGuess column 1-8:\")) - 1\n guess_row = int(input(\"Guess row 1-8:\")) - 1\n if 0 <= guess_row < 8 and 0 <= guess_col < 8:\n if is_guess_repeated(guess_col, guess_row) is False:\n for ship in Ship.list_of_ships:\n if ship.sunk is True:\n for pos in ship.pos_around_ship:\n if pos == (guess_col, guess_row):\n raise InterruptedError\n valid_input = True\n else:\n raise FileExistsError\n else:\n raise SyntaxError\n except ValueError:\n print(\"Please enter a number\")\n except InterruptedError:\n print(\"Guess too close to sunken ship. Try again\")\n except FileExistsError:\n print(\"You have already guessed this position. Try again\")\n except SyntaxError:\n print(\"Your guess missed the board. Try again\")\n global total_shots_fired\n total_shots_fired += 1\n return guess_col, guess_row", "def parse_input(args):\n args = [i.split(\",\") for i in args]\n num_rows = sum([len(i) for i in args])\n if num_rows != 9:\n error(f\"Input Error: Sudoku must contain 9 rows, but {num_rows} have been given\")\n sudoku = np.uint8(np.zeros((9, 9)))\n row = 0\n for i in args:\n for j in i:\n if len(j) != 9:\n error(f\"Input Error: Every row must have 9 characters, but row {row + 1} has {len(j)} characters\")\n for col in range(9):\n c = j[col]\n if c == '_':\n c = '0'\n try:\n sudoku[col, row] = int(c)\n except ValueError:\n error(f\"Input Error: The character '{c}' on row {row+1}, column {col+1} is invalid. Only the \"\n f\"numeric digits, 0 to 9, and underscores are allowed\")\n row += 1\n\n return sudoku", "def get_col():\n\n while True:\n try:\n guess_letter = str(input(\"Guess a column: \\n\")).upper()\n guess = letter_and_index_conversion(guess_letter, grid_size)\n if guess in range(1, grid_size + 1):\n return guess\n else:\n print(\"Bruh! 
That's not even in the ocean o_O\")\n except ValueError:\n print(\n f\"\\nPlease enter a letter for the column between {alphabet_list[0]} and {alphabet_list[grid_size - 1]}\"\n )", "def get_adjust_cells(self):\n while True:\n cells_str = self.my_raw_input(\"Enter new cell count for the funding track (1-15): \")\n if cells_str == \"\":\n return None\n try:\n cells = int(cells_str)\n if cells < 0 or cells > 15:\n print \"Invalid cell count %d\" % cells\n else:\n return cells\n except ValueError:\n print \"Invalid number '%s'\" % cells_str", "def obtain_user_input():\n\n rule_number = int(raw_input(\"Enter a rule number from 0-255: \"))\n\n # If the input is outside of 0-255 then user needs to re-enter.\n if rule_number < 0 or rule_number > 255:\n raise NumberOutOfBoundsError(\"Input rule number {} is out of range \"\n \"0-255.\".format(rule_number))\n\n num_of_timesteps = int(raw_input(\"Enter the number of timesteps to be used: \"))\n\n # If the input is 0 or negative, then the user needs to re-enter.\n if num_of_timesteps <= 0:\n raise NumberOutOfBoundsError(\"Input time steps {} is less than \"\n \"zero.\".format(num_of_timesteps))\n\n return (rule_number, num_of_timesteps)", "def main() -> None:\n print(\"Welcome to the sudoku solver!\")\n\n # prompt users to tell the square size of the sudoku\n # Example: A 12 * 12 soduoku board has SQUARE_DIM=3, BOARD_DIM=4\n while True:\n try:\n square_dim = int(input(\"Please enter the square dimension of\"\n \" your sudoku board: \"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n while True:\n try:\n board_dim = int(input(\"Please enter the board dimension of \"\n \"your sudoku board: \"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n\n # get the user input sudoku board\n print(\"Now it is time to enter your sudoku board for solving.\")\n\n to_solve_board = read_a_board(square_dim, board_dim)\n print_board(square_dim, board_dim, to_solve_board)\n solve_board(square_dim, to_solve_board)\n print(\"Solution\")\n print_board(square_dim, board_dim, to_solve_board)", "def obtain_user_input():\n\n\n league_types = ['PPR', 'STD'] # possible acceptable league types\n\n while True: # continue till valid entry given\n try:\n league_type = input(\"Enter a League Type (PPR or STD): \").upper() # obtain value from user\n if league_type in league_types: # check if it's valid\n break # entry is valid therefore break\n else: # invalid entry\n raise ValueError\n except:\n # presesnt error message and redo loop\n print(\"Invalid Entry: please enter either PPR or STD\")\n\n\n positions = ['WR', 'RB', 'QB', 'TE'] # possible acceptable positions\n while True: # continue till valid entry given\n try:\n pos = input(\"Please enter a position (WR, RB, QB, or TE): \").upper() # obtain value from user\n if pos in positions: # make sure position is valid\n break # entry is valid so break.\n else: # invalid entry\n raise ValueError\n except:\n # presesnt error message and redo loop\n print(\"Invalid Entry: please enter either WR, RB, QB, or TE\")\n\n\n\n idx = pd.IndexSlice # index slice object used to slice df\n num_pos = final_df.loc[idx[league_type, pos], :].shape[0] # total count of the position.\n while True: # continue till valid entry given\n try:\n n_rows = input(f\"Enter a count of players to study as an integer (max: {num_pos} for {pos}): \")\n n_rows = int(n_rows) # will raise ValueError if not an integer.\n if (n_rows <= num_pos and n_rows >0): # ensure < than count of position\n break # brak since valid 
entry\n else: # invalid entry\n raise ValueError\n except ValueError:\n # presesnt error message and redo loop\n print(f\"Invalid entry: please enter an integer less than {num_pos} and > 0.\")\n\n\n # possible user entry values. \n rank_dict = {\n \"1\": \"ADP\",\n \"2\": \"TTL PTS\"\n }\n\n while True: # continue till valid entry given\n # obtain value from user\n rank_sys = input(\"Enter how you would like to rank players (1 for ADP, 2 for 2020 Total Points): \")\n try:\n if rank_sys in rank_dict: # valid entry\n rank_sys = rank_dict[rank_sys]\n break\n else: # invalid entry\n raise ValueError\n except ValueError:\n # presesnt error message and redo loop\n print(\"Invalid Entry: please enter either 1 for ADP, or 2 for 2020 Total Points\")\n\n return league_type, pos, rank_sys, n_rows", "def input_pkgidx(g_dim):\n #print('Please specify the num of parking spots:')\n pk_dim = np.int(input('Please specify the num of parking spots:'))\n while pk_dim >= g_dim:\n print('Too many parking spots!')\n pk_dim = np.int(input('Please specify the num of parking spots:'))\n\n pk_g_idx = -np.ones(pk_dim, dtype = int)\n for idx in range(pk_dim):\n print('Input as grid index ranging from 0 to',g_dim-1)\n spot_idx = np.int(input())\n while (spot_idx < 0) or (spot_idx >= g_dim):\n print('Invalid input!')\n print('Input as grid index ranging from 0 to',g_dim-1)\n spot_idx = np.int(input())\n while spot_idx in pk_g_idx:\n print('Repeated input!')\n print('Input as grid index ranging from 0 to',g_dim-1)\n spot_idx = np.int(input())\n while (spot_idx < 0) or (spot_idx >= g_dim):\n print('Invalid input!')\n print('Input as grid index ranging from 0 to',g_dim-1)\n spot_idx = np.int(input())\n pk_g_idx[idx] = spot_idx\n\n pk_g_idx.sort()\n\n return pk_g_idx", "def prompt_move(self, player):\n print(\"Player %s's turn: \" % TTT.player_label[player])\n\n input_flag = False\n while not input_flag: \n try:\n movex = int (raw_input('Enter row coord : '))\n movey = int (raw_input('Enter col coord : '))\n if movex in range(3) and movey in range(3):\n if self.grid[movex][movey] == 0:\n input_flag = True\n else:\n print(\"Move already taken!\\n\")\n else: \n print(\"Incorrect Input, please try again!\\n\")\n except ValueError:\n print(\"Numbers only\\n\")\n\n self.add_move((movex, movey), player)", "def read_position(self, opponent_field):\n while True:\n try:\n inp = input(\"Enter your position for example A4\")\n line = int(inp[1:])\n assert(line >= 1)\n assert(line <= 10)\n row = convert(inp[0])\n if row is not None:\n if opponent_field.is_hit((line, row)):\n print('You have already hit in this cell')\n continue\n return (line, row)\n except:\n print('wrong data! 
Try again!')", "def start():\r\n global sudoku\r\n sudoku.fill(0)\r\n \r\n #scan all entries and collect data\r\n init = [] #list with the coordinates for the initial values\r\n for k,v in ent_dict.items():\r\n txt = v.get()\r\n if txt:\r\n if check_digit(txt):\r\n sudoku[k[0],k[1]]=int(txt)\r\n init.append((k[0],k[1]))\r\n else:\r\n #Wrong input\r\n v.config({\"background\": \"Red\"})\r\n print(\"Error, all inputs must be between 1-9\")\r\n print(\"Invalid input is: \",txt,\" in position: \",k)\r\n txt_var0 = \"Error, all inputs must be between 1-9\\n\"+\"Press Clear\\n\"+\"Enter new data\"\r\n txt_res.insert(tk.INSERT,txt_var0)\r\n return False\r\n #All initial data collected\r\n #Set the Start button to disabled\r\n st.config(state=tk.DISABLED)\r\n #start the game\r\n if sudoku_validate(sudoku):\r\n start_sudoku(sudoku)\r\n gen_output(fr_result,sudoku,set(init))\r\n time_stamp = \"Game at \"+ datetime.now().strftime('%Y-%m-%d %H:%M:%S') + \"\\n\" \r\n txt_res.insert(tk.INSERT,time_stamp)\r\n backtrack,exec_time = get_stats()\r\n txt_var1 = \"the number of recursion steps are: \" + str(backtrack) + \"\\n\" \r\n txt_res.insert(tk.INSERT,txt_var1)\r\n exec_time = round(exec_time,2)\r\n if exec_time < 0.5:\r\n exec_time = \" less than 0.5 seconds \"\r\n txt_var2 = \"the approximated execution time in seconds : \" + str(exec_time) + \"\\n\" + \"\\n\" \r\n txt_res.insert(tk.INSERT,txt_var2)\r\n \r\n else:\r\n print(\">>>>>>>>Invalid initial data>>>>>>>>>\")\r\n txt_var3 = \"Invalid Initial data configuration.\\nPress Clear \"+ \"\\n\"+ \"Enter new correct configuration\"\r\n txt_res.insert(tk.INSERT,txt_var3)", "def getCoordinates(self, cardNumber):\n number = 'first' if cardNumber == 1 else 'second'\n while True:\n s = input(\"Enter coordinates for \" + number + \" card \")\n s = s.strip()\n x = s[0]\n y = s[-1]\n if x.isdigit() and y.isdigit():\n x = int(x)\n y = int(y)\n if 1 <= x <= self.rows and 1 <= y <= self.columns:\n return x, y\n else:\n print(\" ***Invalid coordinates! Try again.***\")\n else:\n print(\" ***Invalid coordinates! Try again.***\")", "def check_input():\n err_mes = 'Invalid input! Input must not be even and must be greater than 5.'\n while True:\n board_size = int(input('Enter board size: '))\n if not input:\n print(err_mes)\n elif board_size % 2 == 0 or board_size <= 5:\n print(err_mes)\n else:\n break\n return board_size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the requested square to change is an original input for the puzzle, which cannot be changed.
def new_input_does_not_overlap_original_board(self, col, row):
    return self.puzzle[row][col] == 0
[ "def test_squares_equal_with_different_squares(self):\n board = Board()\n board.set_square(1, 'X')\n board.set_square(2, 'O')\n board.set_square(3, 'X')\n self.assertFalse(board.squares_equal((1, 2, 3)))", "def validSquare(self, square):\n assert(isinstance(square[0], int) and isinstance(square[1], int))\n assert(square[0] >= 0 and square[1] >= 0)\n assert(square[1] < self.size and square[1] < self.size)", "def verify_square(self, square):\n if not self._in_bounds(square):\n return False\n if not self.board.get(square): #empty\n return True\n if self.board[square].color != self.color: #enemy\n return True", "def valid_square(self, row, col, value):\n # Check that the row and col are valid puzzle indices\n if not ((0 <= row < self.sl) and (0 <= col < self.sl)):\n return False\n\n # Check that the square input is empty\n if self.puzzle[row][col] != 0:\n return False\n \n # Check that the value input is a valid puzzle value\n if not (1 <= value <= self.sl):\n if self.puzzle[row][col] == 0 and value == 0:\n return True\n return False\n \n # Check each row, column and block for same number\n for i in range(self.sl): \n if self.puzzle[row][i] == value: # Check each square in row for same value\n return False\n if self.puzzle[i][col] == value: # Check each square in col for same value\n return False\n \n # Check each square in box for same value, a little more complex index-wise\n r = self.bs*(row//self.bs) + (i//self.bs) \n c = self.bs*(col//self.bs) + (i%self.bs) \n if self.puzzle[r][c] == value:\n return False\n \n return True", "def check_valid(self, square, direction, partition):\n if direction == '+':\n change = self.n\n elif direction == '-':\n change = -self.n\n elif direction == '>':\n change = 1\n elif direction == '<':\n change = -1\n for i in xrange(len(partition)):\n next_square = square + change * (i + 1)\n if len(self.board[next_square]) > 0 and self.board[next_square][-1][1] == 'C':\n return False\n if len(self.board[next_square]) > 0 and self.board[next_square][-1][1] == 'S' and i != len(partition) - 1:\n return False\n if i == len(partition) - 1 and len(self.board[next_square]) > 0 and self.board[next_square][-1][1] == 'S' and partition[i] > 1:\n return False\n if i == len(partition) - 1 and len(self.board[next_square]) > 0 and self.board[next_square][-1][1] == 'S' and self.board[square][-1][1] != 'C':\n return False\n\n return True", "def mark_square(self, square):\n if any([\n not isinstance(square, int),\n not (0 <= square < len(self.board)),\n self.board[square] is not None\n ]):\n return False\n self.board[square] = self.current_player\n self.next_player()\n return True", "def isValidMove(gameState, sudokuConfig, row, col, num):\n\n if not usedInRow(gameState, sudokuConfig, row, num) and \\\n not usedInCol(gameState, sudokuConfig, col, num) and \\\n not usedInBox(gameState, sudokuConfig, row - row%sudokuConfig[1], col - col%sudokuConfig[2], num):\n return True\n\n return False", "def valid(game_board, value, row, col):\n if len(value) > 1:\n value = \"X\"\n # Check row of new position\n for i in range(len(game_board[row])):\n if game_board[row][i] == value and i != col:\n return False\n\n # Check column of new position\n for i in range(len(game_board)):\n if game_board[i][col] == value and i != row:\n return False\n\n # Check the 3x3 square area\n start_row = 3 * (row // 3)\n start_col = 3 * (col // 3)\n for i in range(start_row, start_row+3):\n for j in range(start_col, start_col+3):\n if game_board[i][j] == value and i != row and j != col:\n return False\n\n return 
True", "def try_move(self, try_current_sq, try_move_sq):\r\n result = False\r\n try_move_piece = self.get_square(try_move_sq)\r\n if try_move_piece is not None:\r\n try_move_player = try_move_piece.get_player()\r\n try_current_piece = self.get_square(try_current_sq)\r\n try_current_player = try_current_piece.get_player()\r\n try_gen_location = self.get_gen_square(try_current_player)\r\n\r\n # check if a piece of the same color as the player is in the move square\r\n if try_move_piece is not None:\r\n if try_move_player == try_current_player:\r\n return result\r\n\r\n # try making the move\r\n self.set_square(try_move_sq, try_current_piece)\r\n self.set_square(try_current_sq)\r\n if type(try_current_piece) is General:\r\n self.set_gen_square(try_current_player, try_move_sq)\r\n\r\n # determine if the player is no longer in check and update result\r\n if self.is_in_check(try_current_player) is False:\r\n result = True\r\n\r\n # restore the board to previous state and return\r\n self.set_square(try_current_sq, try_current_piece)\r\n self.set_square(try_move_sq, try_move_piece)\r\n if type(try_current_piece) is General:\r\n self.set_gen_square(try_current_player, try_gen_location)\r\n\r\n return result", "def test_is_solved(self):\n p = hw.TilePuzzle([[1, 2], [3, 0]])\n self.assertTrue(p.is_solved())\n p = hw.TilePuzzle([[0, 1], [3, 2]])\n self.assertFalse(p.is_solved())", "def square_valid(board: Board, n: int, pawn_value: int, x: int, y: int) -> bool:\n\n return (coordinates_within_board(n, x, y) and\n square_playable(board, pawn_value, x, y))", "def has_valid_move(self, cur_square, board):\n coords = cur_square.coords\n neighbor_list = [tuple(map(sum, zip(coords, offset))) for offset in self._offsets]\n return self.has_valid_move_in_list(coords, neighbor_list, board)", "def solve_state(self):\n empty_square = self.find_empty()\n if empty_square == None:\n return True\n else:\n row, col = empty_square\n\n for i in range(1,10):\n if self.valid_move(row, col, i):\n self.state[row][col] = i\n\n if self.solve_state():\n return True\n self.state[row][col] = 0\n return False", "def test_square(self, board, row, col, test):\n if row < 0 or row > 7:\n return False\n if col < 0 or col > 7:\n return False\n \n return test(board[row][col])", "def target_king_square_not_checked(self, king, to_pos):\n for threat in [threat for pos, threat in self.board_dict.items() if not threat.colour is king.colour]:\n if threat.can_move_to(to_pos, self.board_dict):\n return False\n return True", "def is_solved(puzzle):\n for row in puzzle:\n for item in row:\n if item==1:\n return False\n return True", "async def check(self):\n\n while not self.solved:\n # Get list of possible numbers this square can have\n possibles = self.get_possible_numbers()\n # If there's only once possibility, then use this number...this square is now solved\n if len(possibles) == 1:\n self.num = possibles.pop()\n # If there are no possible squares well...something's wrong, that shouldn't be possible\n # This check is done because we want to be able to guess and check, and figure out if a guess is invalid\n elif len(possibles) == 0:\n raise ValueError(\"Impossible square; no possible numbers based on restrictions\")\n # Otherwise wait a small amount and continue\n else:\n await asyncio.sleep(0.05)", "def allowable_move(y, x, n, sudoku):\r\n for i in range(9):\r\n if sudoku[y][i] == n:\r\n return False\r\n elif sudoku[i][x] == n:\r\n return False\r\n xbox = (x//3) * 3\r\n ybox = (y//3) * 3\r\n for i in range(3):\r\n for j in range(3):\r\n if 
sudoku[ybox+i][xbox+j] == n:\r\n return False\r\n return True", "def can_move(self, row: int, col: int, row_new: int, col_new: int):\n \n # некорректная команда от игрока\n if not (0 <= col < self.FIELD_SIZE and 0 <= row < self.FIELD_SIZE and 0 <= col_new < self.FIELD_SIZE and 0 <= row_new < self.FIELD_SIZE):\n return False\n if row == row_new and col == col_new:\n return False\n \n figure_from = self.field[row][col]\n figure_to = self.field[row_new][col_new]\n \n # нельзя сходить, если нет фигуры\n if figure_from is None:\n return False\n\n # нельзя сходить в свою уже занятую клетку\n if figure_to:\n if figure_from.color == figure_to.color:\n return False\n\n # нельзя ходить чужими фигурами\n if figure_from.color != self.current_color:\n return False\n\n # проверка, что теоретически фигура может туда попасть\n return figure_from.can_move(row_new, col_new)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method for retrieving game state.
def get_game_state(self):
    return self.game_state
[ "def get_game_state(self):\r\n return self._game_state", "def get_game_state():\n global game_state\n return game_state", "def get_game_state(self):\n return self._game_status", "def get_new_gamestate(self):", "def state_(game):\n return game.initial", "def game_state():\n data = load()\n return types.Game(data)", "def get_state(self):\r\n return self.get_global_state()", "def getGameState(self):\n state = {\n \"pen_hunger\": self.pen.score[0],\n \"pen_sleep\": self.pen.score[1],\n \"pen_fun\": self.pen.score[2],\n \"pen_hand\": self.pen.score[3]\n }\n\n return state", "def get_state(self):\n return self.run_cmd('get-state')", "def game_state():\n if not flask.request.context.game.started_at:\n raise helpers.RequestError(2311)\n helpers.send_user(\n 'game_state', get_game_state(flask.request.context.game)\n )", "def get_state(self):\n return self.session.get(self.state_cache_key)", "def get_current_state(self):\n return self.world.get_state()", "def getState(self): \n return self.tello.get_current_state()", "def get_current_state(self):\n return self.robot.get_current_state()", "def get_state(self):\n self.request_state()\n e = self.get_event()\n if e.id != ID_STATE:\n raise GrblEventError(e)\n return e.data", "def get_state(self, player_id):\n return self._extract_state(self.game.get_state(player_id))", "def getstate(self):\n self._oldpacket = self.state.packet_number\n self._oldsticks = self.sticks.copy()\n self._oldtriggers = self.triggers.copy()\n self._oldbuttons = self.buttons\n res = xinput.XInputGetState(0, ctypes.byref(self.state))\n if res == ERROR_SUCCESS:\n if self.state.packet_number > self._oldpacket:\n #print(hex(self.state.gamepad.buttons))\n self.sticks = np.array([self.state.gamepad.l_thumb_x, self.state.gamepad.l_thumb_y,\n self.state.gamepad.r_thumb_x, self.state.gamepad.r_thumb_y])\n self.triggers = np.array([self.state.gamepad.left_trigger, \n self.state.gamepad.right_trigger])\n self.buttons = self.state.gamepad.buttons\n self.sticks = applydeadband(self.sticks,self.stickdeadzone)\n self.triggers = applydeadband(self.triggers, self.triggerdeadzone)\n self.packet = self.state.packet_number\n return\n if res == ERROR_DEVICE_NOT_CONNECTED:\n raise RuntimeError(\n \"Error %d device not connected\" % (res))", "def fetch(self, tick: int) -> GameState:\r\n return self.cache[tick].game_state", "def getgamestatus(self):\n return self._status" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method for playing a game of sudoku. Prints out rules and instructions and asks for user inputs. If the current puzzle is solved, asks the player if they would like to play again and provides a new puzzle.
def play_sudoku(puzzle):
    print_instructions()
    print("For review and grading purposes purposes, here is a sample solution:")
    puzzle.print_board(puzzle.alg_solution)
    # while puzzle is not solved, continues to ask user for their next input
    while puzzle.get_game_state() != "Solved!":
        puzzle.request_number_input()
        puzzle.print_board(puzzle.get_game_board())
    # if puzzle is solved, asks user if they would like to play again
    play_again = input("Would you like to play again? Y/N: ")
    play_again = play_again.lower()
    if play_again == 'y':
        puzzle.build_game_board()
        play_sudoku(puzzle)
    else:
        print("Thanks for playing!")
[ "def main():\n game = SudokuGame(3)\n game.print_grid()\n choice = input(\"Would you like to play the board or would you like a \"\n \"solution to be generated for you? Enter 'Play' or 'Generate' \")\n choice.lower()\n if choice == \"play\":\n while not game.is_full():\n x = int(input(\"What x-coordinate to place your number? \"))\n y = int(input(\"What y-coordinate to place your number? \"))\n num = int(input(\"What number would you like to place?\"))\n game.make_move(x, y, num)\n game.print_grid()\n if not game.verify_solution():\n print(\"WRONG SOLUTION\")\n else:\n print(\"YOU WIN\")\n else:\n game.find_solution()\n game.print_grid()", "def main() -> None:\n print(\"Welcome to the sudoku solver!\")\n\n # prompt users to tell the square size of the sudoku\n # Example: A 12 * 12 soduoku board has SQUARE_DIM=3, BOARD_DIM=4\n while True:\n try:\n square_dim = int(input(\"Please enter the square dimension of\"\n \" your sudoku board: \"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n while True:\n try:\n board_dim = int(input(\"Please enter the board dimension of \"\n \"your sudoku board: \"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n\n # get the user input sudoku board\n print(\"Now it is time to enter your sudoku board for solving.\")\n\n to_solve_board = read_a_board(square_dim, board_dim)\n print_board(square_dim, board_dim, to_solve_board)\n solve_board(square_dim, to_solve_board)\n print(\"Solution\")\n print_board(square_dim, board_dim, to_solve_board)", "def play():\r\n count = 0\r\n while True:\r\n count += 1\r\n board = create_board(6, 6, density=40)\r\n guess = create_guess_board(board)\r\n fill_empty(board, guess)\r\n print('Attempting to solve (%d) ...' % count)\r\n solver(board, guess)\r\n if solved(board, guess):\r\n guess = create_guess_board(board)\r\n fill_empty(board, guess)\r\n break\r\n\r\n cursor = [0, 0]\r\n\r\n while True:\r\n print()\r\n print_board(board, guess, cursor=cursor)\r\n if solved(board, guess):\r\n print('\\nYou solved it. 
Great work!')\r\n break\r\n # stuff = input('Your turn [1-9, ~, ^, s, ?, h, q]: ')\r\n # command = stuff.split()\r\n print('Your turn [1-9, ~, ^, s, ?, h, q]: ')\r\n stuff = msvcrt.getch()\r\n print('stuff = /%s/' % stuff)\r\n command = stuff.split()\r\n if len(command) == 0:\r\n continue\r\n if command[0] == 'q':\r\n break\r\n elif command[0] == 'h':\r\n print_board(board, cursor=cursor)\r\n elif command[0] == 's':\r\n solver(board, guess)\r\n elif command[0] == '?':\r\n if board[cursor[0]][cursor[1]] == BOARD_TENT:\r\n set(guess, cursor[0], cursor[1], BOARD_TENT)\r\n if board[cursor[0]][cursor[1]] == BOARD_EMPTY:\r\n set(guess, cursor[0], cursor[1], BOARD_EMPTY_GUESS)\r\n elif command[0] in ['~', '^']:\r\n if get(guess, cursor[0], cursor[1]) == BOARD_TREE:\r\n print('Please do not cut down the trees!')\r\n else:\r\n set(guess, cursor[0], cursor[1], command[0])\r\n elif command[0] in ['1', '2', '3', '4', '6', '7', '8', '9']:\r\n dir = int(command[0])\r\n adjacent = [\r\n [0, 0], #\r\n [1, -1], # 1\r\n [1, 0], # 2\r\n [1, 1], # 3\r\n [0, -1], # 4\r\n [0, 0], #\r\n [0, 1], # 6\r\n [-1, -1], # 7\r\n [-1, 0], # 8\r\n [-1, 1] # 9\r\n ]\r\n cursor[0] += adjacent[dir][0]\r\n cursor[1] += adjacent[dir][1]\r\n if cursor[0] < 0:\r\n cursor[0] = len(board) - 1\r\n if cursor[0] >= len(board):\r\n cursor[0] = 0\r\n if cursor[1] < 0:\r\n cursor[1] = len(board[0]) - 1\r\n if cursor[1] >= len(board[0]):\r\n cursor[1] = 0\r\n else:\r\n print(\"\"\"\r\nPlease type one of:\r\n # - Move the cursor in that direction\r\n ~ - Place an 'empty' marker at the current square\r\n ^ - Place a 'tent' marker at the current square\r\n h - Hint\r\n q - Quit\r\n\"\"\")\r\n\r\n print_board(board)", "def solve_puzzle(ctx):\r\n puzzle = PuzzleUtils.load_puzzle(ctx.obj[PUZZLE_FILE_KEY])\r\n puzzle.solve()\r\n logger.info(puzzle.dump_grid())", "def play():\n\tboard = ['_','_','_','_','_','_','_','_','_']\t\t# create a board\n\tplus = np.array([\t[1, \tFalse, \t3, 6],\t\t\t# nparray that will be used to add to the appropriate slots in lines nparray\n\t\t\t\t\t\t[False, False, \t3, 7],\t\t\t# Falses make the array add to the number of turns in lines nparray, and additionally fill what would otherwise be a jagged array\n\t\t\t\t\t\t[2, \tFalse, \t3, 8],\n\t\t\t\t\t\t[False,\tFalse, \t4, 6],\n\t\t\t\t\t\t[1, \t2, \t\t4, 7],\n\t\t\t\t\t\t[False,\tFalse, \t4, 8],\n\t\t\t\t\t\t[2, \tFalse, \t5, 6],\n\t\t\t\t\t\t[False,\tFalse, \t5, 7],\n\t\t\t\t\t\t[1, \tFalse, \t5, 8]\t])\n\t\t\t\t\t\t\n\tlines = np.zeros ((2,9))\t\t\t\t\t\t\t# nparray that will be used to store the number of O's and X's along each possible winning line\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 0 = number of turns, 1 = diagonal from top left to bottom right, 2 = diagonal from top right to bottom left, 3-5 = colums 1-3, 6-8 = rows 1-3\n\t\n\tprint ('',board[:3],[0,1,2],'\\n',board[3:6],[3,4,5],'\\n',board[6:],[6,7,8])\t\t# print the board and how to place a tile in each slot to console \n\t\n\tfor i in range(9):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# 9 turns\n\t\t\n\t\twhile True:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ask the user for an input, ensure it is valid, if it is not ask again informing them of how to \n\t\t\ttry :\n\t\t\t\tif i%2 ==0:\n\t\t\t\t\tnum = int(input('Crosses\\' turn: '))\n\t\t\t\telse:\n\t\t\t\t\tnum = int(input('Noughts\\' turn: '))\n\t\t\t\tif not 0 <= num < 9:\n\t\t\t\t\tprint(' That is not on the board \\n Please enter an integer 0-8 inclusive')\n\t\t\t\telif board[num] != '_':\n\t\t\t\t\tprint(' Space already taken\\n Please choose a different 
space')\n\t\t\t\telse:\n\t\t\t\t\tbreak\t\n\t\t\texcept ValueError:\n\t\t\t\tprint(' That input type doesn\\'t work\\n Please enter an integer 0-8 inclusive, in digits')\n\t\t\t\t\t\n\t\tif i%2 == 0:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if it's crosses' turn\n\t\t\n\t\t\tboard[num] = 'X'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# put a cross in the board\n\t\t\tlines[0,plus[num,:]] += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t# add one to the appropriate lines for crosses\n\t\t\tprint ('',board[:3],[0,1,2],'\\n',board[3:6],[3,4,5],'\\n',board[6:],[6,7,8])\t\t# reprint the board and guide\n\t\t\tif np.any(lines[0,1:] == 3 ):\t\t\t\t\t\t\t\t\t\t\t\t\t# check if crosses win\n\t\t\t\treturn BoardState.CROSSES_WIN\n\t\t\t\t\n\t\t\t\t\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if it's noughts' turn\n\t\t\t\t\t\t\n\t\t\tboard[num] = 'O'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# put a nought in the board\n\t\t\tlines[1,plus[num,:]] += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t# add one to the appropriate lines for noughts\n\t\t\tprint ('',board[:3],[0,1,2],'\\n',board[3:6],[3,4,5],'\\n',board[6:],[6,7,8])\t\t# reprint the board and guide\n\t\t\tif np.any(lines[1,1:] == 3 ):\t\t\t\t\t\t\t\t\t\t\t\t\t# check if noughts win\n\t\t\t\treturn BoardState.NOUGHTS_WIN\n\t\n\treturn BoardState.DRAW", "def print_instructions():\n print(\"Welcome to the game of Sudoku!\")\n print(\"--------------------------------\")\n print(\"The goal of the game is to fill every 'square' here with a number.\")\n print(\"The rules of the game are simple:\")\n print(\" Rule No 1: You can only enter numbers 1-9 in each square.\")\n print(\" Rule No 2: You cannot repeat the use of a number within a row, column or 3x3 segment.\")\n print(\"--------------------------------\")\n print(\"Instructions:\")\n print(\" - You will be prompted to enter a row, a column, and then a number input.\")\n print(\" - The rows and column inputs are 0-indexed, meaning it goes from 0-8.\")\n print(\" - The number input is expected to be 1-9. Any other inputs will not be accepted.\")\n print(\" - Once you've filled out every square, the game will automatically check to see if your solution is valid!\")\n print(\" - If not, it will prompt you to try again, and you can continue to change your inputs or even write\")\n print(\" over your original entries.\")\n print(\"Good luck, have fun!\")", "def showSolution(self):\n\n try:\n\n if self.showSolVar.get(): # switch to 'show solution' mode\n self.statusbar.set('empty')\n self.creditAvailable = 0\n self.optionsmenu.entryconfig(5, state=DISABLED)\n self.clock.stop()\n self.noSolMoves = 0 # remember how many moves have been played\n # in this mode, so we can return to the\n # correct point afterwards\n\n if self.options.animateSolVar.get(): # animate solution\n self.board.state('disabled')\n self.undoButton.config(state=DISABLED)\n\n for i in range(self.cursor.wrongVariation): # if in a wrong var. 
currently, go back first\n self.board.undo()\n self.cursor.previous()\n self.noMovesMade = self.noMovesMade - 1\n self.board.update_idletasks()\n time.sleep(0.5 * self.options.replaySpeedVar.get())\n\n if (self.inputColor == 'B' and not self.invertColor) or \\\n (self.inputColor == 'W' and self.invertColor):\n nM = 'B'\n nnM = 'W'\n else:\n nM = 'W'\n nnM = 'B'\n\n nnMove = self.board.invert(self.inputColor)\n\n while not self.cursor.atEnd: # play out solution\n corr = self.cursor.correctChildren()\n c = self.cursor.next(corr[randint(0,len(corr)-1)]) # choose one of the correct variations\n # by random\n pos = self.convCoord(c[nM][0], self.orientation)\n self.board.play(pos,self.inputColor)\n self.noSolMoves = self.noSolMoves+1\n self.master.update_idletasks()\n time.sleep(0.5 * self.options.replaySpeedVar.get())\n if not self.cursor.atEnd:\n c = self.cursor.next(randint(0,self.cursor.noChildren()-1)) # choose an answer randomly\n pos = self.convCoord(c[nnM][0], self.orientation)\n self.board.play(pos, nnMove)\n self.noSolMoves = self.noSolMoves+1\n self.master.update_idletasks()\n time.sleep(0.5 * self.options.replaySpeedVar.get())\n self.statusbar.set('solved2')\n\n else: # navigate Solution\n self.statusbar.set('empty')\n self.board.state('normal', self.navSolutionNextMove)\n self.undoButton.config(command = self.undoNavSol)\n self.markRightWrong()\n\n else: # switch back to normal mode\n self.statusbar.set('empty')\n self.board.delMarks()\n self.undoButton.config(state=NORMAL, command = self.undo2)\n self.optionsmenu.entryconfig(5, state=NORMAL)\n\n self.board.state('normal', self.nextMove)\n self.board.undo(self.noSolMoves)\n for i in range(self.noSolMoves):\n self.cursor.previous()\n self.noSolMoves=0\n\n except SGFError:\n showwarning(_('SGF Error'), _('Error in SGF file!'))", "def solve(self) -> None:\n sudoku = Sudoku(self.get_data())\n solver = SudokuSolver(sudoku)\n validation = solver.validate_sudoku()\n if validation == 1:\n solver.main_sequence()\n self.get_result(solver)\n elif validation == -1:\n self.status_bar.config(text='This sudoku array contains invalid digits.', fg='red')\n return None", "def main():\n move_to_beeper()\n place_in_puzzle()\n return_to_start()", "def checkPuzzle(self):\n print('Got to checkPuzzle')", "def solve_puzzle(self):\n # replace with your code\n return \"\"", "def start():\r\n global sudoku\r\n sudoku.fill(0)\r\n \r\n #scan all entries and collect data\r\n init = [] #list with the coordinates for the initial values\r\n for k,v in ent_dict.items():\r\n txt = v.get()\r\n if txt:\r\n if check_digit(txt):\r\n sudoku[k[0],k[1]]=int(txt)\r\n init.append((k[0],k[1]))\r\n else:\r\n #Wrong input\r\n v.config({\"background\": \"Red\"})\r\n print(\"Error, all inputs must be between 1-9\")\r\n print(\"Invalid input is: \",txt,\" in position: \",k)\r\n txt_var0 = \"Error, all inputs must be between 1-9\\n\"+\"Press Clear\\n\"+\"Enter new data\"\r\n txt_res.insert(tk.INSERT,txt_var0)\r\n return False\r\n #All initial data collected\r\n #Set the Start button to disabled\r\n st.config(state=tk.DISABLED)\r\n #start the game\r\n if sudoku_validate(sudoku):\r\n start_sudoku(sudoku)\r\n gen_output(fr_result,sudoku,set(init))\r\n time_stamp = \"Game at \"+ datetime.now().strftime('%Y-%m-%d %H:%M:%S') + \"\\n\" \r\n txt_res.insert(tk.INSERT,time_stamp)\r\n backtrack,exec_time = get_stats()\r\n txt_var1 = \"the number of recursion steps are: \" + str(backtrack) + \"\\n\" \r\n txt_res.insert(tk.INSERT,txt_var1)\r\n exec_time = round(exec_time,2)\r\n if exec_time < 
0.5:\r\n exec_time = \" less than 0.5 seconds \"\r\n txt_var2 = \"the approximated execution time in seconds : \" + str(exec_time) + \"\\n\" + \"\\n\" \r\n txt_res.insert(tk.INSERT,txt_var2)\r\n \r\n else:\r\n print(\">>>>>>>>Invalid initial data>>>>>>>>>\")\r\n txt_var3 = \"Invalid Initial data configuration.\\nPress Clear \"+ \"\\n\"+ \"Enter new correct configuration\"\r\n txt_res.insert(tk.INSERT,txt_var3)", "def input_sudoku(self):\r\n print(\"Input Sudoku:\")\r\n for a,row in enumerate(self._original_sudoku):\r\n self._original_sudoku[a] = [int(i) for i in input(\"Row {0} (using \\\r\n' ' as separators):\\n\".format(a+1)).strip().split()]", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def play_game(self):\r\n try: # Asks user how many rounds they want to play:\r\n game_rounds = int(input(\r\n \"Please enter the desired number of rounds to play: \"\r\n ))\r\n except ValueError: # Ensures input value is correct\r\n print(\"Sorry, I didn't quite catch that.\\nPlease try again,\"\r\n \" and make sure you enter a valid number.\\n\")\r\n return self.play_game()\r\n # Game Starts:\r\n print(\"\\nGame start!\\n\")\r\n for round in range(game_rounds):\r\n print(f\"ROUND {round}:\")\r\n self.play_round()\r\n self.game_over() # Game concludes naturally.\r", "def done_or_not(board): #board[i][j]\n # your solution here\n # ..\n # return 'Finished!'\n # ..\n # or return 'Try again!'\n\n\n compare_list = [1,2,3,4,5,6,7,8,9]\n columns = {0:[], 1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[]}\n squares = {0: [], 1:[],2:[],3:[],4:[],5:[],6:[],7:[],8:[]}\n\n \n for row in range(len(board)):\n # print('row',row,' =',sorted(board[row]))\n if sorted(board[row]) != compare_list:\n # print('failed row = ', sorted(board[row]))\n # print(\"Try again!\")\n return \"Try again!\"\n for column in range(len(board)):\n columns[column].append(board[row][column])\n \n \n square = 0\n while square < 9:\n squares[square].extend(board[square][0:3])\n 
squares[square].extend(board[square + 1][0:3])\n squares[square].extend(board[square + 2][0:3])\n squares[square+1].extend(board[square][3:6])\n squares[square+1].extend(board[square + 1][3:6])\n squares[square+1].extend(board[square + 2][3:6])\n squares[square+2].extend(board[square][6:9])\n squares[square+2].extend(board[square + 1][6:9])\n squares[square+2].extend(board[square + 2][6:9])\n # print(squares)\n\n square += 3\n\n # print('columns = ', columns)\n # print('squares = ', squares)\n \n for column in columns.keys():\n # print('column',column,'=', sorted(columns[column]))\n if sorted(columns[column]) != compare_list:\n # print('column =', column, 'failed sorted column =', sorted(columns[column]))\n # print(\"Try again!\")\n return \"Try again!\"\n for square in squares.keys():\n # print('square',square,'=',sorted(squares[square]))\n if sorted(squares[square]) != compare_list:\n # print('square = ',square,'failed sorted square =', sorted(squares[square]))\n # print(\"Try again!\")\n return \"Try again!\"\n \n # print(\"Finished!\")\n return \"Finished!\"", "def play_game():\n display_board()\n while ongoing_game:\n handle_turn(current_player)\n check_if_game_over()\n swap_player()\n global board\n if winner == \"X\" or winner == \"O\":\n print(\"<-------- Congratulations \" +\n winner + \", you win. -------->\")\n play_again()", "def build_game_board(self):\n # retrieves new sudoku puzzle from dataset\n sudoku_set = self.data.get_sudoku_set()\n sudoku_problem, sudoku_solution = sudoku_set[0], sudoku_set[1]\n\n # removes old game boards\n self.board = []\n self.puzzle = []\n self.alg_solution = []\n self.data_solution = []\n\n # sets up sudoku puzzle to array format\n segment = []\n for num in sudoku_problem:\n segment.append(int(num))\n if len(segment) == 9:\n self.board.append(segment)\n self.puzzle.append(segment[:])\n segment = []\n\n self.alg_solution = alg.solve_sudoku(self.puzzle) # uses sudoku backtracking algorithm to solve puzzle\n\n # sets up the provided sudoku puzzle solution from dataset to array format\n for num in sudoku_solution:\n segment.append(int(num))\n if len(segment) == 9:\n self.data_solution.append(segment)\n segment = []\n\n self.game_state = \"Not Solved, Keep Trying!\"", "def solution(self):\n self.solve()\n for x in self.game:\n print(x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints to console a set of instructions for how to play a game of Sudoku.
def print_instructions(): print("Welcome to the game of Sudoku!") print("--------------------------------") print("The goal of the game is to fill every 'square' here with a number.") print("The rules of the game are simple:") print(" Rule No 1: You can only enter numbers 1-9 in each square.") print(" Rule No 2: You cannot repeat the use of a number within a row, column or 3x3 segment.") print("--------------------------------") print("Instructions:") print(" - You will be prompted to enter a row, a column, and then a number input.") print(" - The rows and column inputs are 0-indexed, meaning it goes from 0-8.") print(" - The number input is expected to be 1-9. Any other inputs will not be accepted.") print(" - Once you've filled out every square, the game will automatically check to see if your solution is valid!") print(" - If not, it will prompt you to try again, and you can continue to change your inputs or even write") print(" over your original entries.") print("Good luck, have fun!")
[ "def instructions():\n\t\n\tprint('''\\n\n\tToday we will play the perennial favorite game of...\\n\n\tRock! Paper!! Scissors!!!.\\n\n\tThe objective of the game is to outthink your opponent (in this case me) and defeat.\\n\n\tThe rules are very simple\\n\n\t1. Paper covers the Rock\\n\n\t2. Rock breaks the Scissors\\n\n\t3. Scissors cut the Paper\\n\\n\n\t\n\tChoose your move from the following:\\n\n\t1. Paper (p)\\n\n\t2. Rock (r)\\n\n\t3. Scissors (s)\\n\\n\n\t\n\tAre you ready? Alright then, let\\'s play...\\n''')", "def _print_instructions():\n print('')\n print('Instructions:')\n print('Start playing when you want to begin the call phrase.')\n if FLAGS.end_call_control_number is not None:\n print('When you want to end the call phrase, signal control number %d '\n 'with value 127, or stop playing and wait one clock tick.'\n % FLAGS.end_call_control_number)\n else:\n print('When you want to end the call phrase, stop playing and wait one '\n 'clock tick.')\n print('Once the response completes, the interface will wait for you to '\n 'begin playing again to start a new call phrase.')\n print('')\n print('To end the interaction, press CTRL-C.')", "def main():\n\n from sys import argv\n\n try:\n grid = argv[1]\n except IndexError:\n print(\"Usage:\", argv[0], \"<grid>\")\n return\n\n try:\n sudoku = Sudoku.parse(grid)\n except Exception as error:\n print(\"[!] Error:\", error)\n return\n\n print(sudoku.pretty_print())", "def instruction():\n print('- - - - - - - - - - - - - - - - - - - - -')\n print(\"this is instruction for tic tac toe game\".upper())\n print('- - - - - - - - - - - - - - - - - - - - -')\n print('This is game for two players')\n print('Each player can choose a number between 1 and 9')\n print('Numbers represent the fields on the board')\n print('You can choose only numbers that are not taken by any player')\n list_of_symbols = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n print_board(list_of_symbols)\n print('You win the game if you have 3 symbols in column, row or diagonally')\n print('- - - - - - - - - - - - - - - - - - - - -')\n\n begin_game()", "def print_grid(puzzle: str) -> None:\r\n grid = generate_grid(puzzle)\r\n print(grid)", "def print_sudoku(sudoku, name='SUDOKU'):\n\n print \"### {} ###\".format(name)\n for row in sudoku:\n print row", "def display_game_board():\n print(game_board[0] + \" | \" + game_board[1] + \" | \" + game_board[2])\n print(game_board[3] + \" | \" + game_board[4] + \" | \" + game_board[5])\n print(game_board[6] + \" | \" + game_board[7] + \" | \" + game_board[8])", "def print_sudoku_solution(solution):\n for row in range(9):\n for col in range(9):\n print(solution['%d-%d' % (row, col)][0], end=\" \"),\n if col == 2 or col == 5:\n print('|', end=\" \"),\n print(\"\")\n if row == 2 or row == 5:\n print('------+-------+------')", "def show_commands():\n print('What action would you like to take:')\n print('[1] Add a Task')\n print('[2] View the Tasks')\n print('[3] Delete Task')\n print('[4] View Occupants')\n print('[5] Add Occupant')\n print('[6] Delete Occupant')\n print('e[X]it app')\n print('[h, ?] 
Help (this info)')\n print()", "def print_sudoku_solution(solution):\n for row in range(9):\n for col in range(9):\n print solution['%d-%d' % (row, col)][0],\n if col == 2 or col == 5:\n print '|',\n print\n if row == 2 or row == 5:\n print '------+-------+------'", "def show_possible_moves():\n print(\"Possible moves:\")\n print(\"\\t\\\\sw - Moves a card from Stock to Waste.\")\n print(\"\\t\\\\wf <suit> - Moves a card from Waste to the <suit> Foundation. Suit must be one of: \"\n \"clubs/diamonds/hearts/spades.\")\n print(\"\\t\\\\wt <tableau_num> - Moves a card from Waste to the <tableau_num> Tableau. <tableau_num> must be \"\n \"between 1 and 7, inclusive. \")\n print(\"\\t\\\\tf <tableau_num> <suit> - Moves a card from the <tableau_num> Tableau to the <suit> foundation. \"\n \"Same input rules as above. \")\n print(\"\\t\\\\tt <num_1> <num_2> - Moves all face-up cards from <num_1> Tableau to <num_2> Tableau. Same input \"\n \"rules as above. \")\n print(\"\\t\\\\help - Displays all possible moves. \")\n print(\"\\t\\\\quit - Quit the game.\\n\")", "def print_board(self):\n\n print\n\n for row in xrange(8):\n for column in xrange(8):\n if self.squares[row][column]:\n print self.squares[row][column],; sys.stdout.write(u'')\n else:\n if self.dark_square((row, column)):\n print u' __ ',; sys.stdout.write(u'')\n else:\n print u' . ',; sys.stdout.write(u'')\n print\n print", "def print_possibilities(self):\n print('')\n\n for y in range(27):\n s = ''\n\n if y in [3, 6, 12, 15, 21, 24]:\n print(\n '- - - - - - - - - - - █ - - - - - - - - - - - █ - - - - '\n '- - - - - - - ')\n if y in [9, 18]:\n print(\n '█████████████████████████████████████████████████████████'\n '█████████████')\n\n for x in range(9):\n if x in [1, 2, 4, 5, 7, 8]:\n s += '| '\n if x in [3, 6]:\n s += '█ '\n\n number_y = math.floor(y / 3)\n\n for z in range((y % 3) * 3, (y % 3) * 3 + 3):\n s += '{} '.format(self.possibilities[number_y][x][z])\n print(s)\n print('')\n\n print('{} / 81 numbers left to assign'.format(self.get_numbers_left()))\n print('{} / 648 possibilities removed'.format(\n self.get_possibilities_removed()))\n\n self.print_current_puzzle()", "def print_sudoku(sudoku):\n print(BOX_CHARS['TOP LEFT'], end=\"\")\n for x in range(8):\n w = 'D' if x % 3 == 2 else 'S'\n print(3 * BOX_CHARS['HOR D'] + BOX_CHARS[f'TOP {w}'], end=\"\")\n print(3 * BOX_CHARS['HOR D'] + BOX_CHARS['TOP RIGHT'])\n\n for y in range(9):\n print(BOX_CHARS['VER D'], end=\"\")\n chars = [str(i) if i != 0 else \" \" for i in sudoku[:, y]]\n for x in range(8):\n w = 'D' if x % 3 == 2 else 'S'\n print(f\" {chars[x]} {BOX_CHARS[f'VER {w}']}\", end=\"\")\n print(f\" {chars[-1]} {BOX_CHARS['VER D']}\")\n if y == 8:\n break\n\n hw = 'D' if y % 3 == 2 else 'S'\n print(BOX_CHARS[f'LEFT {hw}'], end=\"\")\n for x in range(8):\n print(3 * BOX_CHARS[f'HOR {hw}'], end=\"\")\n vw = 'D' if x % 3 == 2 else 'S'\n print(BOX_CHARS[f'CROSS {hw}{vw}'], end=\"\")\n print(3 * BOX_CHARS[f'HOR {hw}'], end=\"\")\n print(BOX_CHARS[f'RIGHT {hw}'])\n\n print(BOX_CHARS['BOTTOM LEFT'], end=\"\")\n for x in range(8):\n w = 'D' if x % 3 == 2 else 'S'\n print(3 * BOX_CHARS['HOR D'] + BOX_CHARS[f'BOTTOM {w}'], end=\"\")\n print(3 * BOX_CHARS['HOR D'] + BOX_CHARS['BOTTOM RIGHT'])", "def main() -> None:\n print(\"Welcome to the sudoku solver!\")\n\n # prompt users to tell the square size of the sudoku\n # Example: A 12 * 12 soduoku board has SQUARE_DIM=3, BOARD_DIM=4\n while True:\n try:\n square_dim = int(input(\"Please enter the square dimension of\"\n \" your sudoku board: 
\"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n while True:\n try:\n board_dim = int(input(\"Please enter the board dimension of \"\n \"your sudoku board: \"))\n break\n except ValueError:\n print(\"Invalid input. Try again please.\")\n\n # get the user input sudoku board\n print(\"Now it is time to enter your sudoku board for solving.\")\n\n to_solve_board = read_a_board(square_dim, board_dim)\n print_board(square_dim, board_dim, to_solve_board)\n solve_board(square_dim, to_solve_board)\n print(\"Solution\")\n print_board(square_dim, board_dim, to_solve_board)", "def main():\n game = SudokuGame(3)\n game.print_grid()\n choice = input(\"Would you like to play the board or would you like a \"\n \"solution to be generated for you? Enter 'Play' or 'Generate' \")\n choice.lower()\n if choice == \"play\":\n while not game.is_full():\n x = int(input(\"What x-coordinate to place your number? \"))\n y = int(input(\"What y-coordinate to place your number? \"))\n num = int(input(\"What number would you like to place?\"))\n game.make_move(x, y, num)\n game.print_grid()\n if not game.verify_solution():\n print(\"WRONG SOLUTION\")\n else:\n print(\"YOU WIN\")\n else:\n game.find_solution()\n game.print_grid()", "def print_sudoku(self):\r\n print(self._sudoku)", "def display(sudoku_map):\n width = 1+max(len(sudoku_map[s]) for s in squares)\n line = '+'.join(['-'*width*3]*3)\n for r in rows:\n print(''.join(sudoku_map[r+c].center(width) + ('|' if c in '36' else '') for c in cols))\n \n if r in 'CF':\n print(line)\n print()", "def print_board(a_guesses, a_checks):\n\n print()\n print(\"Your game so far: \")\n\n for (a_guess, a_check) in zip(a_guesses, a_checks):\n\n # there must be a nicer and cleaner way\n # to print list of chars as string with spaces\n guess_str = \"\"\n for a_peg in a_guess:\n guess_str = \"{} {}\".format(guess_str, a_peg)\n\n print()\n print(\"{} {}\".format(guess_str, a_check))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates four plotly visualizations using the New York Times Archive API
def return_figures(): # Add New York Times API Key nyt = NYTAPI("AsjeHhqDYrePA2GMPpYoY1KAKAdG7P99") # Select Year and Month of articles data = nyt.archive_metadata( date = datetime.datetime(2020, 7, 1) ) def data_to_df(data): # Initiate list for restructured information data_list = [] # Collect Data from API dictionary for article in data: new_data = [article.get("section_name"), article.get("news_desk"), article.get("pub_date"), article.get("headline").get("main"), article.get("abstract"), article.get("lead_paragraph"), article.get("type_of_material"), article.get("word_count")] # Append list of information from article to data list data_list.append(new_data) # Convert data list to DataFrame df = pd.DataFrame(data_list, columns=["section_name","news_desk", "pub_date", "headline", "abstract", "lead_paragraph", "type_of_material", "word_count"]) return df df = data_to_df(data) # first chart plots section distribution # as a pie chart graph_one = [] df_one = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_one.section_name.value_counts().index values = df_one.section_name.value_counts().values graph_one.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_one = dict(title = 'Distribution of sections of this months New York Times articles') # second chart plots section distribution # as a pie chart graph_two = [] df_two = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_two.news_desk.value_counts().index values = df_two.news_desk.value_counts().values graph_two.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_two = dict(title = 'Distribution of news desk of this months articles') # third chart plots section distribution # as a pie chart graph_three = [] df_three = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_three.type_of_material.value_counts().index values = df_three.type_of_material.value_counts().values graph_three.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_three = dict(title = 'Distribution for type of material of this months articles') # fourth chart plots section distribution # as a pie chart graph_four = [] # Convert publishing date columns to datetime format df["pub_date"] = pd.to_datetime(df["pub_date"]).dt.date df_four = df.copy() df_four = df_four.pub_date.value_counts().to_frame().sort_index() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values x_val = df_four.index y_val = df_four.values graph_four.append( go.Scatter( x=df_four.index, y=df_four["pub_date"], mode="lines", name="Articles" ) ) layout_four = dict(title = 'Number of articles published by days') # fourth chart plots section distribution # as a pie chart graph_five = [] # Calculate average number of words for this months articles avg_word_count = round(df.word_count.mean(),0) graph_five.append( go.Table( header=dict(values=['Average Word Count']), cells=dict(values=[avg_word_count]) ) ) layout_five = dict(title = '') # append all charts figures = [] figures.append(dict(data=graph_one, layout=layout_one)) figures.append(dict(data=graph_two, layout=layout_two)) figures.append(dict(data=graph_three, layout=layout_three)) figures.append(dict(data=graph_four, layout=layout_four)) figures.append(dict(data=graph_five, layout=layout_five)) return figures
[ "def return_figures():\n # read the energy data\n energy_df = pd.read_csv(\"data/all_energy_statistics.csv\")\n \n color1 = 'rgb(0,153,0)'\n color2 = 'rgb(02,102,255)'\n color3 = 'rgb(255,204,153)'\n color4 = 'rgb(153,0,153)'\n \n # CHART 1 ================================================\n # select data about Aviation gasoline - Final Consumption\n # as a line chart\n selected_energy_df = energy_df[energy_df[\"commodity_transaction\"].isin([\"Aviation gasoline - Final consumption\"])]\n fr_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"France\"])]\n uk_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"United States\"])]\n pt_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"Portugal\"])]\n mx_dt = selected_energy_df[selected_energy_df[\"country_or_area\"].isin([\"Mexico\"])]\n x1 = fr_dt[\"year\"].values\n y1 = fr_dt[\"quantity\"].values\n x2 = uk_dt[\"year\"].values\n y2 = uk_dt[\"quantity\"].values\n x3 = pt_dt[\"year\"].values\n y3 = pt_dt[\"quantity\"].values\n x4 = mx_dt[\"year\"].values\n y4 = mx_dt[\"quantity\"].values\n\n graph_one = [] \n graph_one.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n mode = 'lines',\n line=dict(color=color1)\n )\n )\n graph_one.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_one.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_one.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_one = dict(title = 'Aviation Gasoline Consumption',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n # CHART 2 ================================================\n # select data about Aviation gasoline - Exports\n # as a line chart\n aviation_gas_exp = energy_df[energy_df[\"commodity_transaction\"].isin([\"Aviation gasoline - Exports\"])]\n fr_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"France\"])]\n uk_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"United States\"])]\n pt_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"Portugal\"])]\n mx_av_exp = aviation_gas_exp[aviation_gas_exp[\"country_or_area\"].isin([\"Mexico\"])]\n\n x1 = fr_av_exp[\"year\"].values\n y1 = fr_av_exp[\"quantity\"].values\n x2 = uk_av_exp[\"year\"].values\n y2 = uk_av_exp[\"quantity\"].values\n x3 = pt_av_exp[\"year\"].values\n y3 = pt_av_exp[\"quantity\"].values\n x4 = mx_av_exp[\"year\"].values\n y4 = mx_av_exp[\"quantity\"].values\n\n graph_two = [] \n graph_two.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n line=dict(color=color1),\n mode = 'lines'\n )\n )\n graph_two.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_two.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_two.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_two = dict(title = 'Aviation gasoline - Exports',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n# CHART 3 
================================================\n # select data about Crude Petroleum - Refinery Capacity\n # as a line chart\n petroleum_ref_cap = energy_df[energy_df[\"commodity_transaction\"].isin([\"Crude petroleum - refinery capacity\"])]\n fr_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"France\"])]\n uk_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"United States\"])]\n pt_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"Portugal\"])]\n mx_ref_cap = petroleum_ref_cap[petroleum_ref_cap[\"country_or_area\"].isin([\"Mexico\"])]\n\n x1 = fr_ref_cap[\"year\"].values\n y1 = fr_ref_cap[\"quantity\"].values\n x2 = uk_ref_cap[\"year\"].values\n y2 = uk_ref_cap[\"quantity\"].values\n x3 = pt_ref_cap[\"year\"].values\n y3 = pt_ref_cap[\"quantity\"].values\n x4 = mx_ref_cap[\"year\"].values\n y4 = mx_ref_cap[\"quantity\"].values\n\n graph_three = [] \n graph_three.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n mode = 'lines',\n line=dict(color=color1)\n )\n )\n graph_three.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_three.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_three.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_three = dict(title = 'Crude Petroleum - Refinery Capacity',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n# CHART 4 ================================================\n # select data about Conventional crude oil - total energy supply\n # as a line chart\n petroleum_supply = energy_df[energy_df[\"commodity_transaction\"].isin([\"Conventional crude oil - total energy supply\"])]\n fr_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"France\"])]\n uk_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"United Kingdom\"])]\n us_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"United States\"])]\n pt_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"Portugal\"])]\n mx_petr_sup = petroleum_supply[petroleum_supply[\"country_or_area\"].isin([\"Mexico\"])]\n\n x1 = fr_petr_sup[\"year\"].values\n y1 = fr_petr_sup[\"quantity\"].values\n x2 = uk_petr_sup[\"year\"].values\n y2 = uk_petr_sup[\"quantity\"].values\n x3 = pt_petr_sup[\"year\"].values\n y3 = pt_petr_sup[\"quantity\"].values\n x4 = mx_petr_sup[\"year\"].values\n y4 = mx_petr_sup[\"quantity\"].values\n\n graph_four = [] \n graph_four.append(\n go.Scatter(\n x = x1,\n y = y1,\n name = 'France',\n mode = 'lines',\n line=dict(color=color1)\n )\n )\n graph_four.append(\n go.Scatter(\n x = x2,\n y = y2,\n name = 'UK',\n mode = 'lines',\n line=dict(color=color2)\n )\n )\n graph_four.append(\n go.Scatter(\n x = x3,\n y = y3,\n name = 'Portugal',\n mode = 'lines',\n line=dict(color=color3)\n )\n )\n graph_four.append(\n go.Scatter(\n x = x4,\n y = y4,\n name = 'Mexico',\n mode = 'lines',\n line=dict(color=color4)\n )\n )\n\n layout_four = dict(title = 'Conventional crude oil - total energy supply',\n xaxis = dict(title = 'Years'),\n yaxis = dict(title = 'Metric Tons (thousand)'),\n )\n\n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, 
layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_three, layout=layout_three))\n figures.append(dict(data=graph_four, layout=layout_four))\n\n return figures", "def plot_map(end_year: int) -> None:\n total = projection.get_percentage_increase(end_year)\n temps = projection.temp_prediction_all(end_year)\n print(temps)\n\n mapbox_access_token = 'pk.eyJ1Ijoiam9qb29udGhhdCIsImEiOiJja2lta3Uzbnow' \\\n 'YWRtMzVud3NrNjI3N2JjIn0.kYIFPU3HJbjDsNYyQFaGdA'\n df = pd.read_csv(\n 'plotly_map_station_locations.csv')\n site_lat = df.lat\n site_lon = df.lon\n locations_name = df.text.tolist()\n\n temp_values = temps['temp'].tolist()\n max_temp = max(temp_values)\n min_temp = min(temp_values)\n temp_diff = max_temp - min_temp\n location_colors = []\n\n for i in range(len(locations_name)):\n location = locations_name[i]\n locations_name[i] = \"{}: Projected to have {:.4f}% of increase until {}\".format(\n location.title(), total[location], end_year)\n temp_value = temps.loc[temps['location'] == location]['temp'][0][0]\n ratio = (temp_value - min_temp) / temp_diff\n gb_value = 255 - int(ratio * 255)\n rgb = 'rgb(255, {}, {})'.format(gb_value, gb_value)\n location_colors.append(rgb)\n\n\n fig = go.Figure()\n\n fig.add_trace(go.Scattermapbox(\n lat=site_lat,\n lon=site_lon,\n name='Weather Station Temperature',\n mode='markers',\n marker=dict(\n size=20,\n color=location_colors,\n colorscale=[[0, \"rgb(255, 255, 255)\"],\n [1, \"rgb(255, 0, 0)\"]],\n showscale=False,\n opacity=0.75\n ),\n text=locations_name,\n hoverinfo='text',\n hoverlabel=dict(\n bgcolor='rgb(255, 217, 255)',\n font_size=40,\n font_family=\"Helvetica\"\n )\n ))\n\n fig.add_trace(go.Scattermapbox(\n lat=site_lat,\n lon=site_lon,\n name='',\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=0,\n color=temps['temp'],\n colorscale=[[0, \"rgb(255, 255, 255)\"],\n [1, \"rgb(255, 0, 0)\"]],\n showscale=True,\n opacity=0.75\n ),\n text=locations_name,\n hoverinfo='text',\n hoverlabel=dict(\n bgcolor='rgb(182, 252, 213)',\n font_size=40,\n font_family=\"Helvetica\"\n )\n ))\n\n fig.add_trace(go.Scattermapbox(\n lat=site_lat,\n lon=site_lon,\n name='',\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=7,\n color='rgb(182, 252, 213)', # percentage\n opacity=0.8\n ),\n text=locations_name,\n hoverinfo='text',\n hoverlabel=dict(\n bgcolor='rgb(182, 252, 213)',\n font_size=14,\n font_family=\"Helvetica\"\n )\n ))\n\n fig.update_layout(\n title={\n 'text': 'Percentage of Increase in E.Coli Cases for Weather Stations in the UK',\n 'y': 0.9,\n 'x': 0.5,\n 'xanchor': 'center',\n 'yanchor': 'top'},\n autosize=True,\n hovermode='closest',\n font=dict(family=\"Helvetica\", size=18),\n mapbox=dict(\n accesstoken=mapbox_access_token,\n bearing=0,\n center=dict(\n lat=52,\n lon=0.12\n ),\n pitch=10,\n zoom=3,\n style='dark'\n )\n )\n\n fig.update_traces(showlegend=True, selector=dict(type='scattermapbox'))\n\n fig.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"top\",\n y=1.00,\n xanchor=\"right\",\n x=1.00\n ))\n\n fig.show()", "def _dump_plotly(objs, images, func):\n l = len(objs)\n #print(l)\n titles = []\n for i,x in enumerate(objs):\n if 'id' in x:\n titles.append('shape id %d' % x.id)\n else:\n titles.append('item %d' % i)\n fig = tools.make_subplots(rows=l, cols=1, subplot_titles = titles,print_grid=False )\n #print('figure attmpt: ')\n #fig['layout']['xaxis1'].update(title='monkeybar')\n #for x in fig['layout']['xaxis1']:\n #print(x)\n fig.layout.showlegend = False\n 
for i,x in enumerate(objs):\n traces,annotations,title = func(x,images[i])\n im = {\n \"source\": 'data:image/png;base64, ' + getbase64(images[i]),\n \"x\": 1,\n \"y\": 1 - i/(l-.5),\n \"sizex\": .5,\n \"sizey\": .5,\n }\n fig.layout.images.append(im)\n for t in traces:\n fig.append_trace(t,i+1,1)\n if title is not None:\n fig.layout['xaxis%d' % (i+1)].update(title=title)\n if annotations is not None:\n for a in annotations:\n a['xref'] = 'x%d' % (i+1)\n a['yref'] = 'y%d' % (i+1)\n fig.layout.annotations += annotations\n\n fig['layout'].update(height=400*l, width=1100, margin={\n 'l':80,\n 'r':330,\n 't':100,\n 'b':80,\n 'pad':0,\n 'autoexpand':True,\n },title='plots')\n\n return fig", "def make_timeplot(df_measure, df_prediction):\n # mode = 'confirmed'\n mode = 'active'\n df_measure_confirmed = df_measure[mode]\n colors = px.colors.qualitative.Dark24\n n_colors = len(colors)\n fig = go.Figure()\n for i, country in enumerate(df_measure_confirmed.columns):\n fig.add_trace(go.Scatter(x=df_measure_confirmed.index, \n y=df_measure_confirmed[country],\n name=country[1], mode='markers+lines',\n marker_color=colors[i%n_colors],\n line_color=colors[i%n_colors],\n visible=False))\n for i, country in enumerate(df_prediction.columns):\n fig.add_trace(go.Scatter(x=df_prediction.index, \n y=df_prediction[country],\n name='+' + country[1], mode='lines',\n line_dash='dash',\n line_color=colors[i%n_colors],\n showlegend=False,\n visible=False))\n\n last_day = df_measure_confirmed.index.max()\n day = pd.DateOffset(days=1)\n fig.update_layout(title='',\n xaxis=dict(rangeslider_visible=True,\n range=(last_day - 10 * day,\n last_day + 4 * day)))\n fig.update_layout(\n updatemenus=[\n dict(\n type = \"buttons\",\n direction = \"left\",\n buttons=list([\n dict(\n args=[{\"visible\": [False,]*len(df_measure_confirmed.columns)}],\n label=\"Reset\",\n method=\"update\",\n ),\n dict(\n args=[\"yaxis\", {'type':'log'}],\n label=\"log\",\n method=\"relayout\",\n ),\n dict(\n args=[\"yaxis\", {'type':'linear'}],\n label=\"lin\",\n method=\"relayout\",\n ),\n\n ]),\n pad={\"r\": 10, \"t\": 10, \"b\":5},\n showactive=True,\n x=0.05,\n xanchor=\"left\",\n y=1.35,\n yanchor=\"top\",\n font_color='black',\n ),\n ],\n height=.9*FIRST_LINE_HEIGHT,\n)\n\n return fig", "def create_plot(df):\n fig = px.scatter(df, x=\"Date\", y='Airport',\n size=\"Arrivals\", color=\"Arrivals\",\n hover_name=\"Airport\", size_max=60, template='plotly_white')\n fig.update_layout(legend_title_text='Arrivals')\n fig.update_layout(title_font_size=30)\n fig.update_layout(\n title={\n 'text': 'Airport arrivals'})\n fig.update_yaxes(\n title_text=\"Airport\",\n title_font={\"size\": 20},\n title_standoff=25,\n tickfont_size=17)\n fig.update_xaxes(\n title_text=\"Date\",\n title_font={\"size\": 20},\n title_standoff=25,\n tickfont_size=17)\n\n fig.write_image(\"../images/fig14.png\", scale=1, width=1000, height=800)\n fig.write_html('../images/fig14.html')", "def build_all_graphs(cdata,countryname,xdaydates,ydayavg,pratedates,prate):\n\n newcases = go.Scatter(\n name=\"New Cases\",\n x = cdata['date'],\n y = cdata['new_cases'],\n mode = 'markers',\n marker=dict(\n color='LightSkyBlue',\n size=2,\n opacity=0.5,\n line=dict(\n color='LightSkyBlue',\n width=2)),\n marker_color='LightSkyBlue',\n yaxis='y1'\n )\n\n casestotaldeaths=go.Scatter(\n name=\"Total Deaths\",\n x = cdata['date'],\n y = cdata['total_deaths'],\n mode = 'lines',\n marker_color='#ff4d4d',\n #text=ger['new_cases'],\n yaxis='y1',\n fill='tozeroy',\n fillcolor='rgba(255, 77, 77,0.3)'\n 
)\n\n sevendays = go.Scatter(\n name=\"Seven Day Average\",\n x = xdaydates,\n y = ydayavg,\n mode = 'lines+markers',\n #marker_color=\"#995c00\",\n marker=dict(\n color='#995c00',\n size=1,\n opacity=0.5),\n yaxis='y1',\n fill='tozeroy',\n fillcolor='rgba(255, 153, 0,0.8)'\n )\n\n percentpositive = go.Scatter(\n name=\"Positive Rate in Percent\",\n x = pratedates,\n #y = ger['positive_rate'],\n y= prate,\n mode = 'lines+markers',\n line = dict(color='#ccffcc', width=2, dash='dot'),\n marker_color='#ccffcc',\n #text=ger['new_cases'],\n yaxis='y2'\n )\n\n\n layout = go.Layout(\n title={\n 'text': 'COVID-19 in {}'.format(countryname),\n 'y':0.95,\n 'x':0.5,\n 'xanchor': 'center',\n 'yanchor': 'top'},\n activeshape={\"fillcolor\": \"#ff0000\"},\n\n plot_bgcolor='#1a1a1a',\n paper_bgcolor=colors['background'],\n font= {'color' : colors['fontcolor']},\n xaxis = {'title': 'Date',\"gridcolor\":colors['background'],\"gridwidth\": 0.5},\n yaxis = {'title': 'Number of Test Positives', \"gridcolor\":colors['background'],\"gridwidth\": 0.5},\n yaxis2 = {'title': 'Test Positive in Percent', \"gridcolor\":\"#4d4d4d\",\"gridwidth\": 0.5,'overlaying':'y','side':'right', \"zerolinecolor\": colors['background'] },\n #yaxis2=dict(title='Test Positive in Percent',overlaying='y',side='right', gridcolor= \"#cccccc\", gridwidth= 0.5),\n height = 700)\n\n\n\n return newcases, casestotaldeaths,sevendays, percentpositive, layout # all subplots + layout", "def return_figures(df):\n\n # first chart plots arable land from 1990 to 2015 in top 10 economies \n # as a line chart\n \n graph_one = []\n dfC = cleandata(df)\n dfC = dfC.mean()\n dfc = dfC.sort_values()\n# dfc['status']=dfc.apply(lambda x:'bad' if (x<0.3 or x>0.7) else 'good')\n# colors = {'good':'steelblue',\n# 'bad':'firebrick'}\n graph_one.append(\n go.Bar(\n x = dfC.index,\n y = dfC,marker={'color':dfC,'colorscale': 'inferno'}\n \n # mode = 'lines'\n\n )\n )\n\n layout_one = dict(title = 'Feature Unbalance review in Dataset',\n xaxis = dict(title = 'Classes',\n autotick=True,tickangle=45,\n categoryorder=\"max ascending\"),\n yaxis = dict(title = 'Ratio of accurence labeled as 1'),\n )\n\n# second chart plots ararble land for 2015 as a bar chart \n \n graph_two = []\n df_two = cleandata(df)\n df_two_fix = fix_features(df_two)\n df_two_fix.sort_values()\n \n graph_two.append(\n go.Bar(\n x = df_two_fix.index,\n y = df_two_fix,marker={'color':df_two_fix,'colorscale': 'inferno'}\n )\n )\n\n layout_two = dict(title = 'Feature Unbalance review in Dataset - after correction',\n xaxis = dict(title = 'Classes',categoryorder=\"max ascending\"),\n yaxis = dict(title = 'Ratio of accurence labeled as 1'))\n \n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n \n\n return figures", "def create_all_charts(df: pd.DataFrame, s3_resource_bucket):\n\n fig, ax = plt.subplots(4, 1, figsize=(10, 20))\n\n days_back = 30\n ax[0].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n ax[0].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[0].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[0].legend(['MA_30day', 'MA_10day'])\n ax[0].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[0].set_title(f'{text_summary}')\n ax[0].title.set_size(16)\n\n days_back = 90\n ax[1].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n 
ax[1].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[1].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[1].legend(['MA_30day', 'MA_10day'])\n ax[1].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[1].set_title(f'{text_summary}')\n ax[1].title.set_size(16)\n\n days_back = 365\n ax[2].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n ax[2].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[2].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[2].legend(['MA_30day', 'MA_10day'])\n ax[2].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[2].set_title(f'{text_summary}')\n ax[2].title.set_size(16)\n\n days_back = 3650\n ax[3].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_30day'])\n ax[3].plot(df.tail(days_back)['Date'], df.tail(days_back)['MA_10day'])\n ax[3].scatter(df.tail(days_back)['Date'], df.tail(days_back)['Miles'])\n ax[3].legend(['MA_30day', 'MA_10day'])\n ax[3].set_ylabel('Miles')\n text_summary = create_metrics_text_from_dict(calc_runstats(df=df, num_days_back=days_back))\n ax[3].set_title(f'{text_summary}')\n ax[3].title.set_size(16)\n\n fig.tight_layout(pad=3.0)\n\n fig.savefig('all_charts.png')\n\n s3_resource_bucket.upload_file('all_charts.png', 'all_charts.png',\n ExtraArgs={'ContentType': 'image/png'})\n # remove local file\n os.remove('all_charts.png')", "def draw_observation(data, date_obj, map_region):\n\n # set mapbox token\n px.set_mapbox_access_token(CONFIG.CONFIG['MAPBOX']['token'])\n\n # create figures\n map_center = {'lat':(map_region[2] + map_region[3]) * 0.5,\n 'lon':(map_region[0] + map_region[1]) * 0.5}\n figs = collections.OrderedDict()\n\n # draw precipitation\n bins = [0.1, 10, 25, 50, 100, 250, 1200]\n keys = ['0.1~10', '10~25', '25~50', '50~100', '100~250', '>=250']\n cols = ['lightgreen', 'yellow', 'lightskyblue', 'blue', 'magenta','maroon']\n cols_map = dict(zip(keys, cols))\n data['rain'] = pd.cut(data['PRE_Time_0808'], bins=bins, labels=keys)\n data['Rainfall'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['PRE_Time_0808'].astype(str)\n data['rain_size'] = data['PRE_Time_0808'] + data['PRE_Time_0808'].mean()\n df = data[data['rain'].notna()]\n if df.shape[0] >= 2:\n figs['Rainfall'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"rain\", category_orders={'rain': keys}, color_discrete_map = cols_map,\n hover_data={'Rainfall':True, 'Lon':False, 'Lat':False, 'rain':False, 'rain_size':False},\n mapbox_style='satellite-streets', size=\"rain_size\", center=map_center, size_max=10, zoom=4,\n title = 'Accumulated precipitation ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw maximum temperature\n bins = [35, 37, 40, 60]\n keys = ['35~37', '37~40', '>=40']\n cols = ['rgb(255,191,187)', 'rgb(250,89,0)', 'rgb(230,0,8)']\n cols_map = dict(zip(keys, cols))\n data['max_temp_warning'] = pd.cut(data['TEM_Max'], bins=bins, labels=keys)\n data['max_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['TEM_Max'].astype(str)\n df = data[data['max_temp_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Max_temperature'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"max_temp_warning\", category_orders={'max_temp_warning': keys}, \n color_discrete_map = cols_map,\n 
hover_data={'max_temp':True, 'Lon':False, 'Lat':False, 'max_temp_warning':False, 'TEM_Max':False},\n mapbox_style='satellite-streets', size=\"TEM_Max\", center=map_center, size_max=10, zoom=4,\n title = 'Maximum temperature ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw minimum temperature\n bins = [-120, -40, -30, -20, -10, 0]\n keys = ['<=-40','-40~-30', '-30~-20', '-20~-10', '-10~0']\n cols = ['rgb(178,1,223)', 'rgb(8,7,249)', 'rgb(5,71,162)', 'rgb(5,109,250)', 'rgb(111,176,248)']\n cols_map = dict(zip(keys, cols))\n data['min_temp_warning'] = pd.cut(data['TEM_Min'], bins=bins, labels=keys)\n data['min_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['TEM_Min'].astype(str)\n df = data[data['min_temp_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Min_temprature'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"min_temp_warning\", category_orders={'min_temp_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'min_temp':True, 'Lon':False, 'Lat':False, 'min_temp_warning':False, 'TEM_Min':False},\n mapbox_style='satellite-streets', size=-1.0*df[\"TEM_Min\"], center=map_center, size_max=10, zoom=4,\n title = 'Minimum temperature ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw low visibility\n data['VIS_Min'] /= 1000.0\n bins = [0, 0.05, 0.2, 0.5, 1]\n keys = ['<=0.05','0.05~0.2', '0.2~0.5', '0.5~1']\n cols = ['rgb(0,82,77)', 'rgb(0,153,160)', 'rgb(0,210,204)', 'rgb(95,255,252)']\n cols_map = dict(zip(keys, cols))\n data['min_vis_warning'] = pd.cut(data['VIS_Min'], bins=bins, labels=keys)\n data['VIS_Min_size'] = 2.0-data[\"VIS_Min\"]\n data['min_vis'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['VIS_Min'].astype(str)\n df = data[data['min_vis_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Low_visibility'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"min_vis_warning\", category_orders={'min_vis_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'min_vis':True, 'Lon':False, 'Lat':False, 'min_vis_warning':False, 'VIS_Min_size':False},\n mapbox_style='satellite-streets', size=\"VIS_Min_size\", center=map_center, size_max=10, zoom=4,\n title = 'Low visibility ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw high wind\n bins = [10.8, 13.9, 17.2, 20.8, 24.5, 28.5, 32.7, 37.0, 120]\n keys = ['10.8~13.8','13.9~17.1', '17.2~20.7', '20.8~24.4', '24.5~28.4', '28.5~32.6', '32.7~36.9', '>=37.0']\n cols = ['rgb(0,210,244)', 'rgb(0,125,255)', 'rgb(253,255,0)', 'rgb(247,213,0)',\n 'rgb(255,141,0)', 'rgb(251,89,91)', 'rgb(255,3,0)', 'rgb(178,1,223)']\n cols_map = dict(zip(keys, cols))\n data['max_win_warning'] = pd.cut(data['WIN_S_Max'], bins=bins, labels=keys)\n data['max_win'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['WIN_S_Max'].astype(str)\n df = data[data['max_win_warning'].notna()]\n if df.shape[0] >= 2:\n figs['High_wind'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"max_win_warning\", category_orders={'max_win_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'max_win':True, 'Lon':False, 'Lat':False, 'max_win_warning':False, 'WIN_S_Max':False},\n mapbox_style='satellite-streets', size=\"WIN_S_Max\", center=map_center, size_max=10, zoom=4,\n title = 'Maximum wind speed ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n 
width=1000, height=800)\n\n return figs", "def onc_skyplot(t):\n # Convert to Pandas data frame\n data = t.to_pandas()\n data.index = data['id']\n script, div, warning_message = '', '', ''\n\n if 'ra' in data and 'dec' in data:\n \n # Remove objects without RA/Dec\n num_missing = np.sum(pd.isnull(data.get('ra')))\n if num_missing > 0:\n warning_message = 'Note: {} objects had missing coordinate information and were removed.'.format(num_missing)\n data = data[pd.notnull(data.get('ra'))]\n else:\n warning_message = ''\n\n # Coerce to numeric\n data['ra'] = pd.to_numeric(data['ra'])\n data['dec'] = pd.to_numeric(data['dec'])\n\n source = ColumnDataSource(data=data)\n\n tools = \"resize,tap,pan,wheel_zoom,box_zoom,reset\"\n p = figure(tools=tools, title='', plot_width=500, plot_height=300, min_border=0, min_border_bottom=0)\n\n # Add the data\n p.scatter('ra', 'dec', source=source, size=8, alpha=0.6)\n tooltip = [(\"Source ID\", \"@id\"), (\"Name\", \"@shortname\"), (\"(RA, Dec)\", \"(@ra, @dec)\")]\n p.add_tools(HoverTool(tooltips=tooltip))\n\n # When clicked, go to the Summary page\n url = \"inventory/@id\"\n taptool = p.select(type=TapTool)\n taptool.callback = OpenURL(url=url)\n\n # Axis labels\n p.yaxis.axis_label = 'Decl. (deg)'\n p.xaxis.axis_label = 'R.A. (deg)'\n\n script, div = components(p)\n\n return script, div, warning_message", "def make_season_calender_plots(year='2019-20', plot_folder=env.plot_folder, html_folder=env.output_folder + 'views/', web_pickle_folder=env.output_folder + 'webpickles/'):\n\n from_year = int(year[0:4])\n to_year = int('20' + year[-2:])\n\n from_day = dt.date(from_year, 11, 1)\n to_day = dt.date(to_year, 6, 30)\n\n # if the seasons expected end is after todays date, set it to today.\n if to_day > dt.date.today():\n to_day = dt.date.today()\n\n # list of months to be plotted\n months = []\n month = from_day\n\n while month < to_day:\n months.append(month)\n almost_next = month + dt.timedelta(days=35)\n month = dt.date(almost_next.year, almost_next.month, 1)\n\n # Get all regions\n region_ids = gm.get_forecast_regions(year)\n\n # get a list of relevant observers to plot and make pickle for adding to the web-folder\n all_observations_nest = gvp.get_all_observations(year, output='List', geohazard_tids=10)\n all_observations_list = gvp.get_all_observations(year, output='FlatList', geohazard_tids=10)\n\n observer_dict = {}\n for o in all_observations_nest:\n if o.ObserverID in observer_dict.keys():\n observer_dict[o.ObserverID].add_one_observation_count()\n else:\n observer_dict[o.ObserverID] = ObserverData(o.ObserverID, o.NickName, observation_count_inn=1)\n\n observer_list = []\n observer_list_web = []\n ordered_observer_dict = col.OrderedDict(sorted(observer_dict.items(), key=lambda t: t[1].observation_count, reverse=True))\n for k, v in ordered_observer_dict.items():\n if v.observation_count > 4:\n observer_list.append(ObserverData(v.observer_id, v.observer_nick, observation_count_inn=v.observation_count))\n observer_list_web.append([v.observer_id, v.observer_nick, v.observation_count])\n\n if not os.path.exists(web_pickle_folder):\n os.makedirs(web_pickle_folder)\n mp.pickle_anything(observer_list_web, '{0}observerlist.pickle'.format(web_pickle_folder))\n\n # run the stuff\n make_observer_plots(all_observations_list, observer_list, months, plot_folder=plot_folder, html_folder=html_folder)\n make_region_plots(all_observations_list, region_ids, months, plot_folder=plot_folder, html_folder=html_folder)\n make_svv_plots(all_observations_list, observer_dict, 
region_ids, months, plot_folder=plot_folder, html_folder=html_folder)", "def visualizeOverview(timeslices: dict, imagePath: str, startTime: datetime.datetime, endTime: datetime.datetime, write_out: bool = False):\n\n maxAgents = 0\n minAgents = 4000\n meanAgents = []\n maxs = []\n mins = []\n means = []\n idxs = []\n\n df_data = []\n for hour in sorted(list(timeslices.keys())):\n for minute in sorted(list(timeslices[hour].keys())):\n minVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.min()\n maxVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.max()\n meanVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.mean()\n\n idx = datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=hour, minute=minute)\n idxs.append(idx)\n\n mins.append(minVal)\n maxs.append(maxVal)\n means.append(meanVal)\n\n minAgents = min(minAgents, minVal)\n maxAgents = max(maxAgents, maxVal)\n meanAgents.append(meanVal)\n df_data.append([idx, minVal, meanVal, maxVal])\n\n\n meanAgents = int(np.mean(meanAgents))\n log.debug(f\"Minimum agents at one spot: {minAgents}, mean agents: {meanAgents}, maximum agents: {maxAgents}\")\n\n fig: plt.Figure = plt.figure(figsize=(15, 8), dpi=300)\n ax: plt.Axes = plt.gca()\n\n ax.plot_date(idxs, mins, '-g', label=\"minimum\")\n ax.plot_date(idxs, means, '-y', label=\"avgerage\")\n ax.plot_date(idxs, maxs, '-r', label=\"maximum\")\n\n ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=0))\n ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(\"%H:%M\"))\n\n ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=10))\n\n ax.set_xlim(datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=0, minute=0),\n datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=23, minute=59), emit=False)\n\n # removed for small sample based reproduction # ax.set_ylim(250,900)\n\n fig.autofmt_xdate()\n ax.legend()\n plt.title(f\"Minimum, average and maximum number of vehicles seamlessly reaching one vertex, per 10 minute timeslice\")\n plt.xlabel(f\"time\\nat {startTime.strftime('%d.%m.%y')}\")\n plt.ylabel(\"number of seamlessly reaching vehicles\")\n\n overview_df = pd.DataFrame(df_data, columns=[\"idx\", \"min\", \"mean\", \"max\"])\n if write_out:\n overview_df.to_csv(os.path.join(imagePath, f\"analysis-{startTime.strftime('%d.%m.%y')}-{endTime.strftime('%d.%m.%y')}.csv\"))\n plt.savefig(os.path.join(imagePath, f\"analysis-{startTime.strftime('%d.%m.%y')}-{endTime.strftime('%d.%m.%y')}.png\"))", "def plotly_scatter_3d(traces, width=1000, height=600):\n names = set()\n fig = go.Figure(data=traces)\n fig.for_each_trace(\n lambda trace:\n trace.update(showlegend=False)\n if (trace.name in names) else names.add(trace.name)\n )\n fig.update_layout(\n width=width,\n height=height,\n autosize=True,\n scene=dict(\n aspectratio=dict(x=1.1, y=2.25, z=1.1),\n xaxis=dict(title='UMAP 1'),\n yaxis=dict(title='Batch Number'),\n zaxis=dict(title='UMAP 2'),\n\n ),\n scene_camera=dict(\n up=dict(x=0, y=0, z=1),\n center=dict(x=0, y=0, z=0),\n eye=dict(x=2.5, y=0.1, z=-0.1)\n )\n )\n return fig", "def plot_history(data):\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=data.index, y=data[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, 
zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_all_time.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_all_time.html'))\n fig.show()\n\n recent = data[:data.first_valid_index() - pd.Timedelta(weeks=52)]\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=recent.index, y=recent[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_past_year.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_past_year.html'))\n fig.show()", "def plot_time_series(activity: models.Activity):\n\n attributes = activity.trace_file.__dict__\n lap_data = models.Lap.objects.filter(trace=activity.trace_file)\n plots = []\n lap_lines = []\n\n timestamps = pd.to_datetime(pd.Series(json.loads(attributes[\"timestamps_list\"]), dtype=float), unit=\"s\")\n x_axis = pd.to_datetime(timestamps).dt.tz_localize(\"utc\").dt.tz_convert(settings.TIME_ZONE)\n x_axis = x_axis - x_axis.min()\n\n for attribute, values in attributes.items():\n if attribute in attributes_to_create_time_series_plot_for:\n values = pd.Series(json.loads(values), dtype=float)\n if values.any():\n attribute = attribute.replace(\"_list\", \"\")\n\n p = figure(\n x_axis_type=\"datetime\",\n plot_height=int(settings.PLOT_HEIGHT / 2.5),\n sizing_mode=\"stretch_width\",\n y_axis_label=plot_matrix[attribute][\"axis\"],\n )\n lap = _add_laps_to_plot(laps=lap_data, plot=p, y_values=values)\n lap_lines += lap\n if attribute == \"speed\":\n # turn speed values from m/s into km/h to be consistent with other speed values\n values = values.mul(3.6)\n if attribute == \"altitude\":\n p.varea(\n x=x_axis,\n y1=values,\n y2=values.min(),\n color=plot_matrix[attribute][\"second_color\"],\n fill_alpha=0.5,\n )\n p.line(\n x_axis,\n values,\n line_width=2,\n color=plot_matrix[attribute][\"color\"],\n legend_label=plot_matrix[attribute][\"title\"],\n )\n else:\n p.line(\n x_axis,\n values,\n line_width=2,\n color=plot_matrix[attribute][\"color\"],\n legend_label=plot_matrix[attribute][\"title\"],\n )\n x_hover = (\"Time\", \"@x\")\n hover = HoverTool(\n tooltips=[(plot_matrix[attribute][\"title\"], f\"@y {plot_matrix[attribute]['axis']}\"), x_hover],\n mode=\"vline\",\n )\n p.add_tools(hover)\n cross = 
CrosshairTool(dimensions=\"height\")\n p.add_tools(cross)\n p.toolbar.logo = None\n p.toolbar_location = None\n p.xgrid.grid_line_color = None\n p.legend.location = \"top_left\"\n p.legend.label_text_font = \"ubuntu\"\n p.legend.background_fill_alpha = 0.7\n dtf = DatetimeTickFormatter()\n dtf.minutes = [\"%M:%S\"]\n p.xaxis.formatter = dtf\n p.xaxis.major_label_overrides = {0: \"0:00\"}\n plots.append(p)\n values.ffill(inplace=True)\n values.bfill(inplace=True)\n x_axis.ffill(inplace=True)\n x_axis.bfill(inplace=True)\n\n _link_all_plots_with_each_other(all_plots=plots, x_values=x_axis)\n\n all_plots = column(*plots)\n all_plots.sizing_mode = \"stretch_width\"\n\n if lap_data:\n # include button to toggle rendering of laps\n checkbox = CheckboxButtonGroup(labels=[\"Show Laps\"], active=[0], width=100)\n\n js = \"\"\"\n for (line in laps) {\n laps[line].visible = false;\n if (typeof markerGroup != \"undefined\") {\n markerGroup.removeFrom(map);\n }\n }\n for (i in cb_obj.active) {\n if (cb_obj.active[i] == 0) {\n for (line in laps) {\n laps[line].visible = true;\n if (typeof markerGroup != \"undefined\") {\n markerGroup.addTo(map);\n }\n }\n }\n }\n \"\"\"\n callback = CustomJS(args={\"laps\": lap_lines, \"checkbox\": checkbox}, code=js)\n checkbox.js_on_change(\"active\", callback)\n layout = column(all_plots, checkbox)\n layout.sizing_mode = \"stretch_width\"\n script, div = components(layout)\n else:\n script, div = components(all_plots)\n\n return script, div", "def main():\n repo_names = ['alexandre-desvallées', 'antoine-drouard', 'antoine-gosset',\n 'benjamin-brasseur', 'benoît-cochet', 'clément-caillaud',\n 'florian-boche', 'grégoire-meunier', 'guillaume-fourny',\n 'kevin-pautonnier', 'matthieu-fournier', 'matthieu-saint-martin',\n 'maxime-courant', 'nicolas-goureau', 'projet-gata', 'projet-mymeteo',\n 'projet-neuronal-gates', 'projet-nikmabe', 'tanguy-badier',\n 'thomas-pichard', 'yanis-dando']\n time_series = []\n for repo_name in repo_names:\n time_series.append(repo_get_commits('/ML-2019-étudiants/' + repo_name))\n min_timestamp = min([min(series) for series in time_series])\n max_timestamp = max([max(series) for series in time_series])\n # Use two more than the days available to deal with times a bit\n # before or after first and last day.\n days_in_universe = (max_timestamp - min_timestamp).days + 2\n # Now I'd like each time series expressed as a sequence of floats\n # representing days since day 0.\n day_series = []\n for series in time_series:\n day_series.append([(d - min_timestamp).days + (d - min_timestamp).seconds / 3600. 
/ 24.\n for d in series])\n plt.xlim(0, days_in_universe)\n plt.xticks(np.arange(days_in_universe))\n plt.yticks(np.arange(len(repo_names)), repo_names)\n for index, series in enumerate(day_series):\n plt.scatter(series, [index] * len(series))\n plt.show()", "def plot_weather(): # pragma: no cover\n temp, sun, light, clouds, rain, wind = [], [], [], [], [], []\n\n world = World(duration_days=10)\n while not world.step():\n temp.append((world.int_weather['temperature']))\n sun.append(world.int_weather['sun'])\n wind.append(world.int_weather['wind'])\n light.append(world.int_weather['light'])\n clouds.append(world.int_weather['clouds'])\n rain.append(world.int_weather['rain'])\n\n plt.subplot(411)\n plt.plot(temp, label='temperature')\n plt.legend()\n\n plt.subplot(412)\n plt.plot(sun, label='sun')\n plt.plot(light, label='light')\n plt.legend()\n\n plt.subplot(413)\n plt.plot(wind, label='wind')\n plt.plot(clouds, label='clouds')\n plt.legend()\n\n plt.subplot(414)\n plt.plot(rain, label='rain')\n plt.legend()\n plt.show()", "def plot_3d (cities):\n\n # base all measures on first day present in each city\n day = 0\n # date time for the label\n dt = xpath(cities[0], ('data',day,'dt'))\n date=date_repr(dt)\n \n fig = plot.figure()\n ax = fig.gca(projection='3d')\n X = [ xpath(city, ('city','coord','lon')) for city in cities ]\n Y = [ xpath(city, ('city','coord','lat')) for city in cities ]\n P = [ xpath (city, ('data',day,'pressure'))\n for city in cities ]\n ax.plot_trisurf(X, Y, P, cmap=cm.jet, linewidth=0.2,\n label=\"Pressure on %s\"%date)\n ax.set_title (\"Pressure on %s\"%date)\n plot.show()", "def treemap_page(self):\n st.title('Netflix Titles Plot Popularity Detailed Analysis')\n common = st.sidebar.checkbox('Use common words', value=True)\n st.plotly_chart(self.viz.plot_treemap(width=1000, height=600, common=common))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Since virtual steppers are virtual, we don't need pins or step sequences. We're still using delay and n_steps to resemble physical steppers.
def __init__(self, name = None, n_steps = 256, delay = 1e-3):
    self.fig, self.ax = plt.subplots(figsize=(3, 3))
    self.n_steps = n_steps
    self.delay = delay
    self.step_size = 2 * pi / self.n_steps
    if name is None:
        self.name = 'Stepper {}'.format(VirtualStepper.count + 1)
    self.angle = 0.0
    self.check()
    self.inv = False
    VirtualStepper.count += 1
    plt.ion()
    plt.show()
    self.draw()
[ "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def fixed_steps_trajectories(self, noise=0, nt=1, ll=0.1, limit=None):\n\n print('Generating Trajectories...')\n for i in tqdm.tqdm(range(self.ntraj)):\n\n if self.hop_distribution == 'gaussian' or self.hop_distribution == 'Gaussian':\n z_position = np.cumsum(\n np.random.normal(loc=0, scale=self.hop_sigma, size=self.nsteps)) # accumulate gaussian steps\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n self.trajectories[i, :, 1] = z_position - z_position[0] # make initial z equal to 0\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n time = sampling.random_exponential_dwell(self.lamb, size=self.nsteps)\n elif self.dwell_distribution == 'power':\n time = sampling.random_power_law_dwell(1 + self.alpha, size=self.nsteps, ll=ll, limit=limit)\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n\n time = np.cumsum(time) # accumulate dwell times\n time -= time[0]\n\n self.trajectories[i, :, 0] = time\n\n # Add to array with all corners of hop distribution for visualization purposes\n self.trajectory_hops[i, 1::2, 0] = time[1:]\n self.trajectory_hops[i, 2::2, 0] = time[1:]\n\n self.trajectory_hops[i, ::2, 1] = self.trajectories[i, :, 1]\n self.trajectory_hops[i, 1:-1:2, 1] = self.trajectories[i, :-1, 1]\n self.trajectory_hops[i, -1, 1] = self.trajectories[i, -1, 1]\n\n print('Interpolating Trajectories...')\n # make uniform time intervals with the same interval for each simulated trajectory\n max_time = np.min(self.trajectories[:, -1, 0])\n self.time_uniform = np.linspace(0, max_time, self.nsteps*10)\n\n if nt > 1:\n # self.pbar = tqdm.tqdm(total=self.ntraj)\n pool = Pool(nt)\n for i, t in enumerate(pool.map(self.interpolate_trajectories, range(self.ntraj))):\n self.z_interpolated[i, :] = t\n else:\n for t in tqdm.tqdm(range(self.ntraj)):\n self.z_interpolated[t, :] = self.trajectories[t, np.digitize(self.time_uniform,\n self.trajectories[t, :, 0], right=False) - 1, 1]\n #self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)", "def get_steps_num():\n return 0", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def train_loop_pre(self, current_step):\r\n pass", "def fixed_steps_trajectories(self, noise=0, nt=1, ll=0.1, limit=None):\n\n if type(ll) is not list:\n ll = [ll]\n\n print('Generating Trajectories...')\n for i in tqdm.tqdm(range(self.ntraj)):\n\n if self.hop_distribution == 'gaussian' or self.hop_distribution == 'Gaussian':\n z_position = np.cumsum(\n np.random.normal(loc=0, scale=self.hop_sigma, size=self.length)) # accumulate gaussian steps\n else:\n sys.exit('Please enter a valid hop distance probability distribution')\n\n self.trajectories[i, :, 1] = z_position - z_position[0] # make initial z equal to 0\n\n # hop at random time intervals according to one of the following PDFs\n if self.dwell_distribution == 'exponential':\n 
time = rand.random_exponential(self.lamb, size=self.length)\n elif self.dwell_distribution == 'power':\n time = rand.random_powerlaw(1 + self.alpha, size=self.length, ll=ll, limit=limit)\n else:\n sys.exit('Please enter a valid dwell time probability distribution')\n\n time = np.cumsum(time) # accumulate dwell times\n time -= time[0]\n\n self.trajectories[i, :, 0] = time\n\n # Add to array with all corners of hop distribution for visualization purposes\n self.trajectory_hops[i, 1::2, 0] = time[1:]\n self.trajectory_hops[i, 2::2, 0] = time[1:]\n\n self.trajectory_hops[i, ::2, 1] = self.trajectories[i, :, 1]\n self.trajectory_hops[i, 1:-1:2, 1] = self.trajectories[i, :-1, 1]\n self.trajectory_hops[i, -1, 1] = self.trajectories[i, -1, 1]\n\n print('Interpolating Trajectories...')\n # make uniform time intervals with the same interval for each simulated trajectory\n max_time = np.min(self.trajectories[:, -1, 0])\n self.time_uniform = np.linspace(0, max_time, self.length*10)\n\n if nt > 1:\n # self.pbar = tqdm.tqdm(total=self.ntraj)\n pool = Pool(nt)\n for i, t in enumerate(pool.map(self.interpolate_trajectories, range(self.ntraj))):\n self.z_interpolated[i, :] = t\n else:\n for t in tqdm.tqdm(range(self.ntraj)):\n self.z_interpolated[t, :] = self.trajectories[t, np.digitize(self.time_uniform,\n self.trajectories[t, :, 0], right=False) - 1, 1]\n #self.z_interpolated[t, :] = self.interpolate_trajectories(t, noise=noise)", "def simulate(self, n, dt=None):\n for _ in range(n):\n self.step(dt)", "def simulate(self, n):\n for _ in range(n):\n self.walking_home()\n print \"Distance: {:4} -> Path Length: {}\".\\\n format(self._home, self._steplist)", "def steps(self,num_steps):\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()\n for step in range(num_steps):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, self.next_action, reward, next_sensation)\n self.next_action = self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n self.start_episode()", "def simulate(self, ntrs):\n self.trtimes = list(np.arange(ntrs)*self.expectedtr)", "def step(self, state):", "def turn_steps(self, steps, delay_ms=1):\n if steps < 0:\n direction = -1\n else:\n direction = 1\n for _ in range(abs(int(steps))):\n self.current_step += direction\n element = STEP_ELEMENTS[self.current_step % N_STEP_ELEMENTS ]\n self.set_bits(element)\n time.sleep_ms(delay_ms)", "def pzt_scan(pzt_motor, start, stop, steps, detectors=[Vout2], sleep_time=1, md=None):\n if Andor in detectors:\n exposure_time = yield from bps.rd(Andor.cam.acquire_time)\n yield from mv(Andor.cam.acquire, 0)\n yield from mv(Andor.cam.image_mode, 0)\n yield from mv(Andor.cam.num_images, 1)\n Andor.cam.acquire_period.put(exposure_time)\n\n motor = pzt_motor.setpos\n motor_readback = pzt_motor.pos\n motor_ini_pos = motor_readback.get()\n detector_set_read = [motor, motor_readback]\n detector_all = detector_set_read + detectors\n\n _md = {\n \"detectors\": [det.name for det in detector_all],\n \"detector_set_read\": [det.name for det in detector_set_read],\n \"motors\": [motor.name],\n \"XEng\": XEng.position,\n \"plan_args\": {\n \"pzt_motor\": pzt_motor.name,\n \"start\": start,\n \"stop\": stop,\n \"steps\": steps,\n \"detectors\": \"detectors\",\n \"sleep_time\": sleep_time,\n },\n \"plan_name\": \"pzt_scan\",\n \"hints\": {},\n \"motor_pos\": wh_pos(print_on_screen=0),\n \"operator\": \"FXI\",\n }\n _md.update(md or {})\n try:\n dimensions = 
[(pzt_motor.hints[\"fields\"], \"primary\")]\n except (AttributeError, KeyError):\n pass\n else:\n _md[\"hints\"].setdefault(\"dimensions\", dimensions)\n\n @stage_decorator(list(detector_all))\n @run_decorator(md=_md)\n def pzt_inner_scan():\n my_var = np.linspace(start, stop, steps)\n print(my_var)\n for x in my_var:\n print(x)\n yield from mv(motor, x)\n yield from bps.sleep(sleep_time)\n yield from trigger_and_read(list(detector_all))\n yield from mv(motor, motor_ini_pos)\n\n uid = yield from pzt_inner_scan()\n\n h = db[-1]\n scan_id = h.start[\"scan_id\"]\n det = [det.name for det in detectors]\n det_name = \"\"\n for i in range(len(det)):\n det_name += det[i]\n det_name += \", \"\n det_name = \"[\" + det_name[:-2] + \"]\"\n txt1 = get_scan_parameter()\n txt2 = f\"detectors = {det_name}\"\n txt = txt1 + \"\\n\" + txt2\n insert_text(txt)\n print(txt)\n return uid\n\n # def pzt_scan(moving_pzt, start, stop, steps, read_back_dev, record_dev, delay_time=5, print_flag=1, overlay_flag=0):\n \"\"\"\n Input:\n -------\n moving_pzt: pv name of the pzt device, e.g. 'XF:18IDA-OP{Mir:DCM-Ax:Th2Fine}SET_POSITION.A'\n\n read_back_dev: device (encoder) that changes with moving_pzt, e.g., dcm.th2\n\n record_dev: signal you want to record, e.g. Vout2\n\n delay_time: waiting time for device to response\n \"\"\"", "def simulate(self, t, inputs=None):\n outputDimension = self.system.outputOrder\n if outputDimension:\n output = np.zeros((t.size, outputDimension))\n\n t0 = t[0]\n index = 0\n tnew = t\n current_sample = 0\n num_samples = len(t)\n\n if 'jitter' in self.options:\n jitter_range = self.options['jitter']['range']\n if jitter_range > (t[1] - t[0])/2.:\n raise \"Too large jitter range. Time steps could change order\"\n tnew = t + (np.random.rand(t.size) - 0.5) * jitter_range\n print(\"With Jitter!\")\n # print(t)\n # print(tnew)\n\n for timeInstance in tnew[1:]:\n # Store observations\n if outputDimension:\n output[index, :] = self.system.output(self.state)\n index += 1\n\n def f(x, t):\n \"\"\"\n Compute the system derivative considering state control and input.\n \"\"\"\n hom = np.dot(self.system.A, x.reshape((self.system.order,1))).flatten()\n control = self.control.fun(t)\n input = np.zeros_like(hom)\n if inputs:\n for signal in inputs:\n input += signal.fun(timeInstance)\n\n return hom + control + input\n if \"noise\" in self.options:\n # Shock Noise\n noise_state = np.zeros((self.system.order, len(self.options[\"noise\"])))\n for i, noiseSource in enumerate(self.options['noise']):\n # print(noiseSource['std'])\n if noiseSource['std'] > 0:\n # std = np.sqrt(noiseSource[\"std\"] ** 2 * (timeInstance - t0)) * noiseSource[\"steeringVector\"]\n std = noiseSource[\"std\"] * noiseSource[\"steeringVector\"]\n noise_state[:, i]= std\n # noise_state += (np.random.rand() - 0.5 ) * 2 * std \n def g(x, t):\n return noise_state\n \n else:\n def g(x,t):\n return np.zeros((self.system.order, 1))\n\n # Solve ordinary differential equation\n # self.state = odeint(derivate, self.state, np.array([t0, timeInstance]), mxstep=100, rtol=1e-13, hmin=1e-12)[-1, :]\n tspace = np.linspace(t0, timeInstance, 10)\n self.state = sdeint.itoint(f, g, self.state, tspace)[-1, :]\n # If thermal noise should be simulated\n # if \"noise\" in self.options:\n\n # # Shock Noise\n # noise_state = np.zeros(self.system.order)\n # for noiseSource in self.options['noise']:\n # # print(noiseSource['std'])\n # if noiseSource['std'] > 0:\n # # std = np.sqrt(noiseSource[\"std\"] ** 2 * (timeInstance - t0)) * noiseSource[\"steeringVector\"]\n 
# std = noiseSource[\"std\"] * noiseSource[\"steeringVector\"]\n # noise_state += np.random.randn() * std\n # # noise_state += (np.random.rand() - 0.5 ) * 2 * std\n # self.state += noise_state\n\n # # Thermal Noise Simulation\n # for noiseSource in self.options['noise']:\n # if noiseSource['std'] > 0:\n # def noiseDerivative(x, t):\n # hom = np.dot(self.system.A, x.reshape((self.system.order,1))).flatten()\n # noise = np.random.randn() * noiseSource[\"std\"] * noiseSource[\"steeringVector\"]\n # return hom + noise\n # noise_state = odeint(noiseDerivative, np.zeros_like(self.state), np.array([t0, timeInstance]))[-1, :]\n # # print(\"Noise state %s\" %noise_state)\n # # print(\"state before \", self.state)\n # self.state += noise_state\n # # print(\"noise \", noise_state)\n # # print(\"state after \", self.state)\n\n # Increase time\n t0 = timeInstance\n # Update control descisions\n # print(self.state)\n\n # Clip if state is out of bound\n if True:\n bound = 1.\n above = self.state > bound\n below = self.state < -bound\n\n oob_states = np.arange(self.system.order)[np.logical_or(above,below)]\n if any(oob_states):\n # self.log(\"STATE BOUND EXCEEDED! Sample #: {}\".format(current_sample))\n # self.log(\"X_{} = {}\".format(oob_states, self.state[oob_states]))\n self.num_oob += 1\n #self.state[above] = bound\n #self.state[below] = -bound\n\n # print(self.state)\n current_sample += 1\n self.control.update(self.state)\n\n # Print progress every 1e4 samples\n try:\n if current_sample % (num_samples//1e4) == 0:\n print(\"Simulation Progress: %.2f%% \\r\" % (100*(current_sample/num_samples)), end='', flush=True)\n except ZeroDivisionError:\n pass\n\n # Return simulation object\n return {\n 't': t,\n 'control': self.control,\n 'output': output,\n 'system': self.system,\n 'state': self.state,\n 'options': self.options,\n 'log': self.logstr,\n 'num_oob': self.num_oob\n }", "def write_delay_stimulus(self):\n\n self.check_arguments()\n\n # obtains list of time-points for each rising clk edge\n self.create_test_cycles()\n\n # creates and opens stimulus file for writing\n self.delay_stim_sp = \"delay_stim.sp\"\n temp_stim = \"{0}/{1}\".format(OPTS.openram_temp, self.delay_stim_sp)\n self.sf = open(temp_stim, \"w\")\n\n if OPTS.spice_name == \"spectre\":\n self.sf.write(\"simulator lang=spice\\n\")\n self.sf.write(\"* Delay stimulus for period of {0}n load={1}fF slew={2}ns\\n\\n\".format(self.period,\n self.load,\n self.slew))\n self.stim = stimuli(self.sf, self.corner)\n # include files in stimulus file\n self.stim.write_include(self.trim_sp_file)\n\n self.write_generic_stimulus()\n\n # generate data and addr signals\n self.sf.write(\"\\n* Generation of data and address signals\\n\")\n self.gen_data()\n self.gen_addr()\n\n # generate control signals\n self.sf.write(\"\\n* Generation of control signals\\n\")\n self.gen_control()\n\n self.sf.write(\"\\n* Generation of Port clock signal\\n\")\n for port in self.all_ports:\n self.stim.gen_pulse(sig_name=\"CLK{0}\".format(port),\n v1=0,\n v2=self.vdd_voltage,\n offset=self.period,\n period=self.period,\n t_rise=self.slew,\n t_fall=self.slew)\n\n # self.load_all_measure_nets()\n self.write_delay_measures()\n # self.write_simulation_saves()\n\n # run until the end of the cycle time\n self.stim.write_control(self.cycle_times[-1] + self.period)\n\n self.sf.close()", "def run(simulation,N_step,skip):\n return [tissue.copy() for tissue in itertools.islice(simulation,0,N_step,skip)]", "def run_generator(simulation,N_step,skip):\n return 
itertools.islice(simulation,0,N_step,skip)", "def run(self,number_steps: int, timed: bool = False) -> list:\n\t\toutput = []\n\t\tcurrent = resolveRandom(self.initial_state)\n\t\tc = 0\n\t\twhile c < number_steps:\n\t\t\t[symbol, next_state, time_spent] = self.next(current)\n\t\t\toutput.append(symbol)\n\t\t\tif timed:\n\t\t\t\toutput.append(time_spent)\n\t\t\tcurrent = next_state\n\t\t\tc += 1\n\t\toutput.append(self.labelling[current])\n\t\treturn output", "def simulate_chain(self, n):\n logger = self._logger\n writer = self._writer\n\n device = self.current_position.device\n\n for _ in range(n):\n curr_q = self.current_position\n # Resample momentum.\n curr_p = self._momentum_dist.sample()\n # Sample slice variable.\n curr_K = _kin_energy(curr_p, self._inv_mass)\n curr_U = self._pot_energy_func(curr_q)\n curr_prob = torch.exp(-curr_U - curr_K) # unnormalized probability\n u = torch.rand(1).to(device) * curr_prob\n\n # Initialize some variables.\n q_minus = curr_q\n p_minus = curr_p\n\n q_plus = curr_q\n p_plus = curr_p\n\n # The new state.\n q_new = None\n\n j = 0\n n = 1\n s = 1\n\n while s == 1:\n # Choose random direction.\n v = -1 if torch.rand(1) < .5 else 1\n\n if v == -1:\n q_minus, p_minus, _, _, q_proposed, n_new, s_new = \\\n self._build_tree(q_minus, p_minus, u, v, j)\n else:\n _, _, q_plus, p_plus, q_proposed, n_new, s_new = \\\n self._build_tree(q_plus, p_plus, u, v, j)\n\n if s_new == 1:\n if torch.rand(1) < min(1, n_new/n):\n q_new = q_proposed\n\n n += n_new\n s = s_new * self._u_turn(q_minus, q_plus, p_minus) * \\\n self._u_turn(q_minus, q_plus, p_plus)\n j += 1\n\n if q_new is not None:\n self._accumulated_accept += 1\n self._positions.append(q_new.detach().clone())\n\n new_U = self._pot_energy_func(q_new)\n\n else:\n self._positions.append(curr_q.detach().clone())\n\n new_U = curr_U\n\n if not self._positions[-1].requires_grad:\n self._positions[-1].requires_grad = True\n\n new_U = new_U.detach().cpu().numpy()\n\n self._position = self._positions[-1]\n self._num_states += 1\n\n # Log progress.\n if logger is not None and \\\n (self.num_states-1) % self._log_interval == 0:\n logger.debug('NUTS state %d: Current Potential Energy: %f - ' \\\n % (self.num_states, new_U) + \\\n 'Acceptance probability: %.2f%%.' \\\n % (self.acceptance_probability * 100))\n\n if writer is not None:\n tag = self._writer_tag\n writer.add_scalar('%snuts/potential' % tag, new_U,\n global_step=self.num_states,\n display_name='Potential Energy')\n writer.add_scalar('%snuts/accept' % tag,\n self.acceptance_probability,\n global_step=self.num_states,\n display_name='Acceptance Probability')\n writer.add_scalar('%snuts/tree_depth' % tag, j,\n global_step=self.num_states,\n display_name='Tree Depth')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rotate the stepper by this angle (radians unless specified). Positive angles rotate clockwise, negative angles rotate counterclockwise.
def rotate_by(self, angle, degrees = False):
    target = angle * pi / 180 if degrees else angle
    if self.inv:
        target = -target
    if target > 0:
        n = int(target // self.step_size) + 1
        for _ in range(n):
            self.step_c()
    else:
        n = int(-target // self.step_size) + 1
        for _ in range(n):
            self.step_cc()
    if self.inv:
        diff = -diff
[ "def rotateWheel(self, radians):\n pass", "def rotate(self, direction):\n electro = pygame.mixer.Sound('resources/Electro_Motor.wav')\n electro.set_volume(0.2)\n self.rotation += min(max(direction, -1), 1)\n if self.rotation >= 4:\n self.rotation = 0\n elif self.rotation <= -1:\n self.rotation = 3\n if self.speakers:\n self.speakers.play(electro)\n new_turn = \"r={}\".format(self.rotation)\n self._call_gamelog_callbacks(new_turn)", "def run_angle(self, speed: int, rotation_angle: int, then: Stop = Stop.HOLD, wait: bool = True):\n ...", "def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)", "def clockwise(self, duration=None, wait=False, speed=100):\n try:\n self.__move_motor(1, duration, wait, 'spinning clockwise', speed)\n except Exception:\n pass", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def rotate(self,direction):\n if direction == 0:\n self.rot_idx = ( self.rot_idx + 1 ) % 4\n elif direction == 1:\n self.rot_idx = ( self.rot_idx - 1 ) % 4", "def rotate_clockwise(self, theta):\n self.theta = (self.theta + theta) % (2 * math.pi)", "def __run_rotation(self, angle):\n\n cur_angle = self.ensure_in_bounds(_kit.servo[self.__channel_number].angle)\n delta = angle - cur_angle\n\n # divide delta in steps which will be added on the current angle until the destined angle is reached\n for _ in range(int(abs(delta) / self.step_size)):\n if delta < 0:\n cur_angle -= self.step_size\n else:\n cur_angle += self.step_size\n if self.__dump_rotations:\n return\n _kit.servo[self.__channel_number].angle = cur_angle\n time.sleep(self.step_time)\n if self.__dump_rotations:\n return\n if angle != cur_angle:\n _kit.servo[self.__channel_number].angle = angle\n time.sleep(self.step_time)\n if self.__print_rotations:\n print(\"{} performed movement to: {}\".format(self.name,\n _kit.servo[self.__channel_number].angle))", "def rotate_clockwise(self, angle):\r\n angle = degrees_to_radians(angle)\r\n current_angle = atan(self.x / self.y)\r\n angle += current_angle\r\n\r\n length = self.length\r\n self.x = length*sin(angle)\r\n self.y = length*cos(angle)", "def steps_to_angle():\n pass", "def clockwise_rotate(self, speed):\n\t\tif self._last_dir != 'c': # \"c\" indicates that the last rotation of this wheel was clockwise.\n\t\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\t\tGPIO.output(self._dir_pin_2, GPIO.LOW)\n\t\t\tself._last_dir = 'c'\n\n\t\tself._current_dc_val = speed\n\t\tif self._current_dc_val != self._last_dc_val:\n\t\t\tself._motor_pwm.ChangeDutyCycle(speed) # 0.0 - 100.0\n\t\t\tself._last_dc_val = self._current_dc_val", "def rotation(angle, pas):\n\n return (angle + pas) % 360", "def Rotation_fromAngleDirection(*args):\n return _almathswig.Rotation_fromAngleDirection(*args)", "def rotate(self):\n pass", "def rotate(self,amount):\n self.angle += amount\n if self.drawn == True:\n self.draw()", "def rotate(orientation, clockwise=True):\n rotation = -math.pi/2 if clockwise else math.pi/2\n orientation = orientation + rotation\n if orientation > math.pi:\n orientation -= math.pi\n return orientation", "async def rotate(self, angle: float, duration: float) -> None:\n angle *= self._ratio\n if duration < 0:\n raise ValueError\n if angle == 0:\n if duration > 0:\n await asyncio.sleep(duration)\n return\n if duration == 0 or angle / duration > self._max_speed:\n duration = abs(angle / self._max_speed)\n start = time.perf_counter()\n sequence_count = 0\n if angle > 0:\n plus_minus = 1\n else:\n plus_minus = -1\n # Times 2 
because half-step\n steps = 2 * abs(int(float(angle) / 360 * self.STEPS_PER_REV))\n for i in range(steps):\n for pin in range(4):\n current_pin = self._pins[pin]\n if self.SEQUENCE[sequence_count][pin] != 0:\n GPIO.output(current_pin, True)\n else:\n GPIO.output(current_pin, False)\n sequence_count += plus_minus\n # If we reach the end of the sequence start again\n if sequence_count == self.rotation_seq_count:\n sequence_count = 0\n if sequence_count < 0:\n sequence_count = self.rotation_seq_count - 1\n # Wait to match entered duration\n wait = (float(i) / steps * duration) - (time.perf_counter() - start)\n if wait > 0:\n await asyncio.sleep(wait)\n for pin in self._pins:\n GPIO.output(pin, False)", "def rotate90(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert csv into numpy
def csv_2_numpy(file, path=INPUT_PATH, sep=',', type='int8'):
    file_path = path + file
    reader = csv.reader(open(file_path, "r"), delimiter=sep)
    x = list(reader)
    dataset = numpy.array(x).astype(type)
    return dataset
[ "def csv_to_numpy(path, skip_header=False):\n data = [r for r in iterate_csv(path, skip_header)]\n result = np.array(data, dtype=object)\n return result", "def csv_to_numpy_list(self, src):\n\t\t# with open(src, 'r') as input:\n\t\t\t# reader = csv.reader(input, delimiter=\",\")\n\t\tdat = pd.read_csv(src, delimiter=\",\")\n\t\tnp_list = np.array(dat.as_matrix())\n\t\treturn np_list\n\n\t\t# \tfor index, row in enumerate(reader):\n\t\t# \t\tprint(\"Reading index#\"+ str(index))\n\t\t# \t\trow = np.array(row)\n\n\t\t# \t\tif(index==0):\n\t\t# \t\t\tnp_list = row\n\t\t# \t\telse:\n\t\t# \t\t\tnp_list = np.append(np_list, row, axis=0)\n\n\t\t# \tprint(\"Finished Reading \"+str(src))\n\t\t# \treturn np_list", "def parse(csvfilename):\r\n with open(csvfilename, 'r') as f:\r\n reader = csv.reader(f, delimiter=';')\r\n #reader = csv.reader(f, delimiter=';', quotechar=\"'\")\r\n data = list(reader)\r\n # transform data into numpy array\r\n data = np.array(data).astype(float)\r\n return data", "def _csv_to_array(filename):\n return np.genfromtxt(\n filename, delimiter=',', skip_header=True\n )", "def file_to_numpy(filename):\n df = pd.read_csv(filename)\n return df.to_numpy()", "def from_csv(filename) -> np.array:\n df = pd.read_csv(r\"{}\".format(filename), header=None)\n out = df.to_numpy()\n return out", "def csvToArray(filename):\n (num_rows, num_cols) = xFileInfo(filename)\n X = numpy.zeros((num_rows, num_cols), dtype=float) #[row_i][col_i] : float\n delim = getDelimiter(filename)\n f = open(filename, 'r')\n reader = csv.reader(f, delimiter=delim)\n for (row_i, row) in enumerate(reader):\n col_i = 0\n for val in row:\n if val: #ignore empty strings (e.g. at end of row)\n X[row_i, col_i] = float(val)\n col_i += 1\n f.close()\n return X", "def readCSV(filename):\n df = pd.read_csv(filename)\n array = df.to_numpy()\n return array", "def csv_to_numpy(data):\n pixel_list = np.array(data.split(\" \"))\n pixel_list = pixel_list.astype(np.uint8).reshape(FACE_SIZE, FACE_SIZE)\n pixel_list = Image.fromarray(pixel_list).convert('RGB')\n pixel_list = np.array(pixel_list)[:, :, ::-1].copy()\n pixel_list = preprocess_image(pixel_list)\n return pixel_list", "def _csv_reader_numpy(self, mypath):\n onlyfiles = [f for f in listdir(mypath) if f.endswith(\".csv\")]\n all_data = None\n for index, file in enumerate(onlyfiles):\n data = np.loadtxt(file, delimiter=\",\", dtype=float)\n if index == 0:\n all_data = data\n else:\n all_data = np.vstack((all_data, data))\n return all_data", "def read_csv(path):\n rows = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file)\n header = reader.next()\n if header[0].isdigit():\n print \"Warning: Discarding header that looks like numbers.\"\n for line in reader:\n rows.append(map(float, line))\n return np.array(rows)", "def load_csv(fichero):\r\n data = np.loadtxt(fichero, delimiter=',')\r\n X = data[:,:-1]\r\n y = data[:,-1]\r\n return X, y", "def parse_features_to_numpy(path: str):\n\n # Reading the file\n file = open(path,\"r\")\n file = file.read()\n # Replace undesired elements\n file = file.replace('(', '').replace(')', '').replace(' ', '')\n # Split the string and create a numpy array with floats\n descriptor_array = np.array(file.split(',')).astype(float)\n \n return descriptor_array", "def load_data(self):\n\n data_pd = pd.read_csv(self.filename)\n return np.array(data_pd)", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def csv_to_traindata(csv):\n new = pd.read_csv(csv)\n TRAIN_DATA = []\n for i, row in 
new.iterrows():\n item = []\n item.append(row[0])\n ents = ast.literal_eval(row[1])\n item.append(ents)\n TRAIN_DATA.append(item)\n return TRAIN_DATA", "def _read_array(filename, dtype, separator=','):\n cast = np.cast\n data = [[] for dummy in xrange(len(dtype))]\n fi = open(filename, 'r')\n # Skip the first line\n #fi.readline()\n for line in fi:\n fields = line.strip().split(separator)\n for i, number in enumerate(fields):\n data[i].append(float(number))\n for i in xrange(len(dtype)):\n data[i] = cast[dtype[i]](data[i])\n return np.rec.array(data, dtype=dtype)", "def read_data(path, filename, drop_col=\"index\", dt=\"float32\"):\n\tdata = pd.read_csv(path + filename, sep=\",\", dtype=dt)\n\tdata = data.drop(drop_col, axis=1)\n\treturn data.as_matrix()", "def line_to_data(line, np_array=True, dtype=int):\n if np_array:\n return np.fromstring(line, dtype=dtype, sep=\" \")\n else:\n return [dtype(x) for x in line.split(\" \")]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a vocabulary mapping from word to index based on the sentences. Returns vocabulary mapping and inverse vocabulary mapping.
def build_vocab(sentences):
    # Build vocabulary
    word_counts = Counter(itertools.chain(*sentences))  # actually not used
    # Mapping from index to word
    vocabulary_inv = [x[0] for x in word_counts.most_common()]
    vocabulary_inv = list(sorted(vocabulary_inv))
    # Add <UNK>
    vocabulary_inv.insert(0, '</s>')
    # Mapping from word to index
    vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
[ "def build_vocab(self, sentences):\n\t\t# Build the vocab\n\t\tword_counts = collections.Counter(sentences)\n\n\t\t# Mapping from index to word (get the indices of most common words)\n\t\tvocab_inv = [x[0] for x in word_counts.most_common()] # Do we need this?\n\t\tvocab_inv = list(sorted(vocab_inv))\n\n\t\t# Mapping from word to index\n\n\t\tvocab = {x: i for i,x in enumerate(vocab_inv)}\n\n\t\treturn [vocab, vocab_inv]", "def build_vocab(sentences):\r\n # Build vocabulary\r\n word_counts = Counter(itertools.chain(*sentences))\r\n # Mapping from index to word\r\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\r\n # Mapping from word to index\r\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\r\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def build_vocab(self):\n # 根据是否指定词典路径来初始化词典,若指定,使用给定词典,未指定,根据语料生成\n # 词典中项表示字符编号,从0开始,{'我':3,'们':4,'不':5}\n if self.input_dict:\n self.dictionary = self.read_dictionary()\n else:\n counter = collections.Counter(self.words)\n count_pairs = sorted(counter.items(), key = lambda x : -x[1])\n words, _ = list(zip(*count_pairs))\n self.dictionary = dict(zip(words, range(1, len(words))))\n self.save_dictionary()", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def get_sentence_to_context_map(sentences):\n # Load the vocab\n en_vocab = get_english_vocab(DATA_DIR,VOCAB_SIZE)\n\n # Allocate the sentences to buckets\n bucketed = {}\n for sentence in sentences:\n bucket_id = get_bucket(en_vocab,sentence)\n bucketed.setdefault(bucket_id,[])\n bucketed[bucket_id].append(sentence)\n\n mapped = {}\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(sess, True, train_dir=TRAIN_DIR)\n model.batch_size = BATCH_SIZE # We decode 64 sentence at a time.\n # Iterate over each bucket\n for bucket_id,sentences in bucketed.iteritems():\n for batch in chunker(sentences,BATCH_SIZE):\n data = []\n # Tokenize each sentence\n for sentence in batch:\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)\n expected_output = []\n data.append((token_ids, 
expected_output))\n # Use the model to obtain contexts for each sentence in the batch\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({bucket_id: data}, bucket_id)\n contexts = model.step_context(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id)\n features = np.hstack(contexts)\n print 'Encoded {0} sentences into {1} dimensional vectors'.format(*features.shape)\n # Now we align sentences with their contexts\n for i,sentence in enumerate(batch):\n mapped[sentence] = features[i,:].tolist()\n return mapped", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def build_vocab(words):\n \n list_of_words = []\n for line in words:\n for word in line:\n if word not in list_of_words:\n list_of_words.append(word)\n list_of_words.sort()\n \n position_of_word_in_list = {}\n pos=0\n for word in list_of_words:\n position_of_word_in_list[word]=pos\n pos+=1\n \n return position_of_word_in_list", "def word2Id(self):\r\n \r\n self.word2IdVocabulary = {}\r\n self.id2WordVocabulary = {}\r\n self.freqOfWordsInCorpus = {}\r\n \r\n index = 0\r\n for doc in self.docsString:\r\n for word in doc:\r\n if word in self.freqOfWordsInCorpus:\r\n self.freqOfWordsInCorpus[word] += 1\r\n else:\r\n self.freqOfWordsInCorpus[word] = 1\r\n if word not in self.word2IdVocabulary:\r\n self.word2IdVocabulary[word] = index\r\n self.id2WordVocabulary[index] = word\r\n index += 1", "def _build_inverted_index_by_length(self, words):\n inverted_index_by_length = defaultdict(lambda: defaultdict(set))\n\n for word in words:\n length = len(word)\n for key in self._generate_segments(word):\n inverted_index_by_length[length][key].add(word)\n\n return inverted_index_by_length", "def _build_vocab(self, text):\n # print('begin: \\n', text, 'end.\\n')\n # print(type(text))\n vocab = {}\n for indexs in text.index:\n\n entry = text.loc[indexs]\n if not isinstance(entry, str):\n # self.cnt -= 1\n continue\n # print(indexs, type(entry), entry)\n for word in entry.split():\n try:\n vocab[word] += 1\n except:\n vocab[word] = 1\n\n filtered_vocab = {}\n for key in vocab.keys():\n if vocab[key] >= 500:\n filtered_vocab[key] = vocab[key]\n\n self.words_cnt = len(filtered_vocab)\n\n with open(r'Data\\vocab.txt', 'w', encoding='utf-8') as f:\n i = 0\n for 
key in sorted(filtered_vocab.keys()):\n f.write(\"{0},{1},{2}\\n\".format(i, key, filtered_vocab[key]))\n i += 1\n # print(vocab)", "def _word_to_form_indices(cls, words, vocab, word_to_forms):\n word_to_form_indices = {}\n for word in words:\n if word not in word_to_form_indices:\n forms = word_to_forms(word)\n indices = [vocab.word2index(form) for form in forms if form in vocab] # avoid including UNK\n word_to_form_indices[word] = indices\n return word_to_form_indices", "def _build_vocabulary(words, vocabulary_size):\n\n # create dictionary with the most common heroes\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n\n for word, _ in count:\n dictionary[word] = len(dictionary)\n\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n # the word is unknown\n index = 0\n unk_count = unk_count + 1\n data.append(index)\n\n count[0][1] = unk_count\n\n # save the dictionary's reversed version for later usage\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reverse_dictionary", "def build_inverted_index():\r\n # vacabulary list (with out common_words)\r\n file_read = read_file()\r\n vacabulary_list = []\r\n common_words = read_common_words()\r\n for key in file_read:\r\n for element in file_read[key]:\r\n if (element not in vacabulary_list) & (element not in common_words):\r\n vacabulary_list.append(element)\r\n\r\n # word list of each file\r\n content = remove_common_words(file_read, common_words) # content = stopping()\r\n\r\n # generate direction to save result\r\n inverted_index = {}\r\n for item in vacabulary_list:\r\n inverted_index[item] = {}\r\n\r\n for file_id in content.keys():\r\n frequency = Counter(\r\n content[file_id]) # the frequency of words in a file : {'slipstream': 5, 'lift': 4, 'wing': 3}\r\n for word in frequency.keys():\r\n inverted_index[word][file_id] = frequency[word]\r\n\r\n inverted_index = sorted(inverted_index.items(), key=lambda d: d[0], reverse=False)\r\n inverted_index = dict(inverted_index)\r\n return inverted_index", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def convert_to_index(sentences):\n\n\twords=[]\n\tfor idx, sentence in enumerate(sentences):\n\t\tfor word, label, sid, book, bert in sentence:\n\t\t\twords.append([book, sid, word, label])\n\n\treturn words", "def sentence_to_indices(sentence, word_dict):\n return [word_dict.to_index(word) for word in word_tokenize(sentence)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Estimate the true signal mean and interpolate bad channels. This function implements the functionality of the `performReference` function as part of the PREP pipeline on an mne raw object. Notes: This function calls robust_reference first. Currently this function only implements the functionality of default settings, i.e., doRobustPost.
def perform_reference(self):
    # Phase 1: Estimate the true signal mean with robust referencing
    self.robust_reference()
    if self.noisy_channels["bad_all"]:
        self.raw.info["bads"] = self.noisy_channels["bad_all"]
        self.raw.interpolate_bads()
    self.reference_signal = (
        np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6
    )
    rereferenced_index = [
        self.ch_names_eeg.index(ch) for ch in self.rereferenced_channels
    ]
    self.EEG = self.remove_reference(
        self.EEG, self.reference_signal, rereferenced_index
    )

    # Phase 2: Find the bad channels and interpolate
    self.raw._data = self.EEG * 1e-6
    noisy_detector = NoisyChannels(self.raw)
    noisy_detector.find_all_bads(ransac=self.ransac)

    # Record Noisy channels and EEG before interpolation
    self.bad_before_interpolation = noisy_detector.get_bads(verbose=True)
    self.EEG_before_interpolation = self.EEG.copy()

    bad_channels = _union(self.bad_before_interpolation, self.unusable_channels)
    self.raw.info["bads"] = bad_channels
    self.raw.interpolate_bads()
    reference_correct = (
        np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6
    )
    self.EEG = self.raw.get_data() * 1e6
    self.EEG = self.remove_reference(
        self.EEG, reference_correct, rereferenced_index
    )

    # reference signal after interpolation
    self.reference_signal_new = self.reference_signal + reference_correct
    # MNE Raw object after interpolation
    self.raw._data = self.EEG * 1e-6

    # Still noisy channels after interpolation
    self.interpolated_channels = bad_channels
    noisy_detector = NoisyChannels(self.raw)
    noisy_detector.find_all_bads(ransac=self.ransac)
    self.still_noisy_channels = noisy_detector.get_bads()
    self.raw.info["bads"] = self.still_noisy_channels
    return self
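For intuition, a standalone numpy sketch of the remove_reference step the method above relies on in both phases: subtract the estimated reference time series from the selected channel rows. The helper body here is inferred from how it is called and is not necessarily the project's implementation; the data are random placeholders.

import numpy as np

def remove_reference_sketch(signal, reference, channel_index):
    # Subtract the reference time series from the listed channel rows only.
    rereferenced = signal.copy()
    rereferenced[channel_index, :] = rereferenced[channel_index, :] - reference
    return rereferenced

eeg = np.random.randn(4, 1000)               # 4 channels x 1000 samples, in microvolts
reference = np.nanmean(eeg[:3, :], axis=0)   # mean of the chosen reference channels
out = remove_reference_sketch(eeg, reference, [0, 1, 2, 3])
print(out.shape)                             # (4, 1000)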
[ "def robust_reference(raw, reference_out, montage_kind='standard_1020'):\n raw.rename_channels(lambda s: s.strip(\".\"))\n ch_names = raw.info['ch_names']\n\n # Warn if evaluation and reference channels are not the same for robust\n if not set(reference_out['ref_chs']) == set(reference_out['eval_chs']):\n logger.warning('robustReference: Reference channels and'\n 'evaluation channels should be same for robust reference')\n\n # raw._data = detrend(raw.get_data())\n\n # Determine unusable channels and remove them from the reference channels\n signal_noisy = bad_channels_detector(raw)\n signal_noisy.find_noisy_channels()\n noisy_channels = {'bad_by_nan': signal_noisy.bad_by_nan,\n 'bad_by_flat': signal_noisy.bad_by_flat,\n 'bad_by_deviation': signal_noisy.bad_by_deviation,\n 'bad_by_hf_noise': signal_noisy.bad_by_hf_noise,\n 'bad_by_correlation': signal_noisy.bad_by_correlation,\n 'bad_by_dropout': signal_noisy.bad_by_dropout,\n 'bad_by_ransac': signal_noisy.bad_by_ransac,\n 'bad_all': signal_noisy.get_bads()}\n logger.info('Bad channels: {}'.format(noisy_channels))\n\n unusable_channels = list(set(signal_noisy.bad_by_nan + signal_noisy.bad_by_flat))\n reference_channels = set_diff(reference_out['ref_chs'], unusable_channels)\n\n # Get initial estimate of the mean by the specified method\n signal = raw.get_data()\n ref_initial = np.median(raw.get_data(picks=reference_channels), axis=0)\n unusable_index = [ch_names.index(ch) for ch in unusable_channels]\n signal_tmp = remove_reference(signal, ref_initial, unusable_index)\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n montage = mne.channels.read_montage(kind=montage_kind, ch_names=raw_tmp.ch_names)\n raw_tmp.set_montage(montage)\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp\n signal_noisy = bad_channels_detector(raw_tmp)\n signal_noisy.find_noisy_channels()\n noisy_channels['bad_by_nan'] = union(noisy_channels['bad_by_nan'], signal_noisy.bad_by_nan)\n noisy_channels['bad_by_flat'] = union(noisy_channels['bad_by_flat'], signal_noisy.bad_by_flat)\n noisy_channels['bad_by_deviation'] = union(noisy_channels['bad_by_deviation'], signal_noisy.bad_by_deviation)\n noisy_channels['bad_by_hf_noise'] = union(noisy_channels['bad_by_hf_noise'], signal_noisy.bad_by_hf_noise)\n noisy_channels['bad_by_correlation'] = union(noisy_channels['bad_by_correlation'],\n signal_noisy.bad_by_correlation)\n noisy_channels['bad_by_dropout'] = union(noisy_channels['bad_by_dropout'], signal_noisy.bad_by_dropout)\n noisy_channels['bad_by_ransac'] = union(noisy_channels['bad_by_ransac'], signal_noisy.bad_by_ransac)\n noisy_channels['bad_all'] = union(noisy_channels['bad_all'], signal_noisy.get_bads())\n logger.info('Bad channels: {}'.format(noisy_channels))\n\n if iterations > 1 and (not noisy_channels['bad_all'] or\n set(noisy_channels['bad_all']) == set(noisy_channels_old)) or \\\n iterations > max_iteration_num:\n break\n noisy_channels_old = noisy_channels['bad_all'].copy()\n\n if raw_tmp.info['nchan']-len(noisy_channels['bad_all']) < 2:\n logger.error('robustReference:TooManyBad '\n 'Could not perform a robust reference -- not enough good channels')\n\n if noisy_channels['bad_all']:\n raw_tmp._data = signal\n raw_tmp.info['bads'] = noisy_channels['bad_all']\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data()\n else:\n signal_tmp = signal\n reference_signal = np.nanmean(raw_tmp.get_data(picks=reference_channels), axis=0)\n signal_tmp = 
remove_reference(signal, reference_signal, unusable_index)\n iterations = iterations + 1\n logger.info('Iterations: {}'.format(iterations))\n\n logger.info('Robust reference done')\n return noisy_channels", "def robust_reference(self):\n raw = self.raw.copy()\n raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq)\n\n # Determine unusable channels and remove them from the reference channels\n noisy_detector = NoisyChannels(raw, do_detrend=False)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels_original = {\n \"bad_by_nan\": noisy_detector.bad_by_nan,\n \"bad_by_flat\": noisy_detector.bad_by_flat,\n \"bad_by_deviation\": noisy_detector.bad_by_deviation,\n \"bad_by_hf_noise\": noisy_detector.bad_by_hf_noise,\n \"bad_by_correlation\": noisy_detector.bad_by_correlation,\n \"bad_by_ransac\": noisy_detector.bad_by_ransac,\n \"bad_all\": noisy_detector.get_bads(),\n }\n self.noisy_channels = self.noisy_channels_original.copy()\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n self.unusable_channels = _union(\n noisy_detector.bad_by_nan, noisy_detector.bad_by_flat\n )\n # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR)\n self.reference_channels = _set_diff(\n self.reference_channels, self.unusable_channels\n )\n\n # Get initial estimate of the reference by the specified method\n signal = raw.get_data() * 1e6\n self.reference_signal = (\n np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n reference_index = [\n self.ch_names_eeg.index(ch) for ch in self.reference_channels\n ]\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp * 1e-6\n noisy_detector = NoisyChannels(raw_tmp)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels[\"bad_by_nan\"] = _union(\n self.noisy_channels[\"bad_by_nan\"], noisy_detector.bad_by_nan\n )\n self.noisy_channels[\"bad_by_flat\"] = _union(\n self.noisy_channels[\"bad_by_flat\"], noisy_detector.bad_by_flat\n )\n self.noisy_channels[\"bad_by_deviation\"] = _union(\n self.noisy_channels[\"bad_by_deviation\"], noisy_detector.bad_by_deviation\n )\n self.noisy_channels[\"bad_by_hf_noise\"] = _union(\n self.noisy_channels[\"bad_by_hf_noise\"], noisy_detector.bad_by_hf_noise\n )\n self.noisy_channels[\"bad_by_correlation\"] = _union(\n self.noisy_channels[\"bad_by_correlation\"],\n noisy_detector.bad_by_correlation,\n )\n self.noisy_channels[\"bad_by_ransac\"] = _union(\n self.noisy_channels[\"bad_by_ransac\"], noisy_detector.bad_by_ransac\n )\n self.noisy_channels[\"bad_all\"] = _union(\n self.noisy_channels[\"bad_all\"], noisy_detector.get_bads()\n )\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n if (\n iterations > 1\n and (\n not self.noisy_channels[\"bad_all\"]\n or set(self.noisy_channels[\"bad_all\"]) == set(noisy_channels_old)\n )\n or iterations > max_iteration_num\n ):\n break\n noisy_channels_old = self.noisy_channels[\"bad_all\"].copy()\n\n if raw_tmp.info[\"nchan\"] - len(self.noisy_channels[\"bad_all\"]) < 2:\n raise ValueError(\n \"RobustReference:TooManyBad \"\n \"Could not perform a robust reference -- not enough good channels\"\n )\n\n if self.noisy_channels[\"bad_all\"]:\n raw_tmp._data = signal * 1e-6\n raw_tmp.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n 
raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data() * 1e6\n else:\n signal_tmp = signal\n self.reference_signal = (\n np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0)\n * 1e6\n )\n\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n iterations = iterations + 1\n logger.info(\"Iterations: {}\".format(iterations))\n\n logger.info(\"Robust reference done\")\n return self.noisy_channels, self.reference_signal", "def compensate(manual=False, num_imgs=3, led_power=0.1, sample_ref_img=None, laser_ref_img=None, ref_coords=None):\r\n # Never touch these again while compensating.\r\n insert_DM()\r\n block_sc()\r\n time.sleep(0.1)\r\n #global sample ref image to be deilluminated\r\n deillum_sample_ref_img = sample_ref_img.astype(float) / illumination\r\n deillum_sample_ref_img = equalize_histogram_and_8bit(deillum_sample_ref_img)\r\n \r\n # Take laser reference image\r\n LED_power(0)\r\n unblock_laser()\r\n curr_laser_img = equalize_histogram_and_8bit(sp.ndimage.median_filter(get_pco_image(num_imgs), 3))\r\n # Take current sample image (with laser spot)\r\n LED_power(led_power)\r\n curr_img_8bit = take_deilluminated_image(num_imgs=num_imgs)\r\n \r\n # Compensate.\r\n if manual:\r\n _manual_compensate(num_imgs=num_imgs, led_power=led_power,\r\n sample_img=curr_img_8bit, laser_img=curr_laser_img,\r\n sample_ref_img=deillum_sample_ref_img, laser_ref_img=laser_ref_img, ref_coords=ref_coords)\r\n else:\r\n good = _auto_compensate(num_imgs=num_imgs, led_power=led_power,\r\n sample_img=curr_img_8bit, laser_img=curr_laser_img,\r\n sample_ref_img=deillum_sample_ref_img, laser_ref_img=laser_ref_img, ref_coords=ref_coords)\r\n if not good:\r\n _manual_compensate(num_imgs=num_imgs, led_power=led_power,\r\n sample_img=curr_img_8bit, laser_img=curr_laser_img,\r\n sample_ref_img=deillum_sample_ref_img, laser_ref_img=laser_ref_img, ref_coords=ref_coords)\r\n return True", "def calibrate_image(rgb_image, ref_means, ref_stds):\n calibrated_img = rgb_image.copy().astype('float32')\n for i in range(3):\n calibrated_img[:,:,i] = calibrated_img[:,:,i]-np.mean(calibrated_img[:,:,i])\n calibrated_img[:,:,i] = calibrated_img[:,:,i]/np.std(calibrated_img[:,:,i])\n calibrated_img[:,:,i] = calibrated_img[:,:,i]*ref_stds[i] + ref_means[i]\n calibrated_img[:,:,i] = np.clip(calibrated_img[:,:,i],0,255)\n return calibrated_img.astype('uint8')", "def recall(test, reference):\n\n assert type(test) == np.ndarray, \"Test type: {}\".format(type(test))\n assert type(reference) == np.ndarray,\\\n \"Reference type: {}\".format(type(reference))\n assert test.shape == reference.shape, \"Shapes {} and {}\".format(\n test.shape, reference.shape)\n if not (np.any(test) and np.any(reference)):\n return 0.\n\n return np.sum((test != 0)*(reference != 0)) /\\\n np.sum(reference != 0, dtype=np.float32)", "def calibrate(raw_data, white_reference, dark_reference):\n # Auto-increment device\n params.device += 1\n\n # Collect the number of wavelengths present\n num_bands = len(white_reference.wavelength_dict)\n den = white_reference.array_data - dark_reference.array_data\n\n # Calibrate using reflectance = (raw data - dark reference) / (white reference - dark reference)\n output_num = []\n for i in range(0, raw_data.lines):\n ans = raw_data.array_data[i,].astype(np.float16) - dark_reference.array_data\n output_num.append(ans)\n num = np.stack(output_num, axis=2)\n output_calibrated = []\n for i in range(0, raw_data.lines):\n ans1 = raw_data.array_data[i,] / den\n 
output_calibrated.append(ans1)\n\n # Reshape into hyperspectral datacube\n scalibrated = np.stack(output_calibrated, axis=2)\n calibrated_array = np.transpose(scalibrated[0], (1, 0, 2))\n calibrated_array[np.where(calibrated_array < 0)] = 0\n\n # Find array min and max values\n max_pixel = float(np.amax(calibrated_array))\n min_pixel = float(np.amin(calibrated_array))\n\n # Make a new class instance with the calibrated hyperspectral image\n calibrated = Spectral_data(array_data=calibrated_array, max_wavelength=raw_data.max_wavelength,\n min_wavelength=raw_data.min_wavelength, max_value=max_pixel, min_value=min_pixel,\n d_type=raw_data.d_type,\n wavelength_dict=raw_data.wavelength_dict, samples=raw_data.samples,\n lines=raw_data.lines, interleave=raw_data.interleave,\n wavelength_units=raw_data.wavelength_units, array_type=raw_data.array_type,\n pseudo_rgb=None, filename=None, default_bands=raw_data.default_bands)\n\n # Make pseudo-rgb image for the calibrated image\n calibrated.pseudo_rgb = _make_pseudo_rgb(spectral_array=calibrated)\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(calibrated.pseudo_rgb)\n elif params.debug == \"print\":\n print_image(calibrated.pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_calibrated_rgb.png\"))\n\n return calibrated", "def run(self, sensorRef, exposure):\n filterName = exposure.getFilter().getName()\n if filterName != 'y':\n # No correction to be made\n return\n if sensorRef.dataId[\"ccd\"] in range(104, 112):\n # No correction data: assume it's zero\n return\n\n # The LEDs that are causing the Y straylight have not been covered yet (on 2017-11-27),\n # but they will be covered in the near future.\n # Once the LEDs are covered, we will have to uncomment the following statement:\n #\n # if (ccdExposure is newer than a certain date):\n # return\n\n header = sensorRef.get('raw_md')\n if self.config.doRotatorAngleCorrection:\n angleStart, angleEnd = inrStartEnd(header)\n self.log.debug(\n \"(INR-STR, INR-END) = ({:g}, {:g}) (FITS header says ({:g}, {:g})).\".format(\n angleStart, angleEnd, header.getDouble('INR-STR'), header.getDouble('INR-END'))\n )\n else:\n angleStart = header.getDouble('INR-STR')\n angleEnd = None\n\n self.log.info(\"Correcting y-band background\")\n filename = sensorRef.get(\"yBackground_filename\")[0]\n model = get_ybackground(filename, angleStart, None if angleStart == angleEnd else angleEnd)\n\n # Some regions don't have useful model values because the amplifier is dead when the darks were taken\n # \n badAmps = {9: [0, 1, 2, 3], 33: [0, 1], 43: [0]} # Known bad amplifiers in the data: {ccdId: [ampId]}\n detId = exposure.getDetector().getId()\n if detId in badAmps:\n isBad = numpy.zeros_like(model, dtype=bool)\n for ii in badAmps[detId]:\n amp = exposure.getDetector()[ii]\n box = amp.getBBox()\n isBad[box.getBeginY():box.getEndY(), box.getBeginX():box.getEndX()] = True\n mask = exposure.getMaskedImage().getMask()\n if numpy.all(isBad):\n model[:] = 0.0\n else:\n model[isBad] = numpy.median(model[~isBad])\n mask.array[isBad] |= mask.getPlaneBitMask(\"SUSPECT\")\n\n model *= exposure.getInfo().getVisitInfo().getExposureTime()\n exposure.image.array -= model", "def _fitgeometry_refband(ellipsefit, geometry0, majoraxis, refband='r', verbose=False,\n integrmode='median', sclip=3, nclip=2):\n smamax = majoraxis # inner, outer radius\n #smamax = 1.5*majoraxis\n smamin = ellipsefit['psfsize_{}'.format(refband)] / ellipsefit['refpixscale']\n\n if smamin > majoraxis:\n print('Warning! 
this galaxy is smaller than three times the seeing FWHM!')\n \n t0 = time.time()\n print('Finding the mean geometry using the reference {}-band image...'.format(refband), end='')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n factor = np.arange(1.0, 6, 0.5) # (1, 2, 3, 3.5, 4, 4.5, 5, 10)\n for ii, fac in enumerate(factor): # try a few different starting sma0\n sma0 = smamin*fac\n try:\n iso0 = ellipse0.fit_image(sma0, integrmode=integrmode, sclip=sclip, nclip=nclip)\n except:\n iso0 = []\n sma0 = smamin\n if len(iso0) > 0:\n break\n print('...took {:.3f} sec'.format(time.time()-t0))\n\n if len(iso0) == 0:\n print('Initial ellipse-fitting failed.')\n else:\n # Try to determine the mean fitted geometry, for diagnostic purposes,\n # masking out outliers and the inner part of the galaxy where seeing\n # dominates.\n good = (iso0.sma > smamin) * (iso0.stop_code <= 4)\n #good = ~sigma_clip(iso0.pa, sigma=3).mask\n #good = (iso0.sma > smamin) * (iso0.stop_code <= 4) * ~sigma_clip(iso0.pa, sigma=3).mask\n #good = (iso0.sma > 3 * ellipsefit['psfsigma_{}'.format(refband)]) * ~sigma_clip(iso0.pa, sigma=3).mask\n #good = (iso0.stop_code < 4) * ~sigma_clip(iso0.pa, sigma=3).mask\n\n ngood = np.sum(good)\n if ngood == 0:\n print('Too few good measurements to get ellipse geometry!')\n else:\n ellipsefit['success'] = True\n ellipsefit['init_smamin'] = iso0.sma[good].min()\n ellipsefit['init_smamax'] = iso0.sma[good].max()\n\n ellipsefit['x0_median'] = np.mean(iso0.x0[good])\n ellipsefit['y0_median'] = np.mean(iso0.y0[good])\n ellipsefit['x0_err'] = np.std(iso0.x0[good]) / np.sqrt(ngood)\n ellipsefit['y0_err'] = np.std(iso0.y0[good]) / np.sqrt(ngood)\n\n ellipsefit['pa_moment'] = (np.degrees(np.mean(iso0.pa[good]))+90) % 180\n ellipsefit['pa_moment_err'] = np.degrees(np.std(iso0.pa[good])) / np.sqrt(ngood)\n ellipsefit['eps_moment'] = np.mean(iso0.eps[good])\n ellipsefit['eps_moment_err'] = np.std(iso0.eps[good]) / np.sqrt(ngood)\n\n if verbose:\n print(' x0 = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['x0_median'], ellipsefit['x0_err'], ellipsefit['x0_moment']))\n print(' y0 = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['y0_median'], ellipsefit['y0_err'], ellipsefit['y0_moment']))\n print(' PA = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['pa_moment'], ellipsefit['pa_moment_err'], np.degrees(geometry0.pa)+90))\n print(' eps = {:.3f}+/-{:.3f} (initial={:.3f})'.format(\n ellipsefit['eps_moment'], ellipsefit['eps_moment_err'], geometry0.eps))\n\n return ellipsefit", "def compute_reference_spectrum(self):\n self.reference_spectrum = np.mean(self.raw_spec[self.refs, :],axis=0)", "def refere(eeg, channels, mode='contralateral'):\n\tbipolar_map = {'Fp1':'Fp2', 'Fp2':'Fp2', 'F3':'F4', 'F4':'F4', 'C3':'C4', 'C4':'C4', 'T3':'T4', 'T4':'T4', 'P3':'P4', 'P4':'P4', 'O1':'O2', 'O2':'O2'}\n\tif mode not in ['monopolar', 'contralateral', 'bipolar', 'linked', 'average']:\n\t\tprint 'WARNING - refere(): parameter \"mode\" can only be \"monopolar\", \"contralateral\", \"bipolar\" or \"linked\". 
Using \"contralateral\"!'\n\t\tmode = 'contralateral'\n\tif mode == 'linked':\t\t\n\t\treference = (eeg[:,channels.index('A1')] + eeg[:,channels.index('A2')])/2.\n\tif mode == 'average':\n\t\treference = np.zeros(len(eeg), dtype=np.float32)\n\t\tchcounter = 0\n\t\tfor channel in range(len(channels)):\n\t\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t\treference += eeg[:, channel]\n\t\t\t\tchcounter += 1\n\t\treference /= chcounter\n\tfor channel in range(len(channels)):\n\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t# mindenkit referalunk kiveve magukat a referencia csatornakat\n\t\t\tif mode == 'contralateral':\n\t\t\t\tif (channels[channel] in ['Fp2', 'F4', 'C4', 'T4', 'P4', 'O2']):\n\t\t\t\t\tref_channel = channels.index('A1')\n\t\t\t\telif (channels[channel] in ['Fp1', 'F3', 'C3', 'T3', 'P3', 'O1']):\n\t\t\t\t\tref_channel = channels.index('A2')\n\t\t\t\telse:\n\t\t\t\t\tprint \"Error: what kind of channel is this: \", channels[channel], \" cannot reference!!!!\"\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\tif mode == 'bipolar':\n\t\t\t\tref_channel = channels.index(bipolar_map[channels[channel]])\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\teeg[:, channel] -= reference", "def reref_data(self, data):\n if self._ref_channels is not None or self._channels_to_ref is not None:\n if self._ref_channels is None: # Re-reference to global average.\n self._ref_channels = [range(data.shape[1])]\n if self._channels_to_ref is None: # Re-reference all channels.\n self._channels_to_ref = [range(data.shape[1])]\n d = np.copy(data) # create copy to avoid using re-referenced data\n for ref, chans in zip(self._ref_channels, self._channels_to_ref):\n data[:, list(chans)] -= np.mean(d[:, list(ref)], axis=1, keepdims=True)\n return data", "def applyNormalization(ds, reference, target=-1):\n print 'normalization of', ds.title\n # Store reference name for later\n refname = str(reference)\n # Normalization\n reference = getattr(ds,reference)\n\n # check if reference/target is a number\n # TODO: gumpy doesn't allow us to handle a scalar with variance\n # for multiplying arrays, so we can't propagate variance at present\n numericReference = isinstance(reference, (int, long, float))\n \n # check arguments\n if not numericReference:\n if reference.ndim != 1:\n raise AttributeError('reference.ndim != 1')\n if reference.shape[0] != ds.shape[0]:\n raise AttributeError('reference.shape[0] != ds.shape[0] (%d != %d)' % (reference.shape[0],ds.shape[0]))\n\n def do_norm(rs, f, varf):\n # We propagate errors in the data, but not in\n # the ancillary values\n print 'In do_norm, given %f(%f)' % (f,varf)\n # Funny syntax below to make sure we write into the original area,\n # not assign a new value\n rs.var *= f * f\n rs.var += varf * rs * rs\n rs.storage *= f\n try: #These may be absent in some cases\n rs.bm1_counts *= f\n rs.bm2_counts *= f\n rs.bm3_counts *= f\n rs.detector_time *= f\n rs.total_counts *= f\n except AttributeError:\n pass\n \n # normalization\n rs = ds.__copy__()\n copy_metadata_deep(rs,ds) #NeXuS metadata\n rs.copy_cif_metadata(ds) #CIF metadata\n if numericReference and target > 0:\n # We have a single number to refer to for normalisation, so\n # we are effectively scaling everything by a single number\n scale_factor = float(target)/reference\n variance = scale_factor * target/(reference*reference)\n do_norm(rs, 
scale_factor, variance)\n info_string = \"Data multiplied by %f with variance %f\" % (scale_factor,variance)\n elif not numericReference:\n # Each step has a different value, and we manually perform the\n # error propagation \n reference = Data(reference)\n if target <= 0:\n target = reference.max()\n for i in xrange(rs.shape[0]):\n # handle unexpected zero values\n one_reference = reference[i]\n if one_reference == 0:\n one_reference = 0.1 #so small it is like zero\n print \"Warning: zero monitor counts found at step %d\" % i\n f = float(target)/one_reference\n v = f*target/(one_reference*one_reference)\n # Funny syntax below to make sure we write into the original area,\n # not assign a new value\n tar_shape = [1,rs.shape[1],rs.shape[2]]\n tar_origin = [i,0,0]\n rss = rs.storage.get_section(tar_origin,tar_shape).get_reduced()\n rsv = rs.var.get_section(tar_origin,tar_shape).get_reduced()\n rs.var[i] = rsv*f * f\n rs.var[i] += v * rss * rss\n rs.storage[i] = rs.storage[i]*f\n info_string = \"Data normalised to %f on %s with error propagation assuming counting statistics\" % (float(target),refname)\n else:\n # interesting note - if we get here, we are passed a single reference number\n # and a negative target, meaning that we use the reference as the target and\n # end up multiplying by 1.0, so no need to do anything at all.\n target = reference\n info_string = \"No normalisation applied to data.\"\n rs.add_metadata('_pd_proc_info_data_reduction',info_string, append=True)\n print 'normalized:', ds.title\n return rs,target", "def compute_reference_spectra(self):\n self.reference_spectra = np.zeros((self.nrefs, self.nchan))\n for iref in range(self.nrefs):\n the_refs = range(self.ref_ranges[iref][0], \n self.ref_ranges[iref][1] + 1)\n self.reference_spectra[iref, :] = np.mean(\n self.raw_spec[the_refs, :], axis=0)", "def _guess_scalar_gain_from_vis(Nant,vis_over_mod,vis_over_mod_weight,refant_i,antenna_i,antenna_j):\n\n\n # Make a more substantitive guess from baselines to refant\n \n # TBD: Handle flagged baselines to refant below!\n\n # Nominal guess is all ones\n g0=np.ones(Nant,dtype=complex)\n gwt=np.zeros(Nant,dtype=float)\n\n # masks to select baselines to refant\n not_acs=antenna_i!=antenna_j\n mask_j=np.logical_and(antenna_j==refant_i,not_acs) # selects baselines i-refant\n mask_i=np.logical_and(antenna_i==refant_i,not_acs) # selects baselines refant-j\n mask_ij=np.logical_or(mask_i,mask_j) # all refant baselines\n\n ants_i_to_refant=antenna_i[mask_j]\n ants_j_to_refant=antenna_j[mask_i]\n ants_to_refant=np.hstack((ants_i_to_refant,ants_j_to_refant)) # all ants except refant\n\n # NB: In vis (power) units to start\n g0[ants_i_to_refant]=vis_over_mod[mask_j]\n g0[ants_j_to_refant]=np.conj(vis_over_mod[mask_i])\n gwt[ants_to_refant]=vis_over_mod_weight[mask_ij]\n\n if np.sum(gwt) > 0.0:\n # set refant to mean amp of others\n A=np.average(np.absolute(g0),0,gwt)\n g0[refant_i]=complex(A)\n\n # Divide all by sqrt(A) to convert all to gain (~voltage) units\n g0/=np.sqrt(A)\n else:\n # refant is no good, so just use 1,0\n g0[:]=complex(1.0)\n \n # Return the result\n return g0", "def compute_reference(self):\n print \"compute reference\"\n self.ref_micro_model_parameters = self.micro_model_parameters\n self.ref_micro_state = self.evolution_operator(self,self.ref_tmax,reference=True)\n self.ref_macro_state = self.restriction_operator(self,self.ref_micro_state)\n \n return self.ref_macro_state", "def updateRefImage(self):\n for i in range(self.tSize):\n if np.sum(self.img_array[i,:]) != 0:\n 
self.ref_array[i,:] = self.refSource\n \n \"\"\"Re-calculate the corrector space image as well\"\"\"\n self.kick_array[i,:] = self.locateKicks(self.img_array[i,:])", "def bipolar_reference(raw, dist_thresh=0.01, verbose=True):\n raw.load_data()\n ch_names = [name.replace(' ', '') for name in raw.ch_names] # no spaces\n bipolar_names = list()\n locs = list()\n data = list()\n for i, ch in enumerate(ch_names):\n elec_name = ''.join([letter for letter in ch if\n not letter.isdigit()]).rstrip()\n number = ''.join([letter for letter in ch if\n letter.isdigit()]).rstrip()\n pair = f'{elec_name}{int(number) + 1}'\n if pair not in ch_names:\n continue\n j = ch_names.index(pair)\n loc = raw.info['chs'][i]['loc'][:3]\n loc2 = raw.info['chs'][j]['loc'][:3]\n if np.linalg.norm(loc - loc2) > dist_thresh:\n continue\n data.append(raw._data[i] - raw._data[j])\n locs.append((loc + loc2) / 2)\n bipolar_names.append(f'{ch}-{pair}')\n if verbose:\n print(f'Bipolar referencing {ch} and {pair}')\n bipolar_info = mne.create_info(bipolar_names, raw.info['sfreq'], 'seeg')\n for loc, ch in zip(locs, bipolar_info['chs']):\n ch['loc'][:3] = loc\n return mne.io.RawArray(np.array(data), bipolar_info, raw.first_samp)", "def _uncross_reference_optimization(self) -> None:\n for unused_key, deqatn in self.dequations.items():\n deqatn.uncross_reference()\n for unused_key, dresp in self.dresps.items():\n dresp.uncross_reference()\n for unused_key, dconstrs in self.dconstrs.items():\n for dconstr in dconstrs:\n dconstr.uncross_reference()\n\n for unused_key, dvcrel in self.dvcrels.items():\n dvcrel.uncross_reference()\n for unused_key, dvmrel in self.dvmrels.items():\n dvmrel.uncross_reference()\n for unused_key, dvprel in self.dvprels.items():\n dvprel.uncross_reference()\n for unused_key, desvar in self.desvars.items():\n desvar.uncross_reference()\n for unused_key, desvar in self.topvar.items():\n desvar.uncross_reference()", "def correctQE(self):\n\t\tif self.lambd.max()-self.lambd.min() > 3000 or n.mean(self.lambd)<7300 or n.mean(self.lambd)>8300 :\n\t\t\tprint( \"cannot QE correct\" )\n\n\t\txravg = 8900\n\t\tyravg = 150\n\t\tcorrectionavg = self.survey.paramsEndr[0] + self.survey.paramsEndr[1] * self.lambd\n\t\tself.xavg = (self.lambd - xravg)/yravg \n\t\tok1 = (self.xavg > 0) & ( self.xavg < 1)\n\t\tself.cor2avg = correctionavg*self.xavg + 1*(1-self.xavg)\n\t\tok2=(ok1)&(self.cor2avg>1)\n\t\tself.cor2avg[(ok2==False)] = n.ones_like(self.cor2avg[(ok2==False)])\n\n\t\t#npixel=len(self.lambd)\n\t\tself.left=(self.lambd<=self.lambdSwitch) # n.arange(4096)\n\t\tself.right=(self.lambd>self.lambdSwitch) # n.arange(4096,4096*2,1)\n\n\t\t#xx_b=self.lambd[self.left]\n\t\t#xx_r=self.lambd[self.right]\n\n\t\t#corr_b = params[num,0] + params[num,1]*self.lambd[self.left] + params[num,2]*self.lambd[self.left]**2\n\t\t#corr_r = params[num+4,0] + params[num+4,1]*self.lambd[self.right] + params[num+4,2]*self.lambd[self.right]**2\n\t\tcorr_b = 1./( self.survey.params.T[self.chipNO][0] + self.survey.params.T[self.chipNO][1] * self.lambd[self.left] + self.survey.params.T[self.chipNO][2]*self.lambd[self.left]**2 )\n\t\tcorr_r = 1./( self.survey.params.T[self.chipNO+4][0] + self.survey.params.T[self.chipNO+4][1]* self.lambd[self.right] + self.survey.params.T[self.chipNO+4][2] *self.lambd[self.right]**2 )\n\t\t# print( corr_b, corr_r, self.cor2avg)\n\t\t# print) 
\"spectrum\",self.spec)\n\n\t\tself.specTMP=n.zeros_like(self.spec)\n\t\tself.specErrTMP=n.zeros_like(self.specErr)\n\t\tself.ivarTMP=n.zeros_like(self.ivar)\n\n\t\tself.specTMP[self.left]=self.spec[self.left]*corr_b\n\t\tself.specTMP[self.right]=self.spec[self.right]*corr_r* self.cor2avg[self.right]\n\n\t\tself.specErrTMP[self.left]=self.specErr[self.left]*corr_b\n\t\tself.specErrTMP[self.right]=self.specErr[self.right]*corr_r* self.cor2avg[self.right]\n\n\t\tself.ivarTMP[self.left]=self.ivar[self.left]/(corr_b*corr_b)\n\t\tself.ivarTMP[self.right]=self.ivar[self.right]/(corr_r*corr_r* self.cor2avg[self.right]*self.cor2avg[self.right] )\n\n\t\tself.specTMP=self.specTMP/self.survey.throughput.y[self.pixSampled]\n\t\tself.specErrTMP=self.specErrTMP/self.survey.throughput.y[self.pixSampled]\n\t\tself.ivarTMP=self.ivarTMP*self.survey.throughput.y[self.pixSampled]**2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detect bad channels and estimate the robust reference signal. This function implements the functionality of the `robustReference` function as part of the PREP pipeline on an MNE Raw object.
def robust_reference(self): raw = self.raw.copy() raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq) # Determine unusable channels and remove them from the reference channels noisy_detector = NoisyChannels(raw, do_detrend=False) noisy_detector.find_all_bads(ransac=self.ransac) self.noisy_channels_original = { "bad_by_nan": noisy_detector.bad_by_nan, "bad_by_flat": noisy_detector.bad_by_flat, "bad_by_deviation": noisy_detector.bad_by_deviation, "bad_by_hf_noise": noisy_detector.bad_by_hf_noise, "bad_by_correlation": noisy_detector.bad_by_correlation, "bad_by_ransac": noisy_detector.bad_by_ransac, "bad_all": noisy_detector.get_bads(), } self.noisy_channels = self.noisy_channels_original.copy() logger.info("Bad channels: {}".format(self.noisy_channels)) self.unusable_channels = _union( noisy_detector.bad_by_nan, noisy_detector.bad_by_flat ) # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR) self.reference_channels = _set_diff( self.reference_channels, self.unusable_channels ) # Get initial estimate of the reference by the specified method signal = raw.get_data() * 1e6 self.reference_signal = ( np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6 ) reference_index = [ self.ch_names_eeg.index(ch) for ch in self.reference_channels ] signal_tmp = self.remove_reference( signal, self.reference_signal, reference_index ) # Remove reference from signal, iteratively interpolating bad channels raw_tmp = raw.copy() iterations = 0 noisy_channels_old = [] max_iteration_num = 4 while True: raw_tmp._data = signal_tmp * 1e-6 noisy_detector = NoisyChannels(raw_tmp) noisy_detector.find_all_bads(ransac=self.ransac) self.noisy_channels["bad_by_nan"] = _union( self.noisy_channels["bad_by_nan"], noisy_detector.bad_by_nan ) self.noisy_channels["bad_by_flat"] = _union( self.noisy_channels["bad_by_flat"], noisy_detector.bad_by_flat ) self.noisy_channels["bad_by_deviation"] = _union( self.noisy_channels["bad_by_deviation"], noisy_detector.bad_by_deviation ) self.noisy_channels["bad_by_hf_noise"] = _union( self.noisy_channels["bad_by_hf_noise"], noisy_detector.bad_by_hf_noise ) self.noisy_channels["bad_by_correlation"] = _union( self.noisy_channels["bad_by_correlation"], noisy_detector.bad_by_correlation, ) self.noisy_channels["bad_by_ransac"] = _union( self.noisy_channels["bad_by_ransac"], noisy_detector.bad_by_ransac ) self.noisy_channels["bad_all"] = _union( self.noisy_channels["bad_all"], noisy_detector.get_bads() ) logger.info("Bad channels: {}".format(self.noisy_channels)) if ( iterations > 1 and ( not self.noisy_channels["bad_all"] or set(self.noisy_channels["bad_all"]) == set(noisy_channels_old) ) or iterations > max_iteration_num ): break noisy_channels_old = self.noisy_channels["bad_all"].copy() if raw_tmp.info["nchan"] - len(self.noisy_channels["bad_all"]) < 2: raise ValueError( "RobustReference:TooManyBad " "Could not perform a robust reference -- not enough good channels" ) if self.noisy_channels["bad_all"]: raw_tmp._data = signal * 1e-6 raw_tmp.info["bads"] = self.noisy_channels["bad_all"] raw_tmp.interpolate_bads() signal_tmp = raw_tmp.get_data() * 1e6 else: signal_tmp = signal self.reference_signal = ( np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0) * 1e6 ) signal_tmp = self.remove_reference( signal, self.reference_signal, reference_index ) iterations = iterations + 1 logger.info("Iterations: {}".format(iterations)) logger.info("Robust reference done") return self.noisy_channels, self.reference_signal
[ "def robust_reference(raw, reference_out, montage_kind='standard_1020'):\n raw.rename_channels(lambda s: s.strip(\".\"))\n ch_names = raw.info['ch_names']\n\n # Warn if evaluation and reference channels are not the same for robust\n if not set(reference_out['ref_chs']) == set(reference_out['eval_chs']):\n logger.warning('robustReference: Reference channels and'\n 'evaluation channels should be same for robust reference')\n\n # raw._data = detrend(raw.get_data())\n\n # Determine unusable channels and remove them from the reference channels\n signal_noisy = bad_channels_detector(raw)\n signal_noisy.find_noisy_channels()\n noisy_channels = {'bad_by_nan': signal_noisy.bad_by_nan,\n 'bad_by_flat': signal_noisy.bad_by_flat,\n 'bad_by_deviation': signal_noisy.bad_by_deviation,\n 'bad_by_hf_noise': signal_noisy.bad_by_hf_noise,\n 'bad_by_correlation': signal_noisy.bad_by_correlation,\n 'bad_by_dropout': signal_noisy.bad_by_dropout,\n 'bad_by_ransac': signal_noisy.bad_by_ransac,\n 'bad_all': signal_noisy.get_bads()}\n logger.info('Bad channels: {}'.format(noisy_channels))\n\n unusable_channels = list(set(signal_noisy.bad_by_nan + signal_noisy.bad_by_flat))\n reference_channels = set_diff(reference_out['ref_chs'], unusable_channels)\n\n # Get initial estimate of the mean by the specified method\n signal = raw.get_data()\n ref_initial = np.median(raw.get_data(picks=reference_channels), axis=0)\n unusable_index = [ch_names.index(ch) for ch in unusable_channels]\n signal_tmp = remove_reference(signal, ref_initial, unusable_index)\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n montage = mne.channels.read_montage(kind=montage_kind, ch_names=raw_tmp.ch_names)\n raw_tmp.set_montage(montage)\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp\n signal_noisy = bad_channels_detector(raw_tmp)\n signal_noisy.find_noisy_channels()\n noisy_channels['bad_by_nan'] = union(noisy_channels['bad_by_nan'], signal_noisy.bad_by_nan)\n noisy_channels['bad_by_flat'] = union(noisy_channels['bad_by_flat'], signal_noisy.bad_by_flat)\n noisy_channels['bad_by_deviation'] = union(noisy_channels['bad_by_deviation'], signal_noisy.bad_by_deviation)\n noisy_channels['bad_by_hf_noise'] = union(noisy_channels['bad_by_hf_noise'], signal_noisy.bad_by_hf_noise)\n noisy_channels['bad_by_correlation'] = union(noisy_channels['bad_by_correlation'],\n signal_noisy.bad_by_correlation)\n noisy_channels['bad_by_dropout'] = union(noisy_channels['bad_by_dropout'], signal_noisy.bad_by_dropout)\n noisy_channels['bad_by_ransac'] = union(noisy_channels['bad_by_ransac'], signal_noisy.bad_by_ransac)\n noisy_channels['bad_all'] = union(noisy_channels['bad_all'], signal_noisy.get_bads())\n logger.info('Bad channels: {}'.format(noisy_channels))\n\n if iterations > 1 and (not noisy_channels['bad_all'] or\n set(noisy_channels['bad_all']) == set(noisy_channels_old)) or \\\n iterations > max_iteration_num:\n break\n noisy_channels_old = noisy_channels['bad_all'].copy()\n\n if raw_tmp.info['nchan']-len(noisy_channels['bad_all']) < 2:\n logger.error('robustReference:TooManyBad '\n 'Could not perform a robust reference -- not enough good channels')\n\n if noisy_channels['bad_all']:\n raw_tmp._data = signal\n raw_tmp.info['bads'] = noisy_channels['bad_all']\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data()\n else:\n signal_tmp = signal\n reference_signal = np.nanmean(raw_tmp.get_data(picks=reference_channels), axis=0)\n signal_tmp = 
remove_reference(signal, reference_signal, unusable_index)\n iterations = iterations + 1\n logger.info('Iterations: {}'.format(iterations))\n\n logger.info('Robust reference done')\n return noisy_channels", "def perform_reference(self):\n # Phase 1: Estimate the true signal mean with robust referencing\n self.robust_reference()\n if self.noisy_channels[\"bad_all\"]:\n self.raw.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n self.raw.interpolate_bads()\n self.reference_signal = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n rereferenced_index = [\n self.ch_names_eeg.index(ch) for ch in self.rereferenced_channels\n ]\n self.EEG = self.remove_reference(\n self.EEG, self.reference_signal, rereferenced_index\n )\n\n # Phase 2: Find the bad channels and interpolate\n self.raw._data = self.EEG * 1e-6\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n\n # Record Noisy channels and EEG before interpolation\n self.bad_before_interpolation = noisy_detector.get_bads(verbose=True)\n self.EEG_before_interpolation = self.EEG.copy()\n\n bad_channels = _union(self.bad_before_interpolation, self.unusable_channels)\n self.raw.info[\"bads\"] = bad_channels\n self.raw.interpolate_bads()\n reference_correct = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n self.EEG = self.raw.get_data() * 1e6\n self.EEG = self.remove_reference(\n self.EEG, reference_correct, rereferenced_index\n )\n # reference signal after interpolation\n self.reference_signal_new = self.reference_signal + reference_correct\n # MNE Raw object after interpolation\n self.raw._data = self.EEG * 1e-6\n\n # Still noisy channels after interpolation\n self.interpolated_channels = bad_channels\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.still_noisy_channels = noisy_detector.get_bads()\n self.raw.info[\"bads\"] = self.still_noisy_channels\n return self", "def bipolar_reference(raw, dist_thresh=0.01, verbose=True):\n raw.load_data()\n ch_names = [name.replace(' ', '') for name in raw.ch_names] # no spaces\n bipolar_names = list()\n locs = list()\n data = list()\n for i, ch in enumerate(ch_names):\n elec_name = ''.join([letter for letter in ch if\n not letter.isdigit()]).rstrip()\n number = ''.join([letter for letter in ch if\n letter.isdigit()]).rstrip()\n pair = f'{elec_name}{int(number) + 1}'\n if pair not in ch_names:\n continue\n j = ch_names.index(pair)\n loc = raw.info['chs'][i]['loc'][:3]\n loc2 = raw.info['chs'][j]['loc'][:3]\n if np.linalg.norm(loc - loc2) > dist_thresh:\n continue\n data.append(raw._data[i] - raw._data[j])\n locs.append((loc + loc2) / 2)\n bipolar_names.append(f'{ch}-{pair}')\n if verbose:\n print(f'Bipolar referencing {ch} and {pair}')\n bipolar_info = mne.create_info(bipolar_names, raw.info['sfreq'], 'seeg')\n for loc, ch in zip(locs, bipolar_info['chs']):\n ch['loc'][:3] = loc\n return mne.io.RawArray(np.array(data), bipolar_info, raw.first_samp)", "def refere(eeg, channels, mode='contralateral'):\n\tbipolar_map = {'Fp1':'Fp2', 'Fp2':'Fp2', 'F3':'F4', 'F4':'F4', 'C3':'C4', 'C4':'C4', 'T3':'T4', 'T4':'T4', 'P3':'P4', 'P4':'P4', 'O1':'O2', 'O2':'O2'}\n\tif mode not in ['monopolar', 'contralateral', 'bipolar', 'linked', 'average']:\n\t\tprint 'WARNING - refere(): parameter \"mode\" can only be \"monopolar\", \"contralateral\", \"bipolar\" or \"linked\". 
Using \"contralateral\"!'\n\t\tmode = 'contralateral'\n\tif mode == 'linked':\t\t\n\t\treference = (eeg[:,channels.index('A1')] + eeg[:,channels.index('A2')])/2.\n\tif mode == 'average':\n\t\treference = np.zeros(len(eeg), dtype=np.float32)\n\t\tchcounter = 0\n\t\tfor channel in range(len(channels)):\n\t\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t\treference += eeg[:, channel]\n\t\t\t\tchcounter += 1\n\t\treference /= chcounter\n\tfor channel in range(len(channels)):\n\t\tif (channels[channel] in EEG_CHANNELS):\n\t\t\t# mindenkit referalunk kiveve magukat a referencia csatornakat\n\t\t\tif mode == 'contralateral':\n\t\t\t\tif (channels[channel] in ['Fp2', 'F4', 'C4', 'T4', 'P4', 'O2']):\n\t\t\t\t\tref_channel = channels.index('A1')\n\t\t\t\telif (channels[channel] in ['Fp1', 'F3', 'C3', 'T3', 'P3', 'O1']):\n\t\t\t\t\tref_channel = channels.index('A2')\n\t\t\t\telse:\n\t\t\t\t\tprint \"Error: what kind of channel is this: \", channels[channel], \" cannot reference!!!!\"\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\tif mode == 'bipolar':\n\t\t\t\tref_channel = channels.index(bipolar_map[channels[channel]])\n\t\t\t\treference = eeg[:, ref_channel]\n\t\t\t\tprint \"channel \", channels[channel], \" referenced to \", channels[ref_channel]\n\t\t\teeg[:, channel] -= reference", "def test_valid_snr_get_reference_spectrum():\n ref_band = \"J\"\n wav_ref, flux_ref = snrnorm.get_reference_spectrum(\"M0-K-1.0-100k\", ref_band=ref_band)\n band_min, band_max = utils.band_limits(ref_band)\n\n # Test the wavelength is in the refernce band wavelength range\n assert np.all(wav_ref <= band_max)\n assert np.all(wav_ref >= band_min)\n # test properties of output\n assert len(wav_ref) == len(flux_ref)\n assert isinstance(wav_ref, np.ndarray)\n assert isinstance(flux_ref, np.ndarray)", "def fixed_pattern_correction(image, black_reference):\n corrected_image = image - black_reference\n # correctedImage[correctedImage < 0] = 0\n # This has been removed because I was improperly enforcing a cutoff value for noise.\n # Per Winfried, negative values are acceptable in background corrected images\n return corrected_image", "def detect_bad_channels_ibl(\n raw,\n fs,\n psd_hf_threshold,\n dead_channel_thr=-0.5,\n noisy_channel_thr=1.0,\n outside_channel_thr=-0.75,\n n_neighbors=11,\n nyquist_threshold=0.8,\n welch_window_ms=0.3,\n):\n _, nc = raw.shape\n raw = raw - np.mean(raw, axis=0)[np.newaxis, :]\n nperseg = int(welch_window_ms * fs / 1000)\n import scipy.signal\n\n fscale, psd = scipy.signal.welch(raw, fs=fs, axis=0, window=\"hann\", nperseg=nperseg)\n\n # compute similarities\n ref = np.median(raw, axis=1)\n xcorr = np.sum(raw * ref[:, np.newaxis], axis=0) / np.sum(ref**2)\n\n # compute coherence\n xcorr_neighbors = detrend(xcorr, n_neighbors)\n xcorr_distant = xcorr - detrend(xcorr, n_neighbors) - 1\n\n # make recommendation\n psd_hf = np.mean(psd[fscale > (fs / 2 * nyquist_threshold), :], axis=0)\n\n ichannels = np.zeros(nc, dtype=int)\n idead = np.where(xcorr_neighbors < dead_channel_thr)[0]\n inoisy = np.where(np.logical_or(psd_hf > psd_hf_threshold, xcorr_neighbors > noisy_channel_thr))[0]\n\n ichannels[idead] = 1\n ichannels[inoisy] = 2\n\n # the channels outside of the brains are the contiguous channels below the threshold on the trend coherency\n # the chanels outide need to be at either extremes of the probe\n ioutside = np.where(xcorr_distant < outside_channel_thr)[0]\n if ioutside.size > 0 and 
(ioutside[-1] == (nc - 1) or ioutside[0] == 0):\n a = np.cumsum(np.r_[0, np.diff(ioutside) - 1])\n ioutside = ioutside[a == np.max(a)]\n ichannels[ioutside] = 3\n\n return ichannels", "def run(self, sensorRef, exposure):\n filterName = exposure.getFilter().getName()\n if filterName != 'y':\n # No correction to be made\n return\n if sensorRef.dataId[\"ccd\"] in range(104, 112):\n # No correction data: assume it's zero\n return\n\n # The LEDs that are causing the Y straylight have not been covered yet (on 2017-11-27),\n # but they will be covered in the near future.\n # Once the LEDs are covered, we will have to uncomment the following statement:\n #\n # if (ccdExposure is newer than a certain date):\n # return\n\n header = sensorRef.get('raw_md')\n if self.config.doRotatorAngleCorrection:\n angleStart, angleEnd = inrStartEnd(header)\n self.log.debug(\n \"(INR-STR, INR-END) = ({:g}, {:g}) (FITS header says ({:g}, {:g})).\".format(\n angleStart, angleEnd, header.getDouble('INR-STR'), header.getDouble('INR-END'))\n )\n else:\n angleStart = header.getDouble('INR-STR')\n angleEnd = None\n\n self.log.info(\"Correcting y-band background\")\n filename = sensorRef.get(\"yBackground_filename\")[0]\n model = get_ybackground(filename, angleStart, None if angleStart == angleEnd else angleEnd)\n\n # Some regions don't have useful model values because the amplifier is dead when the darks were taken\n # \n badAmps = {9: [0, 1, 2, 3], 33: [0, 1], 43: [0]} # Known bad amplifiers in the data: {ccdId: [ampId]}\n detId = exposure.getDetector().getId()\n if detId in badAmps:\n isBad = numpy.zeros_like(model, dtype=bool)\n for ii in badAmps[detId]:\n amp = exposure.getDetector()[ii]\n box = amp.getBBox()\n isBad[box.getBeginY():box.getEndY(), box.getBeginX():box.getEndX()] = True\n mask = exposure.getMaskedImage().getMask()\n if numpy.all(isBad):\n model[:] = 0.0\n else:\n model[isBad] = numpy.median(model[~isBad])\n mask.array[isBad] |= mask.getPlaneBitMask(\"SUSPECT\")\n\n model *= exposure.getInfo().getVisitInfo().getExposureTime()\n exposure.image.array -= model", "def calibrate(raw_data, white_reference, dark_reference):\n # Auto-increment device\n params.device += 1\n\n # Collect the number of wavelengths present\n num_bands = len(white_reference.wavelength_dict)\n den = white_reference.array_data - dark_reference.array_data\n\n # Calibrate using reflectance = (raw data - dark reference) / (white reference - dark reference)\n output_num = []\n for i in range(0, raw_data.lines):\n ans = raw_data.array_data[i,].astype(np.float16) - dark_reference.array_data\n output_num.append(ans)\n num = np.stack(output_num, axis=2)\n output_calibrated = []\n for i in range(0, raw_data.lines):\n ans1 = raw_data.array_data[i,] / den\n output_calibrated.append(ans1)\n\n # Reshape into hyperspectral datacube\n scalibrated = np.stack(output_calibrated, axis=2)\n calibrated_array = np.transpose(scalibrated[0], (1, 0, 2))\n calibrated_array[np.where(calibrated_array < 0)] = 0\n\n # Find array min and max values\n max_pixel = float(np.amax(calibrated_array))\n min_pixel = float(np.amin(calibrated_array))\n\n # Make a new class instance with the calibrated hyperspectral image\n calibrated = Spectral_data(array_data=calibrated_array, max_wavelength=raw_data.max_wavelength,\n min_wavelength=raw_data.min_wavelength, max_value=max_pixel, min_value=min_pixel,\n d_type=raw_data.d_type,\n wavelength_dict=raw_data.wavelength_dict, samples=raw_data.samples,\n lines=raw_data.lines, interleave=raw_data.interleave,\n 
wavelength_units=raw_data.wavelength_units, array_type=raw_data.array_type,\n pseudo_rgb=None, filename=None, default_bands=raw_data.default_bands)\n\n # Make pseudo-rgb image for the calibrated image\n calibrated.pseudo_rgb = _make_pseudo_rgb(spectral_array=calibrated)\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(calibrated.pseudo_rgb)\n elif params.debug == \"print\":\n print_image(calibrated.pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_calibrated_rgb.png\"))\n\n return calibrated", "def calc_fidelity(inimg,refimg,pbimg='',psfimg='',fudge_factor=1.0,scale_factor=1.0,pb_thresh=0.25,clean_up=True,outfile=''):\n\n ia=iatool()\n\n ia.open(inimg)\n # average over the stokes axis to get it down to 3 axes which is what our other one has\n imvals=np.squeeze(ia.getchunk()) * scale_factor\n img_cs = ia.coordsys()\n # how to trim the freq axis--\n #img_shape = (ia.shape())[0:3]\n img_shape = ia.shape()\n ia.close()\n # get beam info\n hdr = imhead(imagename=inimg,mode='summary')\n bmaj_str = str(hdr['restoringbeam']['major']['value'] * fudge_factor)+hdr['restoringbeam']['major']['unit']\n bmin_str = str(hdr['restoringbeam']['minor']['value'] * fudge_factor)+hdr['restoringbeam']['minor']['unit']\n bpa_str = str(hdr['restoringbeam']['positionangle']['value'])+hdr['restoringbeam']['positionangle']['unit']\n\n # i should probably also be setting the beam * fudge_factor in the *header* of the input image\n\n if len(pbimg) > 0:\n ia.open(pbimg)\n pbvals=np.squeeze(ia.getchunk())\n pbvals /= np.max(pbvals)\n pbvals = np.where( pbvals < pb_thresh, 0.0, pbvals)\n #good_pb_ind=np.where( pbvals >= pb_thresh)\n #bad_pb_ind=np.where( pbvals < pb_thresh)\n #pbvals[good_pb_ind] = 1.0\n #if bad_pb_ind[0]:\n # pbvals[bad_pb_ind] = 0.0\n else:\n pbvals = imvals*0.0 + 1.0\n #good_pb_ind = np.where(pbvals)\n #bad_pb_ind = [np.array([])]\n\n ##\n\n ##############\n # open, smooth, and regrid reference image\n #\n\n smo_ref_img = refimg+'.TMP.smo'\n\n # if given a psf image, use that for the convolution. need to regrid onto input\n # model coordinate system first. 
this is mostly relevant for the single dish\n # if the beam isn't very gaussian (as is the case for alma sim tp)\n if len(psfimg) > 0:\n # consider testing and fixing the case the reference image isn't jy/pix\n ia.open(refimg)\n ref_cs=ia.coordsys()\n ref_shape=ia.shape()\n ia.close()\n ia.open(psfimg)\n psf_reg_im=ia.regrid(csys=ref_cs.torecord(),shape=ref_shape,outfile=psfimg+'.TMP.regrid',overwrite=True,axes=[0,1])\n psf_reg_im.done()\n ia.close()\n ia.open(refimg)\n # default of scale= -1.0 autoscales the PSF to have unit area, which preserves \"flux\" in units of the input map\n # scale=1.0 sets the PSF to have unit *peak*, which results in flux per beam in the output \n ref_convd_im=ia.convolve(outfile=smo_ref_img,kernel=psfimg+'.TMP.regrid',overwrite=True,scale=1.0)\n ref_convd_im.setbrightnessunit('Jy/beam')\n ref_convd_im.done()\n ia.close()\n if clean_up:\n rmtables(psfimg+'.TMP.regrid')\n else:\n # consider testing and fixing the case the reference image isn't jy/pix\n ia.open(refimg) \n im2=ia.convolve2d(outfile=smo_ref_img,axes=[0,1],major=bmaj_str,minor=bmin_str,pa=bpa_str,overwrite=True)\n im2.done()\n ia.close()\n\n smo_ref_img_regridded = smo_ref_img+'.TMP.regrid'\n ia.open(smo_ref_img)\n im2=ia.regrid(csys=img_cs.torecord(),shape=img_shape,outfile=smo_ref_img_regridded,overwrite=True,axes=[0,1])\n refvals=np.squeeze(im2.getchunk())\n im2.done()\n ia.close()\n\n ia.open(smo_ref_img_regridded)\n refvals=np.squeeze(ia.getchunk())\n ia.close()\n\n # set all pixels to zero where the PB is low - to avoid NaN's\n imvals = np.where(pbvals,imvals,0.0)\n refvals = np.where(pbvals,refvals,0.0)\n #if len(bad_pb_ind) > 0:\n #imvals[bad_pb_ind] = 0.0\n #refvals[bad_pb_ind] = 0.0\n\n deltas=(imvals-refvals).flatten()\n # put both image and model values in one array to calculate Beta for F_3- \n allvals = np.array( [np.abs(imvals.flatten()),np.abs(refvals.flatten())])\n # the max of (image_pix_i,model_pix_i), in one flat array of length nixels\n maxvals = allvals.max(axis=0)\n\n # carilli definition. 
rosero eq1\n f_eq1 = 1.0 - np.max(np.abs(deltas))/np.max(refvals)\n f_eq2 = 1.0 - (refvals.flatten() * np.abs(deltas)).sum() / (refvals * imvals).sum()\n f_eq2b = 1.0 - (refvals.flatten() * np.abs(deltas)).sum() / (refvals * refvals).sum()\n #f_eq3 = 1.0 - (maxvals[gi] * np.abs(deltas[gi])).sum() / (maxvals[gi] * maxvals[gi]).sum()\n f_eq3 = 1.0 - (pbvals.flatten() * maxvals * np.abs(deltas)).sum() / (pbvals.flatten() * maxvals * maxvals).sum()\n\n # if an output image was requested, and a pbimg was given; make one.\n if ((len(outfile)>0) & (len(pbimg)>0)):\n weightfile= 'mypbweight.TMP.im'\n rmtables(weightfile)\n immath(imagename=[pbimg],mode='evalexpr',expr='ceil(IM0/max(IM0) - '+str(pb_thresh)+')',outfile=weightfile)\n betafile = 'mybeta.TMP.im'\n rmtables(betafile)\n immath(imagename=[inimg,smo_ref_img_regridded],mode='evalexpr',expr='iif(abs(IM0) > abs(IM1),abs(IM0),abs(IM1))',outfile=betafile)\n # 19sep19 - change to the actual F_3 contrib ie put abs() back in\n rmtables(outfile)\n print(\" Writing fidelity error image: \"+outfile)\n immath(imagename=[inimg,smo_ref_img_regridded,weightfile,betafile],expr='IM3*IM2*abs(IM0-IM1)/sum(IM3*IM3*IM2)',outfile=outfile)\n # 19sep19 - add fractional error (rel to beta) to output\n rmtables(outfile+'.frac')\n print(\" Writing fractional error image: \"+outfile+'.frac')\n immath(imagename=[inimg,smo_ref_img_regridded,weightfile,betafile],expr='IM2*(IM0-IM1)/IM3',outfile=outfile+'.frac')\n if clean_up:\n rmtables(weightfile)\n rmtables(betafile)\n\n # pearson correlation coefficient evaluated above beta = 1% peak reference image\n gi=np.where( np.abs(maxvals) > 0.01 * np.abs(refvals.max()) )\n ii = imvals.flatten()\n mm = refvals.flatten()\n mm -= mm.min()\n # (x-mean(x)) * (y-mean(y)) / sigma_x / sigma_y\n cc = (ii[gi] - ii[gi].mean()) * (mm[gi] - mm[gi].mean()) / (np.std(ii[gi]) * np.std(mm[gi]))\n #cc = (ii[gi] - ii[gi].mean()) * (mm[gi] - mm[gi].mean()) / (np.std(mm[gi]))**2\n corco = cc.sum() / cc.shape[0]\n\n fa = np.abs(mm) / np.abs(mm - ii)\n fa_0p1 = np.median( fa[ (np.abs(ii) > 1e-3 * mm.max()) | (np.abs(mm) > 1e-3 * mm.max()) ])\n fa_1 = np.median( fa[ (np.abs(ii) > 1e-2 * mm.max()) | (np.abs(mm) > 1e-2 * mm.max()) ])\n fa_3 = np.median( fa[ (np.abs(ii) > 3e-2 * mm.max()) | (np.abs(mm) > 3e-2 * mm.max()) ])\n fa_10 = np.median( fa[ (np.abs(ii) > 1e-1 * mm.max()) | (np.abs(mm) > 1e-1 * mm.max()) ] )\n\n #gi2 = (np.abs(ii) > 1e-3 * mm.max()) | (np.abs(mm) > 1e-3 * mm.max()) \n\n print(\"*************************************\")\n print('image: ',inimg,'reference image:',refimg)\n print(\"Eq1 / Eq2 / Eq2b / Eq3 / corrCoeff \")\n print(f_eq1, f_eq2, f_eq2b, f_eq3,corco)\n print(' ALMA (A_0.1%, A_1%, A_3%, A_10%): ',fa_0p1,fa_1,fa_3,fa_10)\n print(\"*************************************\")\n\n fidelity_results = {'f1': f_eq1, 'f2': f_eq2, 'f2b': f_eq2b, 'f3': f_eq3, 'falma': [fa_0p1, fa_1, fa_3, fa_10]}\n\n if clean_up:\n rmtables(smo_ref_img)\n rmtables(smo_ref_img_regridded)\n\n return fidelity_results", "def ccm_unred(wave, flux, ebv, r_v=\"\"):\n import numpy as np\n wave = np.array(wave, float)\n flux = np.array(flux, float)\n \n if wave.size != flux.size: raise TypeError, 'ERROR - wave and flux vectors must be the same size'\n \n if not bool(r_v): r_v = 3.1\n \n x = 10000.0/wave\n npts = wave.size\n a = np.zeros(npts, float)\n b = np.zeros(npts, float)\n \n ###############################\n #Infrared\n \n good = np.where( (x > 0.3) & (x < 1.1) )\n a[good] = 0.574 * x[good]**(1.61)\n b[good] = -0.527 * x[good]**(1.61)\n \n 
###############################\n # Optical & Near IR\n \n good = np.where( (x >= 1.1) & (x < 3.3) )\n y = x[good] - 1.82\n \n c1 = np.array([ 1.0 , 0.104, -0.609, 0.701, 1.137, \\\n -1.718, -0.827, 1.647, -0.505 ])\n c2 = np.array([ 0.0, 1.952, 2.908, -3.989, -7.985, \\\n 11.102, 5.491, -10.805, 3.347 ] )\n \n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n ###############################\n # Mid-UV\n \n good = np.where( (x >= 3.3) & (x < 8) )\n y = x[good]\n F_a = np.zeros(np.size(good),float)\n F_b = np.zeros(np.size(good),float)\n good1 = np.where( y > 5.9 )\n \n if np.size(good1) > 0:\n y1 = y[good1] - 5.9\n F_a[ good1] = -0.04473 * y1**2 - 0.009779 * y1**3\n F_b[ good1] = 0.2130 * y1**2 + 0.1207 * y1**3\n \n a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a\n b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b\n \n ###############################\n # Far-UV\n \n good = np.where( (x >= 8) & (x <= 11) )\n y = x[good] - 8.0\n c1 = [ -1.073, -0.628, 0.137, -0.070 ]\n c2 = [ 13.670, 4.257, -0.420, 0.374 ]\n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n # Applying Extinction Correction\n \n a_v = r_v * ebv\n a_lambda = a_v * (a + b/r_v)\n \n funred = flux * 10.0**(0.4*a_lambda) \n \n return funred", "def scoreCirc_CmosVoltageReference_2(circuit, gen, indi, MOEAMODE):\n \n if debug > 2:\n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi)\n #----------#\n VREF = 1.5\n #----------#\n\n #---------------------------------------------------------BigMatrix stuff, check short-circuits, matrix density, matrix identifier (obsolete) \n FullBigCircuitMatrix = copy(circuit.fullRedundancyMatrix)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #--------------------------------------------------------- \n \n score = np.array([0,0,0], dtype=\"float64\") if MOEAMODE == 1 else 0\n \n score += 2e4*np.exp(OcSc)\n results = None\n if OcSc > 1:\n score += 1e4*np.exp(OcSc)\n else:\n #----------------------------------------------------------Try to make netlist and evaluate the individual\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n #----------------------------------------------------------Start of results analysis and objectives creation\n disfCount = 0\n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], 
dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n psrr = results['psrr']['nominal']\n# if np.isnan(np.array(psrr, dtype=float)):\n# disfCount = disfCount + 1\n# psr = 0\n# else:\n# psr = 1.0/psrr #abs(90 - psrr) if psrr < 90 else 0 #tole kot objective ni ok. 
ker je opravljena meritev samo pri vdd=15 je to precej stala.\n\n\n #----------------------------------------------------------Score function SINGLE-OBJECTIVE\n if MOEAMODE == 0:\n score =(vdd_s_t1 + 5*vdd_s_t1_d +\n\t 2*vdd_s_t2 + 2*vdd_s_t2_d +\n\t vdd_s_t3 + 5*vdd_s_t3_d +\n\t #vdd_s_r1 + 2*vdd_s_r1_d +\n\t #vdd_s_r2 + 2*vdd_s_r2_d + \n\t #vdd_s_r3 + 2*vdd_s_r3_d + \n\t (100*powe)\n )\n if disfCount > 0:\n\tscore = 0 + np.exp(disfCount) * 1e3\n\t\n #----------------------------------------------------------Score function MULTI-OBJECTIVE\t\n else: #MOEAMODE == 1:\n oMediana = vdd_s_t1 + vdd_s_t2 + vdd_s_t3\n oPsrr = vdd_s_t1_d + vdd_s_t2_d + vdd_s_t3_d\t#DC rejection\n #oPsrr = psr\n oP = powe\n\t\t\t\t\t #add constraints\n score = (np.array([oMediana, oPsrr, oP]) \t+ (oMediana if oMediana > 4 else 0) + \n\t\t\t\t\t\t#+ (oPsrr*1000 if oPsrr > 1.0/40 else 0) +\n\t\t\t\t\t\t+ (oPsrr if oPsrr > 3 else 0) +\n\t\t\t\t\t\t+ (oP if oP > 1e-1 else 0)\n )\n if disfCount > 0:\n\tscore = (np.array([0,0,0])+np.exp(disfCount) * 1e3) + random.randint(0, 200)\n\n #-------------------------------------------------------------------\n if debug > 2: \n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n \n \n # TRIGGER STOP SIGNAL if:\n if (vdd_s_t2 <= 0.001 and \n\tpsrr >= 80 and \n\tpowe <= 1e-5):\n globalVars.DONE = 1 # End evolution, feasible solution evolved.\n \n\n return score, results", "def calibrate_image(rgb_image, ref_means, ref_stds):\n calibrated_img = rgb_image.copy().astype('float32')\n for i in range(3):\n calibrated_img[:,:,i] = calibrated_img[:,:,i]-np.mean(calibrated_img[:,:,i])\n calibrated_img[:,:,i] = calibrated_img[:,:,i]/np.std(calibrated_img[:,:,i])\n calibrated_img[:,:,i] = calibrated_img[:,:,i]*ref_stds[i] + ref_means[i]\n calibrated_img[:,:,i] = np.clip(calibrated_img[:,:,i],0,255)\n return calibrated_img.astype('uint8')", "def recall(test, reference):\n\n assert type(test) == np.ndarray, \"Test type: {}\".format(type(test))\n assert type(reference) == np.ndarray,\\\n \"Reference type: {}\".format(type(reference))\n assert test.shape == reference.shape, \"Shapes {} and {}\".format(\n test.shape, reference.shape)\n if not (np.any(test) and np.any(reference)):\n return 0.\n\n return np.sum((test != 0)*(reference != 0)) /\\\n np.sum(reference != 0, dtype=np.float32)", "def compute_reference_spectrum(self):\n self.reference_spectrum = np.mean(self.raw_spec[self.refs, :],axis=0)", "def compensate(manual=False, num_imgs=3, led_power=0.1, sample_ref_img=None, laser_ref_img=None, ref_coords=None):\r\n # Never touch these again while compensating.\r\n insert_DM()\r\n block_sc()\r\n time.sleep(0.1)\r\n #global sample ref image to be deilluminated\r\n deillum_sample_ref_img = sample_ref_img.astype(float) / illumination\r\n deillum_sample_ref_img = equalize_histogram_and_8bit(deillum_sample_ref_img)\r\n \r\n # Take laser reference image\r\n LED_power(0)\r\n unblock_laser()\r\n curr_laser_img = equalize_histogram_and_8bit(sp.ndimage.median_filter(get_pco_image(num_imgs), 3))\r\n # Take current sample image (with laser spot)\r\n LED_power(led_power)\r\n curr_img_8bit = take_deilluminated_image(num_imgs=num_imgs)\r\n \r\n # Compensate.\r\n if manual:\r\n _manual_compensate(num_imgs=num_imgs, led_power=led_power,\r\n sample_img=curr_img_8bit, laser_img=curr_laser_img,\r\n sample_ref_img=deillum_sample_ref_img, laser_ref_img=laser_ref_img, 
ref_coords=ref_coords)\r\n else:\r\n good = _auto_compensate(num_imgs=num_imgs, led_power=led_power,\r\n sample_img=curr_img_8bit, laser_img=curr_laser_img,\r\n sample_ref_img=deillum_sample_ref_img, laser_ref_img=laser_ref_img, ref_coords=ref_coords)\r\n if not good:\r\n _manual_compensate(num_imgs=num_imgs, led_power=led_power,\r\n sample_img=curr_img_8bit, laser_img=curr_laser_img,\r\n sample_ref_img=deillum_sample_ref_img, laser_ref_img=laser_ref_img, ref_coords=ref_coords)\r\n return True", "def correctQE(self):\n\t\tif self.lambd.max()-self.lambd.min() > 3000 or n.mean(self.lambd)<7300 or n.mean(self.lambd)>8300 :\n\t\t\tprint( \"cannot QE correct\" )\n\n\t\txravg = 8900\n\t\tyravg = 150\n\t\tcorrectionavg = self.survey.paramsEndr[0] + self.survey.paramsEndr[1] * self.lambd\n\t\tself.xavg = (self.lambd - xravg)/yravg \n\t\tok1 = (self.xavg > 0) & ( self.xavg < 1)\n\t\tself.cor2avg = correctionavg*self.xavg + 1*(1-self.xavg)\n\t\tok2=(ok1)&(self.cor2avg>1)\n\t\tself.cor2avg[(ok2==False)] = n.ones_like(self.cor2avg[(ok2==False)])\n\n\t\t#npixel=len(self.lambd)\n\t\tself.left=(self.lambd<=self.lambdSwitch) # n.arange(4096)\n\t\tself.right=(self.lambd>self.lambdSwitch) # n.arange(4096,4096*2,1)\n\n\t\t#xx_b=self.lambd[self.left]\n\t\t#xx_r=self.lambd[self.right]\n\n\t\t#corr_b = params[num,0] + params[num,1]*self.lambd[self.left] + params[num,2]*self.lambd[self.left]**2\n\t\t#corr_r = params[num+4,0] + params[num+4,1]*self.lambd[self.right] + params[num+4,2]*self.lambd[self.right]**2\n\t\tcorr_b = 1./( self.survey.params.T[self.chipNO][0] + self.survey.params.T[self.chipNO][1] * self.lambd[self.left] + self.survey.params.T[self.chipNO][2]*self.lambd[self.left]**2 )\n\t\tcorr_r = 1./( self.survey.params.T[self.chipNO+4][0] + self.survey.params.T[self.chipNO+4][1]* self.lambd[self.right] + self.survey.params.T[self.chipNO+4][2] *self.lambd[self.right]**2 )\n\t\t# print( corr_b, corr_r, self.cor2avg)\n\t\t# print) \"spectrum\",self.spec)\n\n\t\tself.specTMP=n.zeros_like(self.spec)\n\t\tself.specErrTMP=n.zeros_like(self.specErr)\n\t\tself.ivarTMP=n.zeros_like(self.ivar)\n\n\t\tself.specTMP[self.left]=self.spec[self.left]*corr_b\n\t\tself.specTMP[self.right]=self.spec[self.right]*corr_r* self.cor2avg[self.right]\n\n\t\tself.specErrTMP[self.left]=self.specErr[self.left]*corr_b\n\t\tself.specErrTMP[self.right]=self.specErr[self.right]*corr_r* self.cor2avg[self.right]\n\n\t\tself.ivarTMP[self.left]=self.ivar[self.left]/(corr_b*corr_b)\n\t\tself.ivarTMP[self.right]=self.ivar[self.right]/(corr_r*corr_r* self.cor2avg[self.right]*self.cor2avg[self.right] )\n\n\t\tself.specTMP=self.specTMP/self.survey.throughput.y[self.pixSampled]\n\t\tself.specErrTMP=self.specErrTMP/self.survey.throughput.y[self.pixSampled]\n\t\tself.ivarTMP=self.ivarTMP*self.survey.throughput.y[self.pixSampled]**2", "def test_wrong_ref_power_mfcc():\n with raises(FeatureParamsError):\n MFCC(file_struct, FeatureTypes.framesync, ref_power=\"caca\")", "def correct_matched_filter_image(science, reference):\n\n science_kernel, reference_kernel = noise_kernels(science, reference)\n science_source_noise = source_noise(science, science_kernel)\n reference_source_noise = source_noise(reference, reference_kernel)\n science_registration_noise = registration_noise(science, science_kernel)\n reference_registration_noise = registration_noise(reference, reference_kernel)\n noise = science_source_noise + reference_source_noise + science_registration_noise + reference_registration_noise\n return noise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the reference signal from the original EEG signal. This function implements the functionality of the `removeReference` function as part of the PREP pipeline on an MNE Raw object.
def remove_reference(signal, reference, index=None):
    if np.ndim(signal) != 2:
        raise ValueError(
            "RemoveReference: EEG signal must be 2D array (channels * times)"
        )
    if np.ndim(reference) != 1:
        raise ValueError("RemoveReference: Reference signal must be 1D array")
    if np.shape(signal)[1] != np.shape(reference)[0]:
        raise ValueError(
            "RemoveReference: The second dimension of EEG signal must be "
            "the same with the length of reference signal"
        )
    if index is None:
        signal_referenced = signal - reference
    else:
        if not isinstance(index, list):
            raise TypeError(
                "RemoveReference: Expected type list, got {} instead".format(
                    type(index)
                )
            )
        signal_referenced = signal.copy()
        signal_referenced[np.asarray(index), :] = (
            signal[np.asarray(index), :] - reference
        )
    return signal_referenced
[ "def removeReference(self, reference: ghidra.program.model.symbol.Reference) -> None:\n ...", "def remove_reference(self):\n\n if hasattr(self, '_reference'):\n delattr(self, '_reference')", "def removeReferenceGlyph(self, *args):\n return _libsbml.GeneralGlyph_removeReferenceGlyph(self, *args)", "def removeOperandReference(self, opIndex: int, refAddr: ghidra.program.model.address.Address) -> None:\n ...", "def remove_refs(self):\n\n self.reference = None\n self.url = None", "def unsetReference(self):\n return _libsbml.Association_unsetReference(self)", "def perform_reference(self):\n # Phase 1: Estimate the true signal mean with robust referencing\n self.robust_reference()\n if self.noisy_channels[\"bad_all\"]:\n self.raw.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n self.raw.interpolate_bads()\n self.reference_signal = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n rereferenced_index = [\n self.ch_names_eeg.index(ch) for ch in self.rereferenced_channels\n ]\n self.EEG = self.remove_reference(\n self.EEG, self.reference_signal, rereferenced_index\n )\n\n # Phase 2: Find the bad channels and interpolate\n self.raw._data = self.EEG * 1e-6\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n\n # Record Noisy channels and EEG before interpolation\n self.bad_before_interpolation = noisy_detector.get_bads(verbose=True)\n self.EEG_before_interpolation = self.EEG.copy()\n\n bad_channels = _union(self.bad_before_interpolation, self.unusable_channels)\n self.raw.info[\"bads\"] = bad_channels\n self.raw.interpolate_bads()\n reference_correct = (\n np.nanmean(self.raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n self.EEG = self.raw.get_data() * 1e6\n self.EEG = self.remove_reference(\n self.EEG, reference_correct, rereferenced_index\n )\n # reference signal after interpolation\n self.reference_signal_new = self.reference_signal + reference_correct\n # MNE Raw object after interpolation\n self.raw._data = self.EEG * 1e-6\n\n # Still noisy channels after interpolation\n self.interpolated_channels = bad_channels\n noisy_detector = NoisyChannels(self.raw)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.still_noisy_channels = noisy_detector.get_bads()\n self.raw.info[\"bads\"] = self.still_noisy_channels\n return self", "def remove_contact_reference(self):\n self.reference_contact_datetime = None\n self.save()", "def drop_reference_points(self):\n self._cpp_obj.drop_reference_points()\n return self", "def robust_reference(self):\n raw = self.raw.copy()\n raw._data = removeTrend(raw.get_data(), sample_rate=self.sfreq)\n\n # Determine unusable channels and remove them from the reference channels\n noisy_detector = NoisyChannels(raw, do_detrend=False)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels_original = {\n \"bad_by_nan\": noisy_detector.bad_by_nan,\n \"bad_by_flat\": noisy_detector.bad_by_flat,\n \"bad_by_deviation\": noisy_detector.bad_by_deviation,\n \"bad_by_hf_noise\": noisy_detector.bad_by_hf_noise,\n \"bad_by_correlation\": noisy_detector.bad_by_correlation,\n \"bad_by_ransac\": noisy_detector.bad_by_ransac,\n \"bad_all\": noisy_detector.get_bads(),\n }\n self.noisy_channels = self.noisy_channels_original.copy()\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n self.unusable_channels = _union(\n noisy_detector.bad_by_nan, noisy_detector.bad_by_flat\n )\n # unusable_channels = _union(unusable_channels, noisy_detector.bad_by_SNR)\n 
self.reference_channels = _set_diff(\n self.reference_channels, self.unusable_channels\n )\n\n # Get initial estimate of the reference by the specified method\n signal = raw.get_data() * 1e6\n self.reference_signal = (\n np.nanmedian(raw.get_data(picks=self.reference_channels), axis=0) * 1e6\n )\n reference_index = [\n self.ch_names_eeg.index(ch) for ch in self.reference_channels\n ]\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n\n # Remove reference from signal, iteratively interpolating bad channels\n raw_tmp = raw.copy()\n\n iterations = 0\n noisy_channels_old = []\n max_iteration_num = 4\n\n while True:\n raw_tmp._data = signal_tmp * 1e-6\n noisy_detector = NoisyChannels(raw_tmp)\n noisy_detector.find_all_bads(ransac=self.ransac)\n self.noisy_channels[\"bad_by_nan\"] = _union(\n self.noisy_channels[\"bad_by_nan\"], noisy_detector.bad_by_nan\n )\n self.noisy_channels[\"bad_by_flat\"] = _union(\n self.noisy_channels[\"bad_by_flat\"], noisy_detector.bad_by_flat\n )\n self.noisy_channels[\"bad_by_deviation\"] = _union(\n self.noisy_channels[\"bad_by_deviation\"], noisy_detector.bad_by_deviation\n )\n self.noisy_channels[\"bad_by_hf_noise\"] = _union(\n self.noisy_channels[\"bad_by_hf_noise\"], noisy_detector.bad_by_hf_noise\n )\n self.noisy_channels[\"bad_by_correlation\"] = _union(\n self.noisy_channels[\"bad_by_correlation\"],\n noisy_detector.bad_by_correlation,\n )\n self.noisy_channels[\"bad_by_ransac\"] = _union(\n self.noisy_channels[\"bad_by_ransac\"], noisy_detector.bad_by_ransac\n )\n self.noisy_channels[\"bad_all\"] = _union(\n self.noisy_channels[\"bad_all\"], noisy_detector.get_bads()\n )\n logger.info(\"Bad channels: {}\".format(self.noisy_channels))\n\n if (\n iterations > 1\n and (\n not self.noisy_channels[\"bad_all\"]\n or set(self.noisy_channels[\"bad_all\"]) == set(noisy_channels_old)\n )\n or iterations > max_iteration_num\n ):\n break\n noisy_channels_old = self.noisy_channels[\"bad_all\"].copy()\n\n if raw_tmp.info[\"nchan\"] - len(self.noisy_channels[\"bad_all\"]) < 2:\n raise ValueError(\n \"RobustReference:TooManyBad \"\n \"Could not perform a robust reference -- not enough good channels\"\n )\n\n if self.noisy_channels[\"bad_all\"]:\n raw_tmp._data = signal * 1e-6\n raw_tmp.info[\"bads\"] = self.noisy_channels[\"bad_all\"]\n raw_tmp.interpolate_bads()\n signal_tmp = raw_tmp.get_data() * 1e6\n else:\n signal_tmp = signal\n self.reference_signal = (\n np.nanmean(raw_tmp.get_data(picks=self.reference_channels), axis=0)\n * 1e6\n )\n\n signal_tmp = self.remove_reference(\n signal, self.reference_signal, reference_index\n )\n iterations = iterations + 1\n logger.info(\"Iterations: {}\".format(iterations))\n\n logger.info(\"Robust reference done\")\n return self.noisy_channels, self.reference_signal", "def remove_rn(reference_node_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.remove_rn\")\n\n last_r = reference_node_name.rfind('R')\n rn_removed = reference_node_name[:last_r]\n\n flg.info(\"Converting {0} to {1}.\".format(reference_node_name, rn_removed))\n return rn_removed", "def deleteReferenceImage(self, name):\n blobName = self._getReferenceImageBlobName(name)\n self.productSearch.productClient.delete_reference_image(name=name)\n self.productSearch.bucket.blob(blobName).delete()", "def removeSpeciesReferenceGlyph(self, *args):\n return _libsbml.ReactionGlyph_removeSpeciesReferenceGlyph(self, *args)", "def remove_edge(e, R):\n\tR.remove_edge(e[0], e[1])\n\tupdate_after_mod(e,R)", "def remove_reference(self, 
dataset_id=None):\n if not dataset_id:\n raise aspecd.exceptions.MissingDatasetError\n for index, reference in enumerate(self.references):\n if dataset_id == reference.id:\n del self.references[index]\n break", "def removeMnemonicReference(self, refAddr: ghidra.program.model.address.Address) -> None:\n ...", "def unlink_obj(self, ref_frame, obj_name=None, delete=True):\n self.scene.remove_attached_object(ref_frame, obj_name)\n if delete:\n self.remove_obj(obj_name)", "def removeCompartmentReference(self, *args):\n return _libsbml.MultiCompartmentPlugin_removeCompartmentReference(self, *args)", "def delete_reference_array(self):\r\n del self.pxarray\r\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the buy list for the board
def setBuyList(self, buyList):
    parsedBuyList = []
    for bought in buyList:
        if hasattr(bought, "unitType"):
            parsedBuyList.append(bought)
        elif isinstance(bought, dict) and u'unitType' in bought and u'territory' in bought:
            parsedBuyList.append(createBoughtUnitFromDict(bought, self.board.territories))
        else:
            raise Exception("Invalid buy list", buyList)
    sumCost = self.costOfUnits(parsedBuyList)
    if sumCost <= self.board.currentCountry.money:
        self.board.buyList = parsedBuyList[:]  # copy in buyList
        return True
    else:
        return False
[ "def buys(self, buys):\n\n self._buys = buys", "def set_buy_sell_deal_account(self, account_list):\n self.multiple_items_selection_from_kendo_dropdown(self.buy_sell_deal_account_dropdown_locator, account_list)\n self.wait_for_ajax_spinner_load()", "def update_buy_caps(self):\n for stock in self.stocks:\n core.configure_item(\n f\"stock.{stock.name}.owned\",\n max_value=self.player.get_owned_stocks(stock)\n + int(self.player.cash / stock.price),\n )", "def set_target_buy_list(self, item_name, is_first_item):\n if is_first_item is True:\n self.single_selection_from_static_kendo_dropdown(self.target_buy_list_kendo_dropdown_arrow_locator, first_item=True)\n else:\n self.single_selection_from_static_kendo_dropdown(self.target_buy_list_kendo_dropdown_arrow_locator, item_name)", "def set_stocklist(self, stocklist):\n self.stock_list = stocklist", "def set_board(board):", "def sells(self, sells):\n\n self._sells = sells", "def set_target_stocks_list(self, list_of_stocks):\n self.target_stocks = list_of_stocks", "def buy_loot(self, buy, cost):\n if type(buy) is list:\n buy = buy[0]\n buy = ' '.join([i.lower().capitalize() for i in buy.split()])\n self.loot['items'].append(buy)\n self.loot['gp'] -= cost", "def shop_buy_bike(self,model): \r\n\t\tself.inventory.append(model.model_name) # add model name to shop inventory[]\r\n\t\tindex = model.factory_name.factory_inv.index(model.model_name)\r\n\t\tmodel.factory_name.factory_inv.pop(index)", "def lay_nobles_on_board(self) -> None:\n self.nobles_on_board = set(self.deck.pop_many_nobles(NOBLES_ON_BOARD_INITIAL))", "def buyStockList(sdk, stockToBuyList, quotes, tradingNumber=None, budgetPercent=0.995):\n # pdb.set_trace()\n posList = getPositionList(sdk) # 调用PositionList(sdk)函数,函数返回值赋给posList\n if posList == None:\n return None\n positionBefore = getPositionDictDetail(sdk) # 调用PositionDictDetail(sdk)函数,函数返回值赋给positionBefore\n if positionBefore == None:\n return None\n\n orders = []\n if tradingNumber is None:\n accountInfo = sdk.getAccountInfo() # 资金账户查询(返回:帐户的可用资金、交易冻结、保证金占用、手续费等)\n if accountInfo == None:\n return None\n budget = budgetPercent * accountInfo.availableCash / (1+len(stockToBuyList)) # 将资金在买入股票列表中平均分配\n else:\n accountInfo = sdk.getAccountInfo()\n if accountInfo == None:\n return None\n if tradingNumber - len(posList) <= 0: # 如果限制买入的股票数量小持仓股票数量\n return {}\n budget = budgetPercent * accountInfo.availableCash / (tradingNumber - len(posList))\n # 将资金在剔除已有股票后在平均分配\n\n for stock in stockToBuyList:\n price = round(quotes[stock] * 100) / 100 # 使在四舍五入后保留两位小数\n volume = np.floor(budget / price / 100) * 100 # 交易量取小于对象的最近的整数\n if price > 0 and volume > 0:\n orders.append([stock, price, volume, 'BUY']) # 定义买入指令内容\n\n try:\n sdk.sdklog(orders) # 将交易指令记入日志\n sdk.makeOrders(orders)\n except:\n print 'makeOrders error!'\n return None\n\n positionAfter = getPositionDictDetail(sdk) # 更新详细持仓字典\n if positionAfter == None:\n return None\n ret = {}\n\n setBefore = set(positionBefore.keys()) # 前期持仓股票代码列表\n setAfter = set(positionAfter.keys()) # 更新后持仓股票代码列表\n\n if len(setAfter - setBefore) > 0: # 如果新买入了股票品种\n for stock in list(setAfter - setBefore):\n ret[stock] = positionAfter[stock][0] # 将新买入股票赋值给ret列表\n else:\n for stock in positionAfter.keys():\n if positionAfter[stock][0] > positionBefore[stock][0]: # 更新后股票品种持仓量大于原有该股票持仓量\n ret[stock] = positionAfter[stock][0] - positionBefore[stock][0] # 将原股票增仓量赋值给ret列表\n\n return ret", "def add_buy(self, trade):\n trade = self._format_sql(trade, self.buy_table)\n self.buys[trade['id']] = trade", "def buyTradedVal(self, 
buyTradedVal):\n\n self._buyTradedVal = buyTradedVal", "def doBuyIn(self):\n self.protocol.sendPacket(networkpackets.PacketPokerBuyIn(amount=self.max_buy_in, **self._serial_and_game_id))\n self.protocol.sendPacket(networkpackets.PacketPokerAutoBlindAnte(**self._serial_and_game_id))", "def trade_payables(self, trade_payables):\n self._trade_payables = trade_payables", "def buyProperty(self):\n \n for i in range(len(self._props)):\n if self._props[i][0] == self._players[self._current].getPosition():\n \n # Player loses money when buying\n self._players[self._current].loseMoney(self._props[i][2])\n self._players[self._current].addProperty(self._props[i][1])\n \n # Change property availability from False to True\n self._props[i][3] = True\n self.changeTurn()", "def buy_items(self, itemdata):\n with self.scopped_session(fail=self.fail) as session:\n # find rows that are still up for sale\n q = session.query(AuctionHouse).filter(\n AuctionHouse.seller != self.seller.seller,\n AuctionHouse.sell_date == 0,\n AuctionHouse.sale == 0,\n )\n # loop rows\n for row in q:\n with self.capture(fail=self.fail):\n # skip blacklisted rows\n if row.id not in self.blacklist:\n # get item data\n try:\n data = itemdata[row.itemid]\n except KeyError:\n self.error('item missing from database: %d', row.itemid)\n data = None\n\n if data is not None:\n # buy stacks\n if row.stack:\n # check permissions\n if data.buy12:\n # check price\n if row.price <= data.price12:\n date = timeutils.timestamp(datetime.datetime.now())\n self.buyer.buy_item(row, date, data.price12)\n else:\n self.info('price too high! itemid=%d %d <= %d',\n row.itemid, row.price, data.price12)\n self.add_to_blacklist(row.id)\n else:\n self.debug('not allowed to buy item! itemid=%d', row.itemid)\n self.add_to_blacklist(row.id)\n # buy singles\n else:\n # check permissions\n if data.buy01:\n # check price\n if row.price <= data.price01:\n date = timeutils.timestamp(datetime.datetime.now())\n self.buyer.buy_item(row, date, data.price01)\n else:\n self.info('price too high! itemid=%d %d <= %d',\n row.itemid, row.price, data.price01)\n self.add_to_blacklist(row.id)\n else:\n self.debug('not allowed to buy item! itemid=%d', row.itemid)\n self.add_to_blacklist(row.id)\n else:\n # item data missing\n self.add_to_blacklist(row.id)\n else:\n # row was blacklisted\n self.debug('skipping row %d', row.id)", "def buy_date(self, buy_date):\n\n self._buy_date = buy_date" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a JSON string into the related object. object_to_serialize has to be an instance of the object type to convert to.
def DeserializeJson(self, json_string, object_to_serialize):
    object_to_serialize.__dict__ = json.loads(str(json_string))
    return object_to_serialize
[ "def json_deserialize(json_object):\n raise NotImplementedError('json_deserialize must be overriden')", "def serialize(self, obj):\n return obj", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def _json_to_obj(cls, serialized_str):\n json_dict = json.loads(serialized_str)\n if 'metadata' in json_dict.keys():\n metadata_dict = json_dict['metadata']\n return Metadata(metadata_dict)", "def convert_for_json(obj):\n if isinstance(obj, datetime.datetime):\n return obj.__str__()\n return obj", "def object_to_json(self, obj):\n json_string = self.simplifier.to_json_object(obj, obj.base_cls_name)\n\n return json_string", "def to_json(obj):\n\n if not isinstance(obj, Serializable):\n raise TypeError(f\"{obj} is not JSON serializable\")\n\n return obj.json", "def to_python(self, value):\n # Composite types are serialized as JSON blobs. If BaseField.to_python\n # is called with a string, assume it was produced by value_to_string\n # and decode it\n if isinstance(value, str):\n try:\n value = json.loads(value)\n except ValueError as exc:\n raise ValidationError(\n self.error_messages[\"bad_json\"],\n code=\"bad_json\",\n ) from exc\n\n return self.Meta.model(\n **{\n name: field.to_python(value.get(name))\n for name, field in self.Meta.fields\n }\n )\n\n return super().to_python(value)", "def deserialize(cls, json_str):\n return cls.deserialize_json(json.loads(json_str))", "def default(self, o):\r\n if hasattr(o, '__class__'): \r\n if hasattr(o.__class__, '__json__'): return o.__json__()\r\n \r\n raise TypeError(\"%r is not JSON serializable\" % (o,))", "def _json_convert(self, obj):\n string_types = (bytes, str)\n if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support\n return SON(((k, self._json_convert(v)) for k, v in obj.iteritems()))\n elif isinstance(obj, BaseDocument):\n return self._json_convert(transform(obj))\n elif hasattr(obj, '__iter__') and not isinstance(obj, string_types):\n return list((self._json_convert(v) for v in obj))\n try:\n return json_util.default(obj)\n except TypeError:\n return obj", "def _json_to_obj(cls, serialized_str):\n\n ret = None\n json_response = json.loads(serialized_str)\n\n # Creating a deep copy just in case later we want the original resp\n json_dict = copy.deepcopy(json_response)\n\n # Replacing attribute response names if they are Python reserved words\n # with a trailing underscore, for ex. id for id_ or if they have a\n # special character within the name replacing it for an underscore too\n json_dict = cls._replace_dict_key(\n json_dict, 'id', 'id_', recursion=True)\n\n if cls.NETWORK in json_dict:\n network_dict = json_dict.get(cls.NETWORK)\n ret = Network(**network_dict)\n return ret", "def _json_translation(obj):\r\n if type(obj) in ApiV1._json_valid_translation:\r\n return obj\r\n else:\r\n return str(obj)", "def json_datetime_serializer(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"{} is not JSON serializable.\".format(obj))", "def _json_to_obj(cls, serialized_str):\n\n ret = None\n json_response = json.loads(serialized_str)\n\n # Creating a deep copy just in case later we want the original resp\n json_dict = copy.deepcopy(json_response)\n\n # Replacing attribute response names if they are Python reserved words\n # with a trailing underscore, for ex. 
id for id_ or if they have a\n # special character within the name replacing it for an underscore too\n json_dict = cls._replace_dict_key(\n json_dict, 'id', 'id_', recursion=True)\n\n if cls.PORT in json_dict:\n subnet_dict = json_dict.get(cls.PORT)\n ret = Port(**subnet_dict)\n return ret", "def toJSON(cls, obj):\n return json.dumps(obj)", "def from_json_string(my_obj):\n\n return json.loads(my_obj)", "def convert_json_to_objects(json_string):\n\n pass", "def _object_to_json(obj):\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n return repr(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs a DVR object
def DVR(
    domain=None,
    divs=None,
    classes=None,
    potential_function=None,
    g=None,
    g_deriv=None,
    scf=False,
    potential_optimize=False,
    **base_opts
):
    return DVRConstructor.construct(
        domain=domain,
        divs=divs,
        classes=classes,
        potential_function=potential_function,
        g=g,
        g_deriv=g_deriv,
        scf=scf,
        potential_optimize=potential_optimize,
        **base_opts
    )
[ "def __init__(self, dr_ds: DatasetReader) -> None:\n super().__init__()\n\n self.dr_ds = dr_ds\n try:\n self.cmap = dr_ds.colormap(1)\n except ValueError:\n pass\n\n crs = dr_ds.crs\n res = dr_ds.res[0]\n\n with WarpedVRT(dr_ds, crs=crs) as dr:\n minx, miny, maxx, maxy = dr.bounds\n\n mint: float = 0\n maxt: float = sys.maxsize\n\n coords = (minx, maxx, miny, maxy, mint, maxt)\n self.index.insert(0, coords, 'dr')\n\n self._crs = cast(CRS, crs)\n self.res = cast(float, res)", "def from_rd(cls, r: 'RayDifferential'):\n\t\tself = cls(r.o, r.d, r.mint, r.maxt, r.depth, r.time)\n\t\tself.has_differentials = r.has_differentials\n\t\tself.rxOrigin = r.rxOrigin.copy()\n\t\tself.ryOrigin = r.ryOrigin.copy()\n\t\tself.rxDirection = r.rxDirection.copy()\n\t\tself.ryDirection = r.ryDirection.copy()\n\n\t\treturn self", "def __build(self,vs,ndarray):\n self.v = vs\n self.t = ndarray\n return self", "def __init__(self, parent):\n\t\tCtrlDev.__init__(self, parent)\n\n\t\tself._name = \"DVD/CD\"\n\t\tself._category = \"Armazenamento\"\n\n\t\tself._diag = DiagDVDCD(self)\n\t\tself._compat = CompatDVDCD(self)\n\t\tself._guiClass = GUIDVDCD", "def __init__(self, vrpdata):\n self.vrpdata = vrpdata\n self.objective = 0\n self.routes = []\n self.solutionValid = False", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def create_model(self):\n\t\t# Creates the EDSR model\n\t\tself.edsr_model = self.edsr()\n\t\tself.callbacks_list = self.get_callbacks()\n\n\t\tprint('Models are created.')\n\t\treturn self", "def _createVetor(cls, elem):\n return cls(elem)", "def __init__(self, *args):\n this = _almathswig.new_Velocity3D(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, uri=None, original=None,\n **kwargs):\n super(LDPCv, self).__init__(uri, **kwargs)\n self.original = original\n self.type_label = 'LDPCv'", "def cdd_Vrepresentation(self):\n return cdd_Vrepresentation(self._cdd_type, \n self.vertices(),\n [r for r in self.ray_generator()],\n [l for l in self.line_generator()] )", "def create(self) -> VUnit:\r\n return VUnit.from_argv()", "def __init__(\n self,\n lattice_resolution: int = None,\n number_of_objectives: int = None,\n creation_type: str = \"Uniform\",\n vector_type: str = \"Spherical\",\n ref_point: list = None,\n ):\n\n self.number_of_objectives = number_of_objectives\n self.lattice_resolution = lattice_resolution\n self.number_of_vectors = 0\n self.creation_type = creation_type\n self.vector_type = vector_type\n self.values = []\n self.values_planar = []\n self.ref_point = [1] * number_of_objectives if ref_point is None else ref_point\n self._create(creation_type)\n self.initial_values = np.copy(self.values)\n self.initial_values_planar = np.copy(self.values_planar)\n self.neighbouring_angles()\n # self.iteractive_adapt_1() Can use this for a priori preferences!", "def __init__(self, addr: ghidra.program.model.address.Address, componentPath: List[int], dt: ghidra.program.model.data.DataType):\n ...", "def __init__(self,*args):\n RNAstructure_wrap.Dynalign_object.__init__(self,*args)", "def __init__(self):\n #MdvData. 
__init__(self,model.target_fragments)\n #self.mdv = {}\n self.mdvtc ={}\n self.mode = \"timecourse\"", "def __init__(self, *args):\n this = _almathswig.new_Velocity6D(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, name='segmentation_node'):\n self._name = name\n\n self.accept_params()\n\n rospy.init_node(name)\n\n self._input_image = None\n self._input_image_raw = None\n\n self._init_pub_sub()\n\n self._model = nnio.zoo.edgetpu.segmentation.DeepLabV3(device=self._inference_device)\n\n self._timer = rospy.Timer(rospy.Duration(1.0 / self._max_inference_rate),\n self.inference)", "def __init__(self, **kwargs):\n super(SimpleGADriver, self).__init__(**kwargs)\n\n # What we support\n self.supports['integer_design_vars'] = True\n self.supports['inequality_constraints'] = True\n self.supports['equality_constraints'] = True\n self.supports['multiple_objectives'] = True\n\n # What we don't support yet\n self.supports['two_sided_constraints'] = False\n self.supports['linear_constraints'] = False\n self.supports['simultaneous_derivatives'] = False\n self.supports['active_set'] = False\n\n self._desvar_idx = {}\n self._ga = None\n\n # random state can be set for predictability during testing\n if 'SimpleGADriver_seed' in os.environ:\n self._randomstate = int(os.environ['SimpleGADriver_seed'])\n else:\n self._randomstate = None\n\n # Support for Parallel models.\n self._concurrent_pop_size = 0\n self._concurrent_color = 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the ``Response`` object into django's ``HttpResponse``
def _finalize_response(self, response):
    res = HttpResponse(content=response.content, content_type=self._get_content_type())
    # status_code is set separately to allow zero
    res.status_code = response.code
    return res
[ "def to_http_response(self) -> HttpResponse:\n response = (\n JsonResponse(self.body)\n if (self.headers or {}).get(\"Content-Type\") == \"application/json\"\n else HttpResponse(self.body)\n )\n response.headers = self.headers\n return response", "def make_response(request, result):\n response = request.response\n response.text = json.dumps(result)\n return response", "def to_response(self):\n return make_response(self.res, self.status)", "def get_final_response(self,request,response):\n return response", "def _make_response(status=200, content=None):\n response = HttpResponse()\n response.status_code = status\n response['Content-Type'] = \"application/json\"\n response.content = content\n return response", "def _to_response(result: Union[Dict, Response]) -> Response:\n if isinstance(result, Response):\n return result\n\n logger.debug(\"Simple response detected, serializing return before constructing final response\")\n return Response(\n status_code=200,\n content_type=\"application/json\",\n body=json.dumps(result, separators=(\",\", \":\"), cls=Encoder),\n )", "def get_django_response(proxy_response, strict_cookies=False):\n status = proxy_response.status\n headers = proxy_response.headers\n\n logger.debug('Proxy response headers: %s', headers)\n\n content_type = headers.get('Content-Type')\n\n logger.debug('Content-Type: %s', content_type)\n\n if should_stream(proxy_response):\n logger.info('Content-Length is bigger than %s', DEFAULT_AMT)\n response = StreamingHttpResponse(proxy_response.stream(DEFAULT_AMT),\n status=status,\n content_type=content_type)\n else:\n content = proxy_response.data or b''\n response = HttpResponse(content, status=status,\n content_type=content_type)\n\n logger.info('Normalizing response headers')\n set_response_headers(response, headers)\n\n logger.debug('Response headers: %s', getattr(response, '_headers'))\n\n cookies = proxy_response.headers.getlist('set-cookie')\n logger.info('Checking for invalid cookies')\n for cookie_string in cookies:\n cookie_dict = cookie_from_string(cookie_string,\n strict_cookies=strict_cookies)\n # if cookie is invalid cookie_dict will be None\n if cookie_dict:\n response.set_cookie(**cookie_dict)\n\n logger.debug('Response cookies: %s', response.cookies)\n\n return response", "def respond(self, code):\n\n # TODO: respect encodings etc. in the request\n resp = HttpResponse(code, self.connection)\n resp.request = self\n if hasattr(self, 'version'):\n resp.version = self.version\n return resp", "def _process_response(self, request, response):\n if http_utils.is_ajax(request) and hasattr(request, 'horizon'):\n queued_msgs = request.horizon['async_messages']\n if type(response) == http.HttpResponseRedirect:\n # Drop our messages back into the session as per usual so they\n # don't disappear during the redirect. 
Not that we explicitly\n # use django's messages methods here.\n for tag, message, extra_tags in queued_msgs:\n getattr(django_messages, tag)(request, message, extra_tags)\n if response['location'].startswith(settings.LOGOUT_URL):\n redirect_response = http.HttpResponse(status=401)\n # This header is used for handling the logout in JS\n redirect_response['logout'] = True\n if self.logout_reason is not None:\n utils.add_logout_reason(\n request, redirect_response, self.logout_reason,\n 'error')\n else:\n redirect_response = http.HttpResponse()\n # Use a set while checking if we want a cookie's attributes\n # copied\n cookie_keys = {'max_age', 'expires', 'path', 'domain',\n 'secure', 'httponly', 'logout_reason'}\n # Copy cookies from HttpResponseRedirect towards HttpResponse\n for cookie_name, cookie in response.cookies.items():\n cookie_kwargs = dict((\n (key, value) for key, value in cookie.items()\n if key in cookie_keys and value\n ))\n redirect_response.set_cookie(\n cookie_name, cookie.value, **cookie_kwargs)\n redirect_response['X-Horizon-Location'] = response['location']\n upload_url_key = 'X-File-Upload-URL'\n if upload_url_key in response:\n self._copy_headers(response, redirect_response,\n (upload_url_key, 'X-Auth-Token'))\n return redirect_response\n if queued_msgs:\n # TODO(gabriel): When we have an async connection to the\n # client (e.g. websockets) this should be pushed to the\n # socket queue rather than being sent via a header.\n # The header method has notable drawbacks (length limits,\n # etc.) and is not meant as a long-term solution.\n response['X-Horizon-Messages'] = json.dumps(queued_msgs)\n return response", "def build_response(self, data, status=200):\r\n # TODO: Remove the Django.\r\n # This should be plain old WSGI by default, if possible.\r\n # By default, Django-esque.\r\n from django.http import HttpResponse\r\n resp = HttpResponse(data, content_type='application/json')\r\n resp.status_code = status\r\n return resp", "def from_sync_httpx_response(cls, httpx_response, target, **kwargs):\n return httpcore.Response(\n status=httpx_response.status_code,\n headers=httpx_response.headers.raw,\n content=httpx_response.stream,\n extensions=httpx_response.extensions,\n )", "def _post_process_response(self):\n self.tweak_response_object()\n self.request[\"response\"].raise_for_status()\n set_response_encoding(self.request[\"response\"])", "def serialize_response(self, response):\n raise NotImplementedError()", "def test_create_http_response(self):\r\n \r\n response = self._http_core_foctory.create_http_response(\"headers\",\r\n b\"content\")\r\n \r\n self.assertTrue(isinstance(response, HttpResponse))\r\n self.assertEqual(\"headers\", response.headers)\r\n self.assertEqual(b\"content\", response.content)", "def _prepare_response(self):\n formatted_response = {}\n try:\n if self.response and self.response.ok:\n json_response = self.response.json()\n formatted_response = json_response[0]\n else:\n formatted_response['status'] = 'False'\n self.response = formatted_response\n except:\n self.response = {}", "def __response__(self) -> requests.Response:\n return self._self_response", "def render_response(self, content, response_type='json'):\n if response_type == 'json':\n response = HttpResponse(content_type=\"application/json; charset=UTF-8\")\n response.write(\n json.dumps(content, cls=JSONEncoder, ensure_ascii=False))\n return response\n return HttpResponse(content)", "def twiml_response(self, func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n response = 
func(*args, **kwargs)\n if isinstance(response, twiml.Response):\n response = response.toxml()\n return HttpResponse(\n response,\n content_type='text/xml; charset=utf-8')\n return decorated_view", "def response(self):\r\n error = unicode(self)\r\n return HttpResponseBadRequest(json.dumps({'error': error}))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Content-Type header value with charset info.
def _get_content_type(self):
    return '%s; charset=%s' % (self.content_type, self.charset)
[ "def getCharset(content):\n\treturn content.headers['content-type'].split('charset=')[-1]", "def get_ctype(self):\n ctype = self.response.getheader('Content-Type')\n\n end = 0\n try:\n end = ctype.index(';')\n mediatype = ctype[:end]\n except:\n mediatype = 'x-application/unknown'\n\n try:\n start = 8 + ctype.index('charset=', end)\n end = ctype.index(';', start)\n charset = ctype[start:end].rstrip()\n except:\n charset = 'ISO-8859-1' # TODO\n\n return mediatype, charset", "def charset(self) -> Optional[str]:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_dict.get(\"charset\") # type: ignore[union-attr]", "def content_type_header(request: Request) -> str:\n return request.content_type", "def get_content_type_and_encoding(content_type_header):\n\tif not content_type_header:\n\t\treturn (None, None)\n\t\n\th_parts = content_type_header.split(';')\n\tcontent_type = h_parts[0]\n\tpage_encoding = None\n\tfor h_part in h_parts[1:]:\n\t\th_part = h_part.strip()\n\t\tif h_part.lower().startswith('charset='):\n\t\t\tpage_encoding = h_part[8:]\n\treturn (content_type, page_encoding,)", "def declared_encoding(self) -> Optional[str]:\n content_type = self.get(\"Content-Type\", \"\")\n return http_content_type_encoding(content_type)", "def content_type(self):\n return self._headers['CONTENT-TYPE']", "def content_type(self):\n return self.get_header('Content-Type') or ''", "def get_content_type(self, request):\n header = request.requestHeaders.getRawHeaders(\"Content-Type\")\n if not header:\n return self.DEFAULT_CONTENT_TYPE\n\n content_type = set()\n for value in header:\n # Split out the various parts of the header and return them. We\n # ignore the q parameter here for the moment.\n content_type.update(\n entry.split(\";\")[0] for entry in value.split(\",\"))\n\n return content_type", "def encoding_from_content_type(content_type):\n\n if not content_type:\n return None\n match = encoding_re.search(content_type)\n return match and match.group(1) or None", "def get_encoding_from_headers(headers):\n content_type = headers.get('content-type')\n\n if not content_type:\n return None\n\n content_type, params = cgi.parse_header(content_type)\n\n if 'charset' in params:\n return params['charset'].strip(\"'\\\"\")\n\n if 'text' in content_type:\n return 'ISO-8859-1'", "def get_charset(request):\n\n content_type = request.META.get('CONTENT_TYPE', None)\n if content_type:\n return extract_charset(content_type) if content_type else None\n else:\n return None", "def _content_type__get(self):\n header = self.headers.get('Content-Type')\n if not header:\n return None\n return header.split(';', 1)[0]", "def extract_charset(content_type):\n\n match = charset_pattern.match(content_type)\n return match.group(1) if match else None", "def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type", "def extractCharset(response, default='utf-8'):\n\n charset = default\n if 'content-type' in response.headers:\n for item in response.headers['content-type'].split(';'):\n if item.strip().startswith('charset'):\n charset = item.split('=')[1].strip()\n break\n return charset", "def contentType(self):\n return MimeType.fromString(\"text/vcard; charset=utf-8\")", "def encoding(response: tornado.httpclient.HTTPResponse) -> str:\n if 'Content-Encoding' in response.headers:\n return 
response.headers['Content-Encoding'].decode()\n elif 'Content-Type' in response.headers:\n headers = email.message_from_string('Content-Type: ' +\n response.headers['Content-Type'])\n return headers.get_param('charset', 'utf-8')\n else:\n return 'utf-8'", "def get_content_type(self, headers):\n if headers:\n for h, val in headers.items():\n if h.lower().strip() == 'content-type':\n # As it turns out, content-type often appears with some\n # additional values e.g \"text/css; charset=utf8\" so we want\n # just 'text/css' rather than the whole string\n return val[0].split(\";\")[0]\n return \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the manager. The ``_datamappers`` dictionary is initialized here to make testing easier.
def __init__(self):
    self._datamappers = { '*/*': DataMapper() }
[ "def _initialize_mappers(mappers_factory, work_dir=None):\n if work_dir is not None:\n os.chdir(work_dir) # needed for ray\n G.F_MAPPERS = Composed(mappers_factory)", "def init_dataloaders(self):\n raise NotImplementedError()", "def init(self):\n self.init_db_header()\n self.load_segments()", "def _initialisation(self):\n\n self._maps_initialisation()\n self._distr_initialisation()\n self._em_initialisation()", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def __init__(self, dm_paths):\n Loader.__init__(self)\n self.args = self._prepare_args(locals())\n self.dm_paths = dm_paths", "def initialize_providers_manager(self):\n # We cannot use @cache here because it does not work during pytest, apparently each test\n # runs it it's own namespace and ProvidersManager is a different object in each namespace\n # even if it is singleton but @cache on the initialize_providers_manager message still works in the\n # way that it is called only once for one of the objects (at least this is how it looks like\n # from running tests)\n if self._initialized:\n return\n # Local source folders are loaded first. They should take precedence over the package ones for\n # Development purpose. In production provider.yaml files are not present in the 'airflow\" directory\n # So there is no risk we are going to override package provider accidentally. This can only happen\n # in case of local development\n self._discover_all_airflow_builtin_providers_from_local_sources()\n self._discover_all_providers_from_packages()\n self._discover_hooks()\n self._provider_dict = OrderedDict(sorted(self._provider_dict.items())) # noqa\n self._hooks_dict = OrderedDict(sorted(self._hooks_dict.items())) # noqa\n self._connection_form_widgets = OrderedDict(sorted(self._connection_form_widgets.items())) # noqa\n self._field_behaviours = OrderedDict(sorted(self._field_behaviours.items())) # noqa\n self._discover_extra_links()\n self._initialized = True", "def init( dataDir ):\n\n global _dataDir\n \n _dataDir = dataDir", "def __init__(self, mapper=None):\n self.mapper = mapper", "def __init__(self):\n cdb.initialize()\n cred.Store.initialize()\n self._vlan_mgr = importutils.import_object(conf.MANAGER_CLASS)\n for key in conf.PLUGINS[const.PLUGINS].keys():\n plugin_obj = conf.PLUGINS[const.PLUGINS][key]\n self._plugins[key] = importutils.import_object(plugin_obj)\n LOG.debug(\"Loaded device plugin %s\\n\" %\n conf.PLUGINS[const.PLUGINS][key])\n if key in conf.PLUGINS[const.INVENTORY].keys():\n inventory_obj = conf.PLUGINS[const.INVENTORY][key]\n self._inventory[key] = importutils.import_object(inventory_obj)\n LOG.debug(\"Loaded device inventory %s\\n\" %\n conf.PLUGINS[const.INVENTORY][key])\n\n LOG.debug(\"%s.%s init done\" % (__name__, self.__class__.__name__))", "def __init__(self) -> None:\n\n for info in self.__METADATA.values():\n info[\"loader\"] = getattr(loaders, info.get(\"loader\", None), None)", "def test_init(self):\n try:\n GeneralArrayManager(self.TEST_BLOCK_SIZE,\n self.TEST_STRING_BLOCK_SIZE)\n except IOError:\n self.fail(\"GeneralArrayManager initializer failed: %s\"\n \"db file failed to open.\")", "def __init__(self, db):\n\n # Get a logger handle (singleton)\n self._logger = logger.logger()\n\n # Set the database\n self._db = db\n\n # Pull the MapReduce manager collection\n self._storage = db[\"mr_manager\"]\n\n '''\n # Pull the MapReduce manager\n self._storage = db[\"mr_manager\"].find_one({\"_dataBlobID\":\"mr_manager\"})\n if 
(not self._storage):\n self._logger.warning(\"Didn't find the MapReduce manager: creating it...\")\n db[\"mr_manager\"].save({\"_dataBlobID\":\"mr_manager\", 'desc':\"MapReduce Manager\",'mr_job_array':[]})\n self._storage = db[\"mr_manager\"].find_one({\"_dataBlobID\":\"mr_manager\"})\n\n\n # Make sure we have the time zone info all set\n mr_job_array = self._storage['mr_job_array']\n for mr_job in mr_job_array:\n mr_job['start'] = pytz.UTC.localize(mr_job['start'])\n mr_job['end'] = pytz.UTC.localize(mr_job['end'])\n '''", "def __init__(self) -> None:\n self.mappings = {}", "def _initialize_dataloaders(\n self, train_data: np.ndarray, val_data: np.ndarray, batch_size: int\n ):\n\n train_dataset = torch.from_numpy(train_data).float()\n self.train_data = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size\n )\n if val_data is not None:\n val_dataset = torch.from_numpy(val_data).float()\n self.val_data = torch.utils.data.DataLoader(\n val_dataset, batch_size=batch_size\n )\n else:\n self.val_data = None", "def setUp(self):\n self.ds = DictionaryStore(Camper)\n self.engine = Engine(self.ds)", "def __init__(self):\n self.init(**self.get_init_storage())", "def _configure_manager(self):\n self._manager = CloudDatabaseManager(self,\n resource_class=CloudDatabaseInstance, response_key=\"instance\",\n uri_base=\"instances\")\n self._flavor_manager = BaseManager(self,\n resource_class=CloudDatabaseFlavor, response_key=\"flavor\",\n uri_base=\"flavors\")\n self._backup_manager = CloudDatabaseBackupManager(self,\n resource_class=CloudDatabaseBackup, response_key=\"backup\",\n uri_base=\"backups\")", "def __init__(self, *args, **kwargs):\n super(DataMoverTestBase, self).__init__(*args, **kwargs)\n self.dm_cmd = None\n self.processes = None\n self.pool = []\n self.containers = []\n self.uuids = []\n self._gen_daos_path_v = 0\n self.dfuse_hosts = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select appropriate formatter based on the request.
def select_formatter(self, request, resource):
    # 1. get from resource
    if resource.mapper:
        return resource.mapper
    # 2. get from url
    mapper_name = self._get_name_from_url(request)
    if mapper_name:
        return self._get_mapper(mapper_name)
    # 3. get from accept header
    mapper_name = self._get_name_from_accept(request)
    if mapper_name:
        return self._get_mapper(mapper_name)
    # 4. use resource's default
    if resource.default_mapper:
        return resource.default_mapper
    # 5. use manager's default
    return self._get_default_mapper()
[ "def _determine_format(self, request):\n return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)", "def determine_format(request, serializer, default_format='application/json'):\r\n # First, check if they forced the format.\r\n if request.GET.get('format'):\r\n if request.GET['format'] in serializer.formats:\r\n return serializer.get_mime_for_format(request.GET['format'])\r\n\r\n # Try to fallback on the Accepts header.\r\n if request.META.get('HTTP_ACCEPT', '*/*') != '*/*':\r\n formats = list(serializer.supported_formats) or []\r\n # Reverse the list, because mimeparse is weird like that. See also\r\n # https://github.com/toastdriven/django-tastypie/issues#issue/12 for\r\n # more information.\r\n formats.reverse()\r\n best_format = mimeparse.best_match(\r\n formats, request.META['HTTP_ACCEPT'])\r\n\r\n if best_format:\r\n return best_format\r\n\r\n # No valid 'Accept' header/formats. Sane default.\r\n return default_format", "def determine_format(request, serializer, default_format='application/json'):\n\n # First, check if they forced the format.\n if request.GET.get('format'):\n if request.GET['format'] in serializer.formats:\n return serializer.get_mime_for_format(request.GET['format'])\n\n if request.GET.get('file'):\n default_format = 'application/octet-stream'\n\n # No valid 'Accept' header/formats. Sane default.\n return default_format", "def get_formatter(format_name):\n return {\n FORMAT_STYLISH: stylish,\n FORMAT_PLAIN: plain,\n FORMAT_JSON: formatted_to_json,\n }.get(format_name)", "def determine_format(request, serializer, default_format='application/json'):\n # First, check if they forced the format.\n if request.GET.get('format'):\n if request.GET['format'] in serializer.formats:\n return serializer.get_mime_for_format(request.GET['format'])\n \n # If callback parameter is present, use JSONP.\n if request.GET.has_key('callback'):\n return serializer.get_mime_for_format('jsonp')\n \n # Try to fallback on the Accepts header.\n if request.META.get('HTTP_ACCEPT', '*/*') != '*/*':\n formats = list(serializer.supported_formats) or []\n # Reverse the list, because mimeparse is weird like that. See also\n # https://github.com/toastdriven/django-tastypie/issues#issue/12 for\n # more information.\n formats.reverse()\n best_format = mimeparse.best_match(formats, request.META['HTTP_ACCEPT'])\n \n if best_format:\n return best_format\n \n # No valid 'Accept' header/formats. Sane default.\n return default_format", "def _get_formatter(self, data_format): \n if data_format == 'file':\n return self._extract_data_to_file\n elif data_format == 'gdf':\n return self._extract_data_to_gdf\n else:\n raise ValueError(data_format)", "def _get_format(self, request):\n\n # Derive a list of 'formats.Format' instances from the list of formats these views support.\n supported_formats = [formats.find(format) for format in self.supported_formats]\n\n # Determine format by extension...\n if '.' 
in request.path:\n extension = request.path.split('.')[-1]\n\n try:\n format = formats.find_by_extension(extension)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # Determine format by HTTP Accept header...\n if 'HTTP_ACCEPT' in request.META:\n content_types = parse_http_accept_header(request.META['HTTP_ACCEPT'])\n\n # Only consider 'accept' headers with a single format in an attempt to play nice\n # with browsers that ask for formats they really should not want.\n if len(content_types) == 1:\n content_type = content_types[0]\n\n # If the request has no preference as to the format of its response, prefer the\n # first of the view's supported formats.\n if content_type == '*/*':\n return supported_formats[0]\n\n try:\n format = formats.find_by_content_type(content_type)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # If no format is given by either extension or header, default to the format given in\n # RESPITE_DEFAULT_FORMAT (given, of course, that it's supported by the view).\n if DEFAULT_FORMAT:\n format = formats.find(DEFAULT_FORMAT)\n\n if format in supported_formats:\n return format\n else:\n return None", "def choose_formatter_factory(\n options_list: Options,\n) -> type[formatters.BaseFormatter[Any]]:\n r: type[formatters.BaseFormatter[Any]] = formatters.Formatter\n if options_list.format == \"quiet\":\n r = formatters.QuietFormatter\n elif options_list.format in (\"json\", \"codeclimate\"):\n r = formatters.CodeclimateJSONFormatter\n elif options_list.format == \"sarif\":\n r = formatters.SarifFormatter\n elif options_list.parseable or options_list.format == \"pep8\":\n r = formatters.ParseableFormatter\n return r", "def get_format(request, default='html'):\n format_ = request.GET.get('format', None)\n if not format_:\n format_ = request.GET.get('view', default)\n return format_", "def _set_formatter(self, formatter_name):\n self.formatter = self.formatter_options[formatter_name]()", "def get_formatter(name):\n for k in sorted(_FORMATTERS):\n if k.startswith(name):\n return _FORMATTERS[k]", "def configure_formatter(self, config):\n if '()' in config:\n factory = config['()'] # for use in exception handler\n try:\n result = self.configure_custom(config)\n except TypeError as te:\n if \"'format'\" not in str(te):\n raise\n # Name of parameter changed from fmt to format.\n # Retry with old name.\n # This is so that code can be used with older Python versions\n #(e.g. 
by Django)\n config['fmt'] = config.pop('format')\n config['()'] = factory\n result = self.configure_custom(config)\n else:\n fmt = config.get('format', None)\n dfmt = config.get('datefmt', None)\n result = logging.Formatter(fmt, dfmt)\n return result", "def initialize_formatter(config):\n if config.json: # pylint: disable=R1705\n return formatters.JsonFormatter()\n elif config.severity: # pylint: disable=R1705\n return formatters.SeverityFormatter(config.colored)\n return formatters.Formatter(config.colored)", "def response_format(request, fmt):\n best = request.accept_mimetypes \\\n .best_match(MIME_TYPES.keys())\n if fmt in MIME_TYPES.values():\n return fmt\n return MIME_TYPES.get(best)", "def get_response_format():\n # if a format is specified in the url, always use it\n try:\n return request.args['format']\n except KeyError:\n pass\n # otherwise determine it from accept mime types header\n default = 'text/html'\n keys = [default, ]\n keys.extend(mimes.keys())\n best = request.accept_mimetypes.best_match(\n keys,\n default=default)\n result = mimes[best]\n app.logger.debug('response format %s', result)\n return result", "def set_formatter(self, formatter):\n self.format = formatter", "def formatter_default(self):\n if self.app_verbose_level > 1:\n return 'json'\n else:\n return 'table'", "def formatter(question: dict):\n fmt = question.get('possibilities', {}).get('format')\n if fmt == 'date':\n return pd.to_datetime\n elif fmt == 'num':\n return lambda x: x\n else:\n raise ValueError(f\"Question format {fmt} unknown\")", "def set_formatter_format(option='simple'):\n # Check for valid input\n options = ['simple', 'process', 'function']\n if option not in options:\n print(\"Input error!\")\n raise ValueError\n # Define a simple formatter\n if option == 'simple':\n return logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # Define a process formatter\n if option == 'process':\n return logging.Formatter(\n '%(asctime)s %(module)s -> %(process)d %(lineno)d %(levelname)s' +\n ' - %(message)s')\n # Define a function formatter\n if option == 'process':\n return logging.Formatter(\n '%(asctime)s %(module)s %(funcName)s -> %(lineno)d %(levelname)s' +\n ' - %(message)s')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select appropriate parser based on the request.
def select_parser(self, request, resource):
    # 1. get from resource
    if resource.mapper:
        return resource.mapper
    # 2. get from content type
    mapper_name = self._get_name_from_content_type(request)
    if mapper_name:
        return self._get_mapper(mapper_name)
    # 3. get from url
    mapper_name = self._get_name_from_url(request)
    if mapper_name:
        return self._get_mapper(mapper_name)
    # 4. use resource's default
    if resource.default_mapper:
        return resource.default_mapper
    # 5. use manager's default
    return self._get_default_mapper()
[ "def select_parser(self, request, parsers):\n return parsers[0]", "def parser(cls, type, dom):\r\n parser_class = cls.parsers.get(type)\r\n if parser_class:\r\n return parser_class(dom)", "def __find_parser__( self, input=None ):\n\n ## Check with parser name\n for name, parser in self.parsers.iteritems():\n if name == input:\n return parser\n\n ### if parser not find with name, we need to search with command\n\n command = ''\n command_xml = ''\n\n # Check if the command include \"| display xml\"\n # if not create a version with display xml\n display_xml_regex = r\"(\\s*\\|\\s*display\\s*xml\\s*)$\"\n\n has_display_xml = re.search(display_xml_regex, input,re.MULTILINE)\n\n if has_display_xml:\n command = re.sub(display_xml_regex, \"\", input)\n command_xml = input\n else:\n command = input\n command_xml = input + \" | display xml\"\n\n ## Check for parsers pyez, xml and regex\n for type in ['pyez', 'xml', 'regex']:\n\n for name, parser in self.parsers.iteritems():\n if parser['type'] != type:\n continue\n\n # Check if command in file is regex or not\n command_is_regex = False\n\n ## Check if command is a regex or not\n if re.search(r\"\\\\s[\\+\\*]\", parser['command'],re.MULTILINE):\n command_is_regex = True\n command_re = re.compile(parser['command'])\n\n if command_is_regex:\n if command_re.match(command) or command_re.match(command_xml):\n return parser\n else:\n if parser['command'] == command or parser['command'] == command_xml:\n return parser\n\n ## if nothing has been found\n return None", "def guess_parser(url):\n parsed = urlparse.urlparse(url)\n ext = os.path.splitext(parsed.path)[1].lstrip('.')\n if ext == 'csv' or parsed.netloc == 'spreadsheets.google.com':\n return get_parser('csv')\n elif ext in (\"xlsx\", \"excelx\"):\n return get_parser('xlsx')\n elif ext in (\"xls\", \"excel\"):\n return get_parser(\"xls\")\n elif ext in ('htm', 'html'):\n return get_parser('html')\n elif ext == 'xml':\n return get_parser('xml')\n else:\n raise ValueError(\"Can't guess a parser for URL %r\" % url)", "def get_parser(self) -> Union[Parser, ParserSoup, any]:\n return AVAILABLE_PARSERS[self.parser_class]", "def get_parser(self):\n if self.vendor and self.platform and self.version:\n cls = self.profile.get_profile().get_parser(\n self.vendor.code, self.platform.name, self.version.version\n )\n if cls:\n return get_handler(cls)(self)\n return get_handler(\"noc.cm.parsers.base.BaseParser\")(self)", "def get_first_available_parser():\n if sys.platform == 'cli':\n try:\n from bridge.parser.bridge_dotnet import Parser\n return Parser\n except ImportError:\n pass\n elif sys.platform[:4] == 'java':\n try:\n from bridge.parser.bridge_java import Parser\n return Parser\n except ImportError:\n pass\n \n from bridge.parser.bridge_default import Parser\n \n return Parser", "def _get_parser(self, attr_name):\n parser_name = self._object_parser_name(attr_name)\n if not hasattr(parsers, parser_name):\n parser_name = self._generic_parser_name(attr_name)\n parser = getattr(parsers, parser_name, parsers.generic_parser)\n return parser", "def _parser_for(parsers, content_type):\n for expr, parser in parsers.items():\n # XXX: Pedestal uses regexps instead of strings to match, why?\n if expr == content_type:\n return parser\n return identity", "def _get_parser(self, language: str):\n parser = None\n if language:\n parser = self.parsers.get(language)\n\n if not parser:\n self.log.warning(f\"Content parser for {language} is not available.\")\n return parser", "def get_parser_for_file_type(file_type):\n parser = 
file_type.upper()\n if file_type not in SUPPORTED_PARSERS:\n parser = 'XML'\n return parser", "def _get_parser(filepath, cfg):\n if not os.path.isfile(filepath):\n LOG.error('File not found: %s', filepath)\n return\n valid_parsers = importer.get_parsers(filepath, cfg)\n if not valid_parsers:\n LOG.error('No parsers found for file: %s', filepath)\n return\n\n if len(valid_parsers) > 1:\n while True:\n print('More than one valid parser found. '\n 'Please select which one to use:')\n for idx, vp in enumerate(valid_parsers):\n print('[{}] {}'.format(idx, vp.__name__))\n inp = input()\n try:\n parser = valid_parsers[inp]\n break\n except (IndexError, TypeError):\n print('Invalid input. Please select the parser number.')\n else:\n parser = valid_parsers[0]\n\n return parser", "def get_parser(name):\n try:\n return importlib.import_module('.%s' % name, package=__name__).Parser\n except ImportError, e:\n raise ValueError(\"Can't find or load a parser named %r: %s\" % (name, e))", "def request_parser(cls):\n parser = RequestParser()\n for name, source, attr, typ, help, default in cls.PARAMS:\n if source == 'derived':\n continue\n required = source == 'required'\n if Config.get('web', 'case') == 'camel':\n param_name = under_to_camel(name)\n else:\n param_name = name\n parser.add_argument(param_name, type=typ, required=required, help=help, default=default)\n return parser", "def GetParserObjectByName(cls, parser_name):\n parser_class = cls._parser_classes.get(parser_name, None)\n if not parser_class:\n return\n return parser_class()", "def choose_bs_parser(html):\n if re.match(r\"<(section|nav|article|aside|header|footer|main).*?>\", html) or re.match(r\"<!DOCTYPE html>\", html,\n re.IGNORECASE):\n return \"html5lib\"\n else:\n return \"html.parser\"", "def parse_from_request(self, name, request):\n # type: (str, Request) -> Any\n name_bytes = name.encode()\n if name_bytes not in request.args:\n if self.default is not None:\n return self.default\n if self.required:\n raise Error(BAD_REQUEST, message=b\"%s is required\" % name_bytes)\n else:\n return None\n\n if len(request.args[name_bytes]) != 1:\n raise Error(BAD_REQUEST, message=b\"Pass exactly one argument for %s\" % name_bytes)\n\n val = request.args[name_bytes][0]\n return self.parse(val)", "def get_read_parser(format):\n format = format.lower()\n if format == 'bed':\n return BedReadParser\n elif format == 'bedpe':\n return BedPeReadParser\n elif format == 'sam':\n return SamReadParser\n elif format == 'bam':\n return BamReadParser\n else:\n raise ValueError(f\"unknown read file format: {format!r}\")", "def getparser(use_datetime=False, use_builtin_types=False):\n if FastParser and FastUnmarshaller:\n if use_builtin_types:\n mkdatetime = _datetime_type\n mkbytes = base64.decodebytes\n elif use_datetime:\n mkdatetime = _datetime_type\n mkbytes = _binary\n else:\n mkdatetime = _datetime\n mkbytes = _binary\n target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault)\n parser = FastParser(target)\n else:\n target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=\n use_builtin_types)\n if FastParser:\n parser = FastParser(target)\n else:\n parser = ExpatParser(target)\n return parser, target" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the mapper based on the content type.
def get_mapper_by_content_type(self, content_type):
    content_type = util.strip_charset(content_type)
    return self._get_mapper(content_type)
[ "def get_mapping_type(cls):\n ...", "def loadMimemapper():", "def _get_mapper(obj):\n its_a_model = isinstance(obj, type)\n mapper = class_mapper if its_a_model else object_mapper\n return mapper(obj)", "def lookup_by_content_type(self, content_type):\n return self.__by_content_type[content_type.lower()]", "def __getitem__(self, content_type):\n return self._get_serializer_for_type(content_type)", "def getMappingType(self):\n \n return self.mapping_type", "def select_parser(self, request, resource):\n\n # 1. get from resource\n if resource.mapper:\n return resource.mapper\n # 2. get from content type\n mapper_name = self._get_name_from_content_type(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 3. get from url\n mapper_name = self._get_name_from_url(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 4. use resource's default\n if resource.default_mapper:\n return resource.default_mapper\n # 5. use manager's default\n return self._get_default_mapper()", "def mapperClass(self):\n return PosixStorage.getMapperClass(self.root)", "def _get_msg_by_content_type(self, content_type):\n for msg in self.walk():\n if msg.get_content_type() == content_type:\n return msg", "def _get_mapper(self, mapper_name):\n\n if mapper_name in self._datamappers:\n # mapper found\n return self._datamappers[mapper_name]\n else:\n # unsupported format\n return self._unknown_format(mapper_name)", "def mapper_id(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return id(mapper)", "def get_entity_type_mapping(self) -> Optional[Dict]:\n return None", "def _get_default_mapper(self):\n\n return self._datamappers['*/*']", "def _get_mapper(self,modelname):\r\n mapmodule = __import__('cone.carbon.mapping')\r\n return mapmodule.carbon.mapping.MAPPERS[modelname]()", "def load(self) -> Mapping[str, Mapping[str, Any]]:\n\n types = {}\n types_config = self.config[\"type\"]\n for type_config in types_config:\n directory = type_config[\"directory\"]\n fragment_type_name = type_config[\"name\"]\n is_content_required = type_config[\"showcontent\"]\n types[directory] = {\n \"name\": fragment_type_name,\n \"showcontent\": is_content_required,\n }\n return types", "def get_content_type(ct_name):\r\n try:\r\n ct = CONTENT_TYPE_MAPPING[ct_name]\r\n except KeyError:\r\n for model in models.get_models():\r\n if ct_name == slugify(model._meta.verbose_name_plural):\r\n ct = ContentType.objects.get_for_model(model)\r\n CONTENT_TYPE_MAPPING[ct_name] = ct\r\n break\r\n else:\r\n raise Http404\r\n return ct", "def get_serializer(self, content_type, default_serializers=None):\n\n default_serializers = default_serializers or {}\n\n try:\n mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)\n if mtype in self.serializers:\n return mtype, self.serializers[mtype]\n else:\n return mtype, default_serializers[mtype]\n except (KeyError, TypeError):\n raise exception.InvalidContentType(content_type=content_type)", "def _get_mapper(mapper_name: str, mapper_settings: Optional[dict] = None):\n mapper_type = MapperType.from_mapper_name(mapper_name)\n if mapper_type:\n if mapper_settings:\n mapper = mapper_type.interface(**mapper_settings)\n else:\n mapper = mapper_type.interface()\n return mapper\n else:\n raise NotImplementedError(f'Mapper of type {mapper_name} is not implemented')", "def guess_mapping_format_from_content(content):\n regex = re.compile(\"MAPPING.*?FROM\", re.DOTALL | re.IGNORECASE)\n syntax = \"SMS2\" if regex.search(content) else None\n\n return syntax" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the default mapper to be used when no format is defined. This is the same as calling ``register_mapper`` with ``*/*``, except that ``None`` may be given as the parameter.
def set_default_mapper(self, mapper):
    mapper = mapper or DataMapper()
    self._datamappers['*/*'] = mapper
[ "def _get_default_mapper(self):\n\n return self._datamappers['*/*']", "def set_mapper(obj, mapper):\n setattr(obj, MAPPER, mapper)\n return mapper", "def get_default_generator(self):\n raise self.format_generator_map[self.default_format].generator_class", "def __init__(self, mapper=None):\n self.mapper = mapper", "def create_mapper() -> Mapper:\n return Mapper()", "def set_config_file_mapper(f: Callable[[str], str]):\n RenderTool._config_file_mapper = f", "def _get_mapper(self, mapper_name):\n\n if mapper_name in self._datamappers:\n # mapper found\n return self._datamappers[mapper_name]\n else:\n # unsupported format\n return self._unknown_format(mapper_name)", "def set_default(self, node: Node) -> None:\n if isinstance(node, str):\n self._default = TextRecord(self.node, node)\n if isinstance(node, ast.AST):\n self._default = ExpressionRecord(node)", "def _set_default_serializer(self, name):\r\n try:\r\n (self._default_content_type, self._default_content_encoding,\r\n self._default_encode) = self._encoders[name]\r\n except KeyError:\r\n raise SerializerNotInstalled(\r\n \"No encoder installed for %s\" % name)", "def _get_mapper(mapper_name: str, mapper_settings: Optional[dict] = None):\n mapper_type = MapperType.from_mapper_name(mapper_name)\n if mapper_type:\n if mapper_settings:\n mapper = mapper_type.interface(**mapper_settings)\n else:\n mapper = mapper_type.interface()\n return mapper\n else:\n raise NotImplementedError(f'Mapper of type {mapper_name} is not implemented')", "def set_base_format(self: T, format_):\n self.__class__.FALLBACK_FORMAT = FormatDict(format_)", "def set_default_format(cls, fmt: FormatStr | core.QSettings.Format):\n cls.setDefaultFormat(FORMAT.get_enum_value(fmt))", "def mapper(self, new_mapper=None):\n if new_mapper:\n self.SetMapper(new_mapper)\n if self._mapper:\n iptdata = self._mapper.GetInput()\n if iptdata:\n new_mapper.SetInputData(self._mapper.GetInput())\n self._mapper = new_mapper\n self._mapper.Modified()\n return self._mapper", "def _initialize_mappers(mappers_factory, work_dir=None):\n if work_dir is not None:\n os.chdir(work_dir) # needed for ray\n G.F_MAPPERS = Composed(mappers_factory)", "def select_formatter(self, request, resource):\n\n # 1. get from resource\n if resource.mapper:\n return resource.mapper\n # 2. get from url\n mapper_name = self._get_name_from_url(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 3. get from accept header\n mapper_name = self._get_name_from_accept(request)\n if mapper_name:\n return self._get_mapper(mapper_name)\n # 4. use resource's default\n if resource.default_mapper:\n return resource.default_mapper\n # 5. use manager's default\n return self._get_default_mapper()", "def set_default_parser(parser=None): # real signature unknown; restored from __doc__\n pass", "def default_field(self, default_field):\n\n self._default_field = default_field", "def GetDefaultFormat(self, p_int, p_int_1, bool):\n ...", "def set_default_region(self, region):\n region = _convert_to_charp(region)\n self._set_default_region_func(self.alpr_pointer, region)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the default mapper.
def _get_default_mapper(self):
    return self._datamappers['*/*']
[ "def create_mapper() -> Mapper:\n return Mapper()", "def _get_mapper(self,modelname):\r\n mapmodule = __import__('cone.carbon.mapping')\r\n return mapmodule.carbon.mapping.MAPPERS[modelname]()", "def set_default_mapper(self, mapper):\n\n mapper = mapper or DataMapper()\n self._datamappers['*/*'] = mapper", "def get_mapper(modelname):\r\n mapmodule = __import__('cone.carbon.mapping')\r\n return mapmodule.carbon.mapping.MAPPERS[modelname]()", "def _get_mapper(self, mapper_name):\n\n if mapper_name in self._datamappers:\n # mapper found\n return self._datamappers[mapper_name]\n else:\n # unsupported format\n return self._unknown_format(mapper_name)", "def mapperClass(self):\n return PosixStorage.getMapperClass(self.root)", "def _get_mapper(mapper_name: str, mapper_settings: Optional[dict] = None):\n mapper_type = MapperType.from_mapper_name(mapper_name)\n if mapper_type:\n if mapper_settings:\n mapper = mapper_type.interface(**mapper_settings)\n else:\n mapper = mapper_type.interface()\n return mapper\n else:\n raise NotImplementedError(f'Mapper of type {mapper_name} is not implemented')", "def col_mapper(self):\n return self._col_mapper", "def _get_maps_get_default_rules(self):\n return self.__maps_get_default_rules", "def mapper(self, new_mapper=None):\n if new_mapper:\n self.SetMapper(new_mapper)\n if self._mapper:\n iptdata = self._mapper.GetInput()\n if iptdata:\n new_mapper.SetInputData(self._mapper.GetInput())\n self._mapper = new_mapper\n self._mapper.Modified()\n return self._mapper", "def _get_mapper(obj):\n its_a_model = isinstance(obj, type)\n mapper = class_mapper if its_a_model else object_mapper\n return mapper(obj)", "def get_default_generator(self):\n raise self.format_generator_map[self.default_format].generator_class", "def mapping(self):\n try:\n mapper_file = pathlib.Path(self.mapper)\n if mapper_file.is_file():\n with open(self.mapper, 'r') as f:\n self.mapper = json.load(f)\n except (OSError, TypeError):\n pass\n if not isinstance(self.mapper, dict):\n raise TypeError(f\"mapper must be dict {self.mapper} ==> \"\n f\"{type(self.mapper)}\")\n if not self._check_for_labels():\n raise(MissingLabelsKey(f\"mapper must contain 'labels' key at \"\n f\"outer most level: {self.mapper}\"))\n return self.mapper", "def __init__(self, mapper=None):\n self.mapper = mapper", "def _load_mapper(self, group_obj) -> FeedDataMapper:\n mapper = self.__class__.__group_data_mappers__.get(group_obj.name)\n\n if not mapper:\n raise Exception(\n \"No mapper class found for group: {}\".format(group_obj.name)\n )\n\n return mapper", "def get_mapper(obj, *, expected=None):\n try:\n mapper = object.__getattribute__(obj, MAPPER)\n except AttributeError:\n mapper = None\n\n if mapper and expected is False:\n msg = \"{!r} is already mapped\".format(obj)\n raise TypeError(msg)\n\n if not mapper and expected is True:\n msg = \"{!r} is not mapped\".format(obj)\n raise TypeError(msg)\n\n return mapper", "def get_mapper_by_content_type(self, content_type):\n\n content_type = util.strip_charset(content_type)\n return self._get_mapper(content_type)", "def mapper_id(self, structure):\n type_ = structure['type']\n mapper = self.mappers[type_]\n return id(mapper)", "def get_mapper(mixed):\n if isinstance(mixed, orm._MapperEntity):\n mixed = mixed.expr\n elif isinstance(mixed, orm.sa.Column):\n mixed = mixed.table\n elif isinstance(mixed, orm._ColumnEntity):\n mixed = mixed.expr\n\n if isinstance(mixed, orm.sa.orm.Mapper):\n return mixed\n if isinstance(mixed, orm.sa.orm.util.AliasedClass):\n return 
orm.sa.inspect(mixed).mapper\n if isinstance(mixed, orm.sa.sql.selectable.Alias):\n mixed = mixed.element\n if isinstance(mixed, orm.AliasedInsp):\n return mixed.mapper\n if isinstance(mixed, orm.sa.orm.attributes.InstrumentedAttribute):\n mixed = mixed.class_\n if isinstance(mixed, orm.sa.Table):\n if hasattr(orm.mapperlib, '_all_registries'):\n all_mappers = set()\n for mapper_registry in orm.mapperlib._all_registries():\n all_mappers.update(mapper_registry.mappers)\n else: # SQLAlchemy <1.4\n all_mappers = orm.mapperlib._mapper_registry\n mappers = [\n mapper for mapper in all_mappers\n if mixed in {mapper.local_table}\n ]\n if len(mappers) > 1:\n raise Exception('Still to many mappers %s' % str(mappers))\n if not mappers:\n raise ValueError(\n \"Could not get mapper for table '%s'.\" % mixed.name\n )\n else:\n return mappers[0]\n if not orm.isclass(mixed):\n mixed = type(mixed)\n return orm.sa.inspect(mixed)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the mapper based on the given name.
def _get_mapper(self, mapper_name):
    if mapper_name in self._datamappers:
        # mapper found
        return self._datamappers[mapper_name]
    else:
        # unsupported format
        return self._unknown_format(mapper_name)
[ "def get_mapper(modelname):\r\n mapmodule = __import__('cone.carbon.mapping')\r\n return mapmodule.carbon.mapping.MAPPERS[modelname]()", "def _get_mapper(self,modelname):\r\n mapmodule = __import__('cone.carbon.mapping')\r\n return mapmodule.carbon.mapping.MAPPERS[modelname]()", "def _get_mapper(mapper_name: str, mapper_settings: Optional[dict] = None):\n mapper_type = MapperType.from_mapper_name(mapper_name)\n if mapper_type:\n if mapper_settings:\n mapper = mapper_type.interface(**mapper_settings)\n else:\n mapper = mapper_type.interface()\n return mapper\n else:\n raise NotImplementedError(f'Mapper of type {mapper_name} is not implemented')", "def lookup(self,name):\n return self.namemap.get(name)", "def get(map_name):\r\n if isinstance(map_name, Map):\r\n return map_name\r\n\r\n # Get the list of maps. This isn't at module scope to avoid problems of maps\r\n # being defined after this module is imported.\r\n maps = get_maps()\r\n map_class = maps.get(map_name)\r\n if map_class:\r\n return map_class()\r\n raise NoMapError(\"Map doesn't exist: %s\" % map_name)", "def _get_default_mapper(self):\n\n return self._datamappers['*/*']", "def create_mapper() -> Mapper:\n return Mapper()", "def _get_map_name(self):\n return self.__map_name", "def get_map(self, name, return_type='image'):\n m = self.maps.get(name)\n if m is None:\n raise ValueError(\"No map with name '{}' found.\".format(name))\n return self.masker.inverse_transform(m) if return_type == 'image' else m", "def _get_config_by_name(self, name: str) -> Mapping[str, Any]:\n return {\n get_model_name(config): config\n for config in self.configs\n }.get(name)", "def getMapperClass(root):\n if not (root):\n return None\n\n # Find a \"_mapper\" file containing the mapper class name\n basePath = root\n mapperFile = \"_mapper\"\n globals = {}\n while not os.path.exists(os.path.join(basePath, mapperFile)):\n # Break abstraction by following _parent links from CameraMapper\n if os.path.exists(os.path.join(basePath, \"_parent\")):\n basePath = os.path.join(basePath, \"_parent\")\n else:\n raise RuntimeError(\n \"No mapper provided and no %s available\" %\n (mapperFile,))\n mapperFile = os.path.join(basePath, mapperFile)\n\n # Read the name of the mapper class and instantiate it\n with open(mapperFile, \"r\") as f:\n mapperName = f.readline().strip()\n components = mapperName.split(\".\")\n if len(components) <= 1:\n raise RuntimeError(\"Unqualified mapper name %s in %s\" %\n (mapperName, mapperFile))\n pkg = importlib.import_module(\".\".join(components[:-1]))\n return getattr(pkg, components[-1])", "def resolve_name_to_key(self, name):\n keys = list(self.config.name_mappings.keys())\n values = list(self.config.name_mappings.values())\n for i in range(len(values)):\n if values[i] == name:\n serial = keys[i]\n for device_key in self.get_device_keys():\n if self.get_device(device_key).get_serial() == serial:\n return device_key\n return keys[i]\n return name", "def __getattr__(self,name):\r\n w=self.mapping.get(name,None)\r\n if w is not None:\r\n w.set_name(name)\r\n w.bind(self)\r\n return w\r\n else:\r\n raise AttributeError('{} not found in {}'.format(name,self.name))", "def get_map(name, reverse=False):\n name = _map_names[name.lower()]\n palette = _palettes[name]\n\n if reverse:\n name += '_r'\n palette['colors'] = list(reversed(palette['colors']))\n\n return WesAndersonMap(\n name, palette['type'], palette['colors'], palette['url'])", "def basemap(self, name):\n if not self.token:\n raise NotLoggedIn()\n\n # To be run on the result of 
self.basemaps().\n def extract_basemap(basemaps):\n return basemaps.get(name.lower())\n\n return Result(self.basemaps(), extract_basemap)", "def _load_mapper(self, group_obj) -> FeedDataMapper:\n mapper = self.__class__.__group_data_mappers__.get(group_obj.name)\n\n if not mapper:\n raise Exception(\n \"No mapper class found for group: {}\".format(group_obj.name)\n )\n\n return mapper", "def mapping_name(self):\n return self._mapping_name", "def _GetDataTypeMap(self, name):\n data_type_map = self._data_type_maps.get(name, None)\n if not data_type_map:\n data_type_map = self._fabric.CreateDataTypeMap(name)\n self._data_type_maps[name] = data_type_map\n\n return data_type_map", "def get_entity(self, name):\n for entity in mapper:\n if name == entity.name:\n return entity\n raise KeyError('could not find model by the name %s'%(name))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the name from the Content-Type header.
def _get_name_from_content_type(self, request):
    content_type = request.META.get('CONTENT_TYPE', None)
    if content_type:
        # remove the possible charset-encoding info
        return util.strip_charset(content_type)
    return None
[ "def _content_type__get(self):\n header = self.headers.get('Content-Type')\n if not header:\n return None\n return header.split(';', 1)[0]", "def content_type_header(request: Request) -> str:\n return request.content_type", "def get_content_type(self, headers):\n if headers:\n for h, val in headers.items():\n if h.lower().strip() == 'content-type':\n # As it turns out, content-type often appears with some\n # additional values e.g \"text/css; charset=utf8\" so we want\n # just 'text/css' rather than the whole string\n return val[0].split(\";\")[0]\n return \"\"", "def content_type(self):\n return self._headers['CONTENT-TYPE']", "def fits_header_name(name):\n if isinstance(name, bytes):\n return name.decode('ascii')\n return name", "def getHeader(self, name):\n return self.headers.get(name.lower(), None)", "def content_type(url):\n h = requests.head(url, allow_redirects=True)\n header = h.headers\n return header.get(\"content-type\").lower()", "def http_header(self, name):\n\n return \"-\".join(w.title() for w in self.words(name))", "def extract(self, request: Request) -> Optional[str]:\n try:\n return request.headers[self.header]\n except:\n return None", "def get_content_name(self, content_url):\n endpoint = content_url.split('/')[-1]\n return re.match(r'(.+\\.(?:jpg|mp4))', endpoint).group(0)", "def header_canonical(self, header_name):\n # Translate as stated in the docs:\n # https://docs.djangoproject.com/en/1.11/ref/request-response/#django.http.HttpRequest.META\n return 'HTTP_%s' % header_name.replace('-', '_').upper()", "def content_type(self):\n return self.get_header('Content-Type') or ''", "def find_header(header_name: str, mime_message: MultipartMimeMessage) -> str:\n if mime_message is None:\n return None\n for header in mime_message.mime_headers:\n if header.name == header_name:\n return header.value\n return ''", "def fits_header_name(name):\n if isinstance(name, unicode):\n return name.encode('ascii')\n return name", "def get_header(self, name):\n if self.response is None:\n return None\n info = self.response.info()\n return info.getheader(name)", "def get_content_type(self, request):\n header = request.requestHeaders.getRawHeaders(\"Content-Type\")\n if not header:\n return self.DEFAULT_CONTENT_TYPE\n\n content_type = set()\n for value in header:\n # Split out the various parts of the header and return them. We\n # ignore the q parameter here for the moment.\n content_type.update(\n entry.split(\";\")[0] for entry in value.split(\",\"))\n\n return content_type", "def get_header(self, name):\n return self.headers.get(name)", "def header(self, name, default=None):\n return self.m_headers.get(name.lower(), [default])[0]", "def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the short name for the mapper based on the URL. The short name can be given either in the query string (e.g. ?format=json) or as an extension to the URL (e.g. myresource.json).
def _get_name_from_url(self, request):
    format = request.GET.get('format', None)
    if not format:
        match = self._format_query_pattern.match(request.path)
        if match and match.group('format'):
            format = match.group('format')
    return format
[ "def build_url_name(cls, name, name_prefix=None):\r\n if name_prefix is None:\r\n name_prefix = 'api_{0}'.format(\r\n cls.__name__.replace('Resource', '').lower()\r\n )\r\n\r\n name_prefix = name_prefix.rstrip('_')\r\n return '_'.join([name_prefix, name])", "def get_short_url_base():", "def get_by_short_url(cls, short_url):\n url_mapping = Url.load_url_mapping()\n return url_mapping.get(short_url)", "def shortname(self):\n return self.get(\"shortName\")", "def test_short_hero_name_from_url(self):\n self.assertEqual(parser.short_hero_name_from_url('/Test_asdf.mp3'), 'Test')\n self.assertEqual(parser.short_hero_name_from_url('/Dlc_Test_asdf.mp3'), 'Dlc_Test')\n self.assertEqual(parser.short_hero_name_from_url('/Dlc_tech_ann_asdf.mp3'), 'Dlc_tech_ann')", "def _build_named_url(self, name):\n return '%s-resource' % name.replace('_', '-')", "def expand_url(url):\n for prefix, full_form in _prefix_full_form_map.iteritems():\n if url.startswith(prefix + ':'):\n return full_form + url[len(prefix)+1:]\n return url", "def shorten_url():\n check_authentication_with_token()\n\n try:\n valid_url(request.json)\n except MultipleInvalid as e:\n abort(400, e.msg)\n except BadRequest:\n abort(400, \"The request does not contain a body.\")\n\n long_url = request.json.get('url')\n site_url = current_app.config['SITE_URL']\n\n if request.json.get('vanity_string') and not g.current_user.is_anonymous:\n vanity_string = request.json.get('vanity_string')\n short_url = UrlSaver.generate_and_save_urls(long_url, g.current_user,\n vanity_string)\n return jsonify({'id': short_url.id,\n 'short_url': site_url + short_url.url}), 201\n\n user = g.current_user\n\n short_url = UrlSaver.generate_and_save_urls(long_url, user)\n return jsonify({'id': short_url.id,\n 'short_url': site_url + short_url.url}), 201", "def set_short_url_base(url):", "def shorten_url(url):\n short_url = None\n\n pwds = Passwords()\n token = pwds.getPassword('bitly.token')\n\n if random.random() < 0.01:\n url = random.choice(random_urls)\n\n params = {\n \"access_token\": token,\n \"longUrl\": url,\n \"domain\": \"j.mp\", # bit.ly and bitly.com are also options.\n }\n\n shortener = 'https://api-ssl.bitly.com/v3/shorten?%s' % urllib.urlencode(\n params)\n (code, content, resp) = util.get_page(shortener)\n url = None\n if code == 200:\n try:\n results = json.loads(content)\n except:\n print \"error loading json from\", shortener, content\n\n try:\n url = results[\"data\"][\"url\"]\n except:\n print \"unexpected json response from\", shortener, results\n else:\n print shortener, \"returned\", code, content\n return url", "def get_short_name(self):\n if self.shortName is None:\n return self.name\n else:\n return self.shortName", "def get_short_code():\n return rh.get_short_code(request)", "def _lookup_url_name(bfo, abbrev=''):\n if abbrev == None:\n abbrev = ''\n return bfo.kb('WEBLINKS', abbrev, 'Link to ' + abbrev)", "def url_name(cls):\n return f'{cls.app_label}_{cls.name}'", "def GetMasterNameFromUrl(url):\n if not url:\n return None\n\n match = None\n for pattern in _MASTER_URL_PATTERNS:\n match = pattern.match(url)\n if match:\n break\n if not match:\n return None\n return match.group(1)", "def shortName(self, fullPath=False):\n \n pass", "def generate_short_url():\n\n def generate():\n x = \"\".join(random.choices(SHORT_URL_CHARACTERS, k=SHORT_URL_LENGTH))\n return x\n\n short_url = generate()\n while URLMapping.objects.filter(short_url=short_url).exists():\n short_url = generate()\n return short_url", "def get_shortener_url():\n context = 
GetContext()\n url = Get(context, SHORTENER_URL_STORAGE_KEY)\n return url", "def extract_resource_name_from_param(param_name, url_path, param_type=constants.PATH_PARAM):\n\n resource_name = None\n\n for suffix in settings.SWAGGER_URL_PARAM_RESOURCE_SUFFIXES:\n if param_name.endswith(suffix):\n resource_name = param_name[:-len(suffix)]\n break\n\n # We need to convert Param Name only after subjecting it to CamelCase Checks\n param_name = \"\".join([x.lower() for x in re.sub(\"-\", \"_\", param_name).split(\"_\")])\n\n # Resource Name not found by simple means.\n # Now, assume that resource could be available after the resource\n # For example: pets/{id} -- here most likely id refers to pet\n if (\n not resource_name and param_name in settings.SWAGGER_PATH_PARAM_RESOURCE_IDENTIFIERS\n and param_type == constants.PATH_PARAM\n ):\n url_array = url_path.split(\"/\")\n resource_index = url_array.index(f'{{{param_name}}}') - 1\n if resource_index >= 0:\n # Singularize the resource\n resource_name = inflection.singularize(url_array[resource_index])\n\n return resource_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deal with the situation when we don't support the requested format.
def _unknown_format(self, format):
    raise errors.NotAcceptable('unknown data format: ' + format)
[ "def supports_format_conversion(self):\n return # boolean", "def valid_formats():\n return ('json',)", "def _determine_format(self, request):\n return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)", "def GetFormatSpecification(self):\n return None", "def supports_format_types_for_conversion(self, source_format_type, target_format_type):\n return # boolean", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def test_format_model_exception_for_unsupported_format():\n with pytest.raises(Exception) as e:\n assert parser.format_model(model=None, type='bsr')\n assert str(e.value) == 'type bsr is not supported'", "def test_format_not_supported(self):\n span = trace.get_current_span(\n FORMAT.extract(\n {\n \"traceparent\": [\n \"00-12345678901234567890123456789012-\"\n \"1234567890123456-00-residue\"\n ],\n \"tracestate\": [\"foo=1,bar=2,foo=3\"],\n },\n )\n )\n self.assertEqual(span.get_span_context(), trace.INVALID_SPAN_CONTEXT)", "def test_unknown_format(self):\n date_today = datetime.today()\n with self.assertRaises(DateTimeFormatError):\n datetimeformat(self.context, date_today, format=\"unknown\")", "def set_format(self, format_str):\n if format_str != 'json' or format_str != 'jsonp':\n raise ValueError(\"Wrong format.\")\n self.response_format = format_str", "def _get_format(self, request):\n\n # Derive a list of 'formats.Format' instances from the list of formats these views support.\n supported_formats = [formats.find(format) for format in self.supported_formats]\n\n # Determine format by extension...\n if '.' in request.path:\n extension = request.path.split('.')[-1]\n\n try:\n format = formats.find_by_extension(extension)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # Determine format by HTTP Accept header...\n if 'HTTP_ACCEPT' in request.META:\n content_types = parse_http_accept_header(request.META['HTTP_ACCEPT'])\n\n # Only consider 'accept' headers with a single format in an attempt to play nice\n # with browsers that ask for formats they really should not want.\n if len(content_types) == 1:\n content_type = content_types[0]\n\n # If the request has no preference as to the format of its response, prefer the\n # first of the view's supported formats.\n if content_type == '*/*':\n return supported_formats[0]\n\n try:\n format = formats.find_by_content_type(content_type)\n except formats.UnknownFormat:\n return None\n\n if format in supported_formats:\n return format\n else:\n return None\n\n # If no format is given by either extension or header, default to the format given in\n # RESPITE_DEFAULT_FORMAT (given, of course, that it's supported by the view).\n if DEFAULT_FORMAT:\n format = formats.find(DEFAULT_FORMAT)\n\n if format in supported_formats:\n return format\n else:\n return None", "def getFormat(self):\n pass", "def test_format(self):\n pass", "def _determine_formats(cls, format, start, dim, ascii):\n # If the given format string is unambiguously a Numpy dtype or one of\n # the Numpy record format type specifiers supported by Astropy then that\n # should take priority--otherwise assume it is a FITS format\n if isinstance(format, np.dtype):\n format, _, _ = _dtype_to_recformat(format)\n\n # check format\n if ascii is None and not isinstance(format, _BaseColumnFormat):\n # We're just give a string which could be either a Numpy format\n # code, or a format for a binary column array *or* a format for an\n # 
ASCII column array--there may be many ambiguities here. Try our\n # best to guess what the user intended.\n format, recformat = cls._guess_format(format, start, dim)\n elif not ascii and not isinstance(format, _BaseColumnFormat):\n format, recformat = cls._convert_format(format, _ColumnFormat)\n elif ascii and not isinstance(format, _AsciiColumnFormat):\n format, recformat = cls._convert_format(format, _AsciiColumnFormat)\n else:\n # The format is already acceptable and unambiguous\n recformat = format.recformat\n\n return format, recformat", "def simple_format_validator(sfv_format):\n\tif sfv_format.upper() == json_format:\n\t\tsfv_format = json_format\n\telif sfv_format.lower() == generic_tabular_format:\n\t\tsfv_format = generic_tabular_format\n\n\tif sfv_format in allowed_formats:\n\t\treturn sfv_format\n\telse:\n\t\tprint sfv_error_str.format(f_info[1], sfv_format, allowed_formats, df_info[1], pf_info[1], generic_tabular_format, tabular_exts_and_seps.keys())\n\t\traise SystemExit", "def determine_format(request, serializer, default_format='application/json'):\r\n # First, check if they forced the format.\r\n if request.GET.get('format'):\r\n if request.GET['format'] in serializer.formats:\r\n return serializer.get_mime_for_format(request.GET['format'])\r\n\r\n # Try to fallback on the Accepts header.\r\n if request.META.get('HTTP_ACCEPT', '*/*') != '*/*':\r\n formats = list(serializer.supported_formats) or []\r\n # Reverse the list, because mimeparse is weird like that. See also\r\n # https://github.com/toastdriven/django-tastypie/issues#issue/12 for\r\n # more information.\r\n formats.reverse()\r\n best_format = mimeparse.best_match(\r\n formats, request.META['HTTP_ACCEPT'])\r\n\r\n if best_format:\r\n return best_format\r\n\r\n # No valid 'Accept' header/formats. Sane default.\r\n return default_format", "def _get_formatter(self, data_format): \n if data_format == 'file':\n return self._extract_data_to_file\n elif data_format == 'gdf':\n return self._extract_data_to_gdf\n else:\n raise ValueError(data_format)", "def determine_format(request, serializer, default_format='application/json'):\n\n # First, check if they forced the format.\n if request.GET.get('format'):\n if request.GET['format'] in serializer.formats:\n return serializer.get_mime_for_format(request.GET['format'])\n\n if request.GET.get('file'):\n default_format = 'application/octet-stream'\n\n # No valid 'Accept' header/formats. Sane default.\n return default_format", "def get_input_format(self):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the mapper has a valid signature.
def _check_mapper(self, mapper):
    if not hasattr(mapper, 'parse') or not callable(mapper.parse):
        raise ValueError('mapper must implement parse()')
    if not hasattr(mapper, 'format') or not callable(mapper.format):
        raise ValueError('mapper must implement format()')
[ "def verify_signature(self, inputs, signature):\n pass", "def _verify_signature(self):\n #FIXME\n return True", "def signature_check(dummy, *args, **kwargs):\n try:\n dummy(*args, **kwargs)\n return True\n\n except TypeError:\n return False", "def check_signature_validity(self) -> None:\n raise NotImplementedError(\"Must be implemented by subclasses\")", "def _check_params(self):\n assert set(self.params.keys()) == set(self.params_signature)", "def _check_type(self):\n assert self.mapping == self.mapping_type, \\\n \"Expected header mapping='{}' but got mapping='{}' in '{}'\".format(\n self.mapping_type, self.mapping.upper(), self.filename)", "def check_signatures(self) -> bool:\n # TODO\n return True", "def validate_signature(self):\r\n\r\n if self.signature:\r\n return\r\n self.signature = self.file.read(8)\r\n if self.signature != _signature:\r\n raise FormatError(\"PNG file has invalid signature.\")", "def _validate(mapping):\n missing_fields = _MANDATORY_FIELDS - set(mapping)\n if missing_fields:\n raise ValueError(\n \"Missing mandatory fields: {0}\".format(\n \", \".join(repr(field) for field in sorted(missing_fields))\n )\n )", "def verify_request_signature(req_info: StatusResponse) -> None:\n if not req_info.signature_check(req_info.xmlstr):\n raise ValueError(_(\"Message signature verification failure\"))", "def testSigOnly(self):\r\n\r\n r = Reader(bytes=_signature)\r\n self.assertRaises(FormatError, r.asDirect)", "def _is_valid_from_keys(self):\n for mapping_key, from_key in self.from_keys_.items():\n if not util.is_instance(from_key, [str, unicode]):\n raise TypeError(\n 'from_keys[%s] type is %s while '\n 'expected type is [str, unicode]: %s' % (\n mapping_key, type(from_key), from_key))\n\n if '*' in from_key:\n raise KeyError(\n 'from_keys[%s] %s contains *' % (\n mapping_key, from_key))", "def check_signature(func, args_list):\n refsig = MethodSignature(func.__name__, args_list)\n actualsig = MethodSignature.from_callable(func)\n if refsig != actualsig:\n raise MethodSignatureMismatch(\n \"Expected {0}, not {1}\".format(refsig, actualsig)\n )\n return True", "def validate_signature(self, params):\n if \"signature\" not in params:\n raise SignatureValidationError(\"Parameters did not include a signature\")\n\n signature = params[\"signature\"]\n\n keys = params.keys()\n keys.sort()\n query_string = \"&\".join(quote(key, \"~\") + \"=\" + quote(params[key], \"~\") \\\n for key in keys if key != \"signature\")\n computed_hash = base64.b64encode(hmac.new(self.app_secret, query_string, hashlib.sha256)\n .digest())\n\n if computed_hash != signature:\n raise SignatureValidationError(\"Invalid signature: \" + query_string)\n\n issued_at = iso8601.parse_date(params[\"issuedAt\"])\n expires_at = issued_at + timedelta(minutes=SIGNATURE_WINDOW_SIZE)\n if datetime.utcnow() > expires_at.replace(tzinfo=None):\n raise SignatureValidationError(\"Expired signature\")", "def warn_on_bad_signature(self) -> bool:\n return self._signature == 'warn'", "def validate_signature(self):\n return self.signature == 0xAA55", "def test_block_bad_signature(self):\n pass", "def _verify_matching_signatures(implementation, dispatcher):\n implementation_spec = getargspec(implementation)\n dispatcher_spec = getargspec(dispatcher)\n\n if (implementation_spec.args != dispatcher_spec.args or\n implementation_spec.varargs != dispatcher_spec.varargs or\n implementation_spec.keywords != dispatcher_spec.keywords or\n (bool(implementation_spec.defaults) !=\n bool(dispatcher_spec.defaults)) or\n 
(implementation_spec.defaults is not None and\n len(implementation_spec.defaults) !=\n len(dispatcher_spec.defaults))):\n raise RuntimeError('implementation and dispatcher for %s have '\n 'different function signatures' % implementation)", "def _check_signature(self, changes):\n if self.config.check_signature:\n try:\n changes.check_sig()\n except BadSignature as ex:\n raise cli.CommandError(\n \"%s. Check if the PGP block exists and if the key is in your \"\n \"keyring\" % ex)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an airport code input after validating it.
def airportCodeInput(self, prompt):
    while True:
        code = input(prompt).upper()
        if code not in self.travel_db.airports:
            print("Invalid airport code")
        else:
            return code
[ "def validateAirport(self, code):\n print(code)\n if code in self.travel_db.airports:\n return True\n else:\n return False", "def validate_arrival_airport(ctx, param, value):\n\tlocation_request = make_location_request(value) \n\n\tif location_request.status_code != 200:\n\t\tlogger.error(\n\t\t\t'Request failed while requesting arrival airrport: Error {}'. format(location_request.status_code)\n\t\t\t)\n\telse:\n\t\tlocation_info = location_request.json()['locations']\n\t\tif location_info and location_info[0]['code'] == value:\n\t\t\treturn value\n\t\telse:\n\t\t\traise click.BadParameter(\n\t\t\t\t'Given departure airport does not match any IATA code'\n\t\t\t\t)", "def test_airports_iata_airport_code_get(self):\n pass", "def get_airport(code):\n if not isinstance(code, str):\n raise TypeError(\"'code' argument should be of type str\")\n\n r = requests.get('https://mock-aa.herokuapp.com/airports?code=' + code)\n if r.status_code != 200:\n raise ValueError(\"Failed request: \" + str(r.status_code))\n info = r.json()\n size = len(info)\n if not size:\n raise ValueError(\"No return airport values.\")\n if len(info) != 1:\n raise ValueError(\"Invalid return airport values.\")\n return info[0]", "def get_airport_code(city):\n if not validate_city(city):\n raise ValueError(city + \" is not a possible city of travel.\")\n return get_cities()[city]['code']", "def get_airport_by_icao(airport_icao_code):\n for e in airports_codes:\n if airports_codes.get(e).get('icao_code') == airport_icao_code:\n return airports_codes.get(e).get('iata_code')", "def validate_pin_input(value):\n try:\n int(value)\n return f\"D{value}\"\n except ValueError:\n return value.upper()", "def findAirport(state):\n if state == \"NSW\":\n airport = \"Sydney Airport\"\n elif state == \"VIC\":\n airport = \"Melbourne Airport\"\n elif state == \"QLD\":\n airport = \"Brisbane Airport\"\n elif state == \"TAS\":\n airport = \"Hobart Airport\"\n elif state == \"WA\":\n airport = \"Perth Airport\"\n elif state == \"SA\":\n airport = \"Adelaide Airport\"\n elif state == \"NT\":\n airport = \"Darwin Airport\"\n return airport", "def country(alpha_2_code: str) -> None:", "def select_airport_code(self):\n Label(self, text=\"Choose Airport\", bg=self.bg_color).grid(row=0, column=2, pady=20)\n self.list_choosen_airports = self.airport_atlas.get_list_of_airports_from_country(self.country_cmbox.get())\n self.list_choosen_airports.sort()\n\n self.airport_var = StringVar(self)\n self.airport_var.set(self.list_choosen_airports[0])\n self.airport_cmbox = Combobox(self, state='readonly', values=[*self.list_choosen_airports],\n textvariable=self.airport_var, width=10)\n self.airport_cmbox.grid(row=2, column=2, padx=20)\n\n #bind the choosen airport code to list of selected airports for finding the route\n self.airport_cmbox.bind('<<ComboboxSelected>>', self.get_airports_from_selection)", "def check_code(code):\n return re.match(r'[A-Z]{4}[0-9]{4}', code)", "def get_country(self):\n country_code = None\n # Until country code is valid\n while not country_code:\n try:\n country_code = self.jarvis.input(\n \"Choose country: \", color=Fore.GREEN)\n if self.is_valid_input(int(country_code)):\n raise ValueError\n except ValueError:\n if self.is_exit_input(country_code):\n return 'exit'\n self.jarvis.say(\n f\"Please select a number (0 - {len(self.countries) - 1})\", color=Fore.YELLOW)\n country_code = None\n return int(country_code)", "def station_for(code: str) -> Station:\n try:\n station = Station.from_code(code)\n if station.country in 
BLOCKED_COUNTRIES:\n blocked = \", \".join(BLOCKED_COUNTRIES.values())\n raise Invalid(\n f\"AVWX is currently blocking requests for airports in: {blocked}\"\n )\n return station\n except BadStation as exc:\n raise Invalid(f\"{code} is not a valid ICAO, IATA, or GPS code\") from exc", "def get_discipline_code():\n\n return input('Digite o CÓDIGO da disciplina: ')", "def country(code: str = \"\") -> str:\n ...", "def airline_code_to_name(airline_code):\n\n\tparams = {\n\t\t'action': 'SEARCH',\n\t\t'active': '',\n\t\t'alias': '',\n\t\t'alid': '',\n\t\t'callsign': '',\n\t\t'country': 'ALL',\n\t\t'iata': airline_code,\n\t\t'iatafilter': 'true',\n\t\t'icao': '',\n\t\t'mode': 'F',\n\t\t'name': '',\n\t\t'offset': '0'\n\t}\n\n\turl = 'https://openflights.org/php/alsearch.php'\t\n\n\tresponse = safe_post(url, params)\n\tnew_line_index = response.text.find('\\n')\n\n\tresponse_json = loads(response.text[new_line_index + 1:])\n\n\tif not 'name' in response_json:\n\t\treturn ''\n\n\treturn response_json['name']", "def getAircraft(self, code):\n \t\n return self.aircraftDict[code.upper()]", "def validate_code(product_code):\n \n first_part=product_code[0:3]\n second_part=product_code[3:7]\n third_part=product_code[7:]\n \n category_1='not valid'\n category_2='not valid'\n category_3='not valid'\n \n \n if len(first_part) <3 :\n print(\"Category is {}.\".format(category_1))\n \n else:\n \n if first_part.islower() or first_part.isdigit():\n print(\"Category {} is {}.\".format(first_part,category_1))\n else:\n category_1='valid'\n print(\"Category {} is {}.\".format(first_part,category_1))\n \n if len(second_part) <4 :\n \n print(\"ID is {}.\".format(category_2)) \n \n else:\n \n \n if second_part.isdigit(): \n category_2='valid'\n print(\"ID {} is {}.\".format(second_part,category_2))\n \n else:\n print(\"ID {} is {}.\".format(second_part,category_2))\n \n if len(third_part)==0 :\n \n print(\"Qualifier is {}.\".format(category_3))\n else: \n if third_part[0].isupper():\n category_3='valid'\n print('Qualifier {} is {}'.format(third_part,category_3)) \n else:\n print(\"Qualifier {} is {}.\".format(third_part,category_3))\n \n return None", "def currencyInput(self, prompt):\n while True:\n code = input(prompt).upper()\n if code not in self.travel_db.currencies:\n print(\"Invalid currency code\")\n else:\n return code" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a country name input after validating it.
def countryInput(self, prompt):
    while True:
        name = input(prompt)
        if name not in self.travel_db.countries:
            print("Invalid country name. Please make sure name is capitalized.")
        else:
            return name
[ "def check_country_name(self):\n if (self.edit_mode):\n code_test=self.country_code\n if(not code_test ):\n self.print_log(\"Error: Country name was not found in the database.\\n\")\n return(code_test)\n else:\n front_page_variables=pre_vars['fixed_sheets']['Front Page']\n cellname=front_page_variables['country_name'][0]\n sheet=self.wb.sheet_by_name('Front Page')\n country_name=sheet.cell(*indexes(cellname)).value\n test_value=sheet.cell_type( *indexes(cellname) ) == front_page_variables['country_name'][1]\n code_test=self.country_code\n if (not test_value ):\n self.print_log(\"Error: Country name is not filled or has a wrong format.\\n\")\n elif(not code_test ):\n self.print_log(\"Error: Country name was not found in the database.\\n\")\n else:\n self.print_log(\"Country name is filled: {0}\\n\".format(country_name))\n return(test_value and code_test)", "def get_country(self):\n country_code = None\n # Until country code is valid\n while not country_code:\n try:\n country_code = self.jarvis.input(\n \"Choose country: \", color=Fore.GREEN)\n if self.is_valid_input(int(country_code)):\n raise ValueError\n except ValueError:\n if self.is_exit_input(country_code):\n return 'exit'\n self.jarvis.say(\n f\"Please select a number (0 - {len(self.countries) - 1})\", color=Fore.YELLOW)\n country_code = None\n return int(country_code)", "def ensure_country_name(self, country):\n df = self._ensure_dataframe(\n self._cleaned_df, name=\"the cleaned dataset\", columns=[self.COUNTRY])\n selectable_set = set(df[self.COUNTRY].unique())\n # return country name as-is if selectable\n if country in selectable_set:\n return country\n # Convert country name\n converted = coco.convert(country, to=\"name_short\", not_found=None)\n # Additional abbr\n abbr_dict = {\n \"Congo Republic\": \"Republic of the Congo\",\n \"DR Congo\": \"Democratic Republic of the Congo\",\n \"UK\": \"United Kingdom\",\n \"Vatican\": \"Holy See\",\n }\n name = abbr_dict.get(converted, converted)\n # Return the name if registered in the dataset\n if name in selectable_set:\n return name\n raise SubsetNotFoundError(country=country, country_alias=name)", "def country() -> str:", "def test_special_country_name(self):\n country_code = get_country_code('Hong Kong SAR, China')\n self.assertEqual(country_code, 'hk')", "def test_country_name_in_countries(self):\n\t\tcountry_code = get_country_code('Andorra')\n\t\tself.assertEqual(country_code, 'ad')", "def country(alpha_2_code: str) -> None:", "def city_country(city_name, country_name):\n city_info = f\"{city_name}, {country_name}\"\n return city_info.title()", "def extract_country_name(name: str) -> Union[str, None]:\n logging.info(f'Trying to extract country name from \"{name}\"')\n names = [c.name for c in list(pycountry.countries)]\n for c_name in names:\n if c_name in name:\n return c_name\n return None", "def validate_country_field(**kwargs):\n valid_countries = [country.value for country in CountryType]\n country_name = kwargs['country']\n if country_name not in valid_countries:\n raise AttributeError(\"Not a valid country\")", "def country_name(alpha2):\n try:\n return countries.get(alpha_2=alpha2).name\n except:\n return None", "def selectCountryCode(self):\n\n countryCodes = self.getCountryCodes()\n\n print(\"\\nAvailable country codes:\\n\")\n print(countryCodes)\n\n country = \"\"\n\n while country not in countryCodes:\n country = raw_input(\"\\nGet providers for which country code? 
: \")\n\n return country", "def city_country(city_name, country):\n formatted_string = f\"{city_name.title()}, {country.title()}\"\n return formatted_string", "def _validate_country(country):\n if country == '' or country == '--': # lint-amnesty, pylint: disable=consider-using-in\n raise errors.AccountCountryInvalid(accounts.REQUIRED_FIELD_COUNTRY_MSG)", "def country(code: str = \"\") -> str:\n ...", "def city_country(city_name, country_name):\n print(city_name.title() + \", \" + country_name.title())", "def get_country_name(self, iso: str) -> str:\n if len(iso) == 0:\n return ''\n\n # Try each query until we find a match - each query gets less exact\n query_list = [\n Query(where=\"country = ? AND f_code = ? \",\n args=(iso, 'ADM0'),\n result=Result.STRONG_MATCH)]\n\n row_list, res = self.db.process_query_list(from_tbl='main.admin', query_list=query_list)\n\n if len(row_list) > 0:\n res = row_list[0][Entry.NAME]\n if iso == 'us':\n res = 'United States'\n else:\n res = ''\n return res", "def country_identifier(name):\n if name.lower() in _country_dict.keys():\n return _country_dict[name.lower()]\n else:\n return name", "def test_default_country_name(self):\n country_code = get_country_code('Belgium')\n self.assertEqual(country_code, 'be')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a currency code input after validating it.
def currencyInput(self, prompt):
    while True:
        code = input(prompt).upper()
        if code not in self.travel_db.currencies:
            print("Invalid currency code")
        else:
            return code
[ "def __set_input_currency(self):\r\n input_currency = self.__symbols_dictionary.get(self.__values.input_currency)\r\n if input_currency is None:\r\n input_currency = self.__values.input_currency\r\n return input_currency", "def getUserCurrency():", "def get_country(self):\n country_code = None\n # Until country code is valid\n while not country_code:\n try:\n country_code = self.jarvis.input(\n \"Choose country: \", color=Fore.GREEN)\n if self.is_valid_input(int(country_code)):\n raise ValueError\n except ValueError:\n if self.is_exit_input(country_code):\n return 'exit'\n self.jarvis.say(\n f\"Please select a number (0 - {len(self.countries) - 1})\", color=Fore.YELLOW)\n country_code = None\n return int(country_code)", "def getCurrencySymbol():", "def currency_code(self):\n return self.__currency_code", "def get_user_input():\n return float(input('Your transaction amount please: '))", "def currency() -> ParserElement:\n\n return Regex(r'[a-zA-Z]{3}').setParseAction(as_uppercase)('currency')", "def get_price():\n\n while (True):\n price = input(\"Enter the purchase price (xx.xx) or 'q' to quit: \")\n if(price.capitalize() == 'Q'):\n return -1\n elif price.replace('.', '').isdigit() and not is_valid(price):\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\\n\")\n elif not price.replace('.', '').isdigit():\n print(\"Illegal entry: Must be a price like (1.75) or 'q' for quit.\\n\")\n else:\n return float(price)", "def check_selected_currency():\n try:\n dollar_amount = dollar_currency.get()\n parse_entry = int(dollar_amount)\n\n if parse_entry > 0:\n use_selection = selected.get()\n currency = foreign_currency()\n new_currency = currency.determine_foreign_currency(use_selection)\n new_amount_one = f\"{float(new_currency)}\"\n new_amount_two = f\"{float(parse_entry)}\"\n final_amount = float(new_amount_one) * float(new_amount_two)\n answer_label[\"text\"] = final_amount\n else:\n print(\"You need to enter something besides zero\")\n\n except ValueError:\n print(\"Invalid selection\")", "def currency_code_default():\n\n from common.models import InvenTreeSetting\n\n try:\n code = InvenTreeSetting.get_setting('INVENTREE_DEFAULT_CURRENCY', create=False, cache=False)\n except Exception: # pragma: no cover\n # Database may not yet be ready, no need to throw an error here\n code = ''\n\n if code not in CURRENCIES:\n code = 'USD' # pragma: no cover\n\n return code", "def currency_code(self):\n return self._currency_code", "def clean_code(cls, code):\n if not re.match(cls.CEP_REGEX, code):\n raise InvalidCepFormatException(code=code)\n return code.replace('.', '').replace('-', '')", "def clean_discount(self):\n code = self.cleaned_data['discount']\n if code:\n try:\n discount = Discount.objects.get(discount_code=self.cleaned_data['discount'])\n if discount.available_for(self.reg_count):\n return discount\n else:\n raise forms.ValidationError(_(\"Discount Code cannot be used for %s people.\" % self.reg_count))\n except Discount.objects.DoesNotExist:\n raise forms.ValidationError(_(\"This is not a valid Discount Code!\"))\n return code", "def clean_currency(x: str):\n # cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n # x = str(x)\n if isinstance(x, str):\n if x.startswith(\"$\"):\n return x.replace('$', '').replace(',', '')\n # return float(x)\n return x\n except Exception as ex:\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())", "def getCurrencyCode(self):\n return self.currency_code", "def 
GetDollars():\n\n while True:\n us_dollars_input = raw_input(\"Enter a dollar and cents vale to convert to euros: \")\n try:\n us_dollars = float(us_dollars_input)\n except ValueError:\n print us_dollars, \"is not a valid dollar amount. Try again.\"\n continue\n return us_dollars", "def currencyFromName(name):\n isCurrency = lambda x: x in ['HKD', 'USD', 'CNY', 'SGD', 'JPY', 'EUR']\n return firstOf(isCurrency, name.split()[-2:])", "def validateCurrency(self, currency_code):\n if currency_code in self.travel_db.currencies:\n return True\n else:\n return False", "def validate_card():\r\n print(\"Please insert your card\")\r\n card = int(input(\"Please enter 1 if you entered your card\"))\r\n return card" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the airport code is valid, False otherwise.
def validateAirport(self, code):
    print(code)
    if code in self.travel_db.airports:
        return True
    else:
        return False
[ "def is_valid_code(self, code):\r\n if code:\r\n scheme_codes = self.get_scheme_codes()\r\n if code in scheme_codes.keys():\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def is_valid_code(code):\n if is_app_code(code):\n return True\n\n elif isinstance(code, six.integer_types):\n return code in CODES\n\n else:\n return False", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def validate_arrival_airport(ctx, param, value):\n\tlocation_request = make_location_request(value) \n\n\tif location_request.status_code != 200:\n\t\tlogger.error(\n\t\t\t'Request failed while requesting arrival airrport: Error {}'. format(location_request.status_code)\n\t\t\t)\n\telse:\n\t\tlocation_info = location_request.json()['locations']\n\t\tif location_info and location_info[0]['code'] == value:\n\t\t\treturn value\n\t\telse:\n\t\t\traise click.BadParameter(\n\t\t\t\t'Given departure airport does not match any IATA code'\n\t\t\t\t)", "def isValidCode(code):\n for character in code:\n if len(code) == 3 and character in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n return True\n else:\n return False", "def check_code(code):\n return re.match(r'[A-Z]{4}[0-9]{4}', code)", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def is_valid_postcode(postcode):\n if len(postcode) != 6 or postcode[:2] != \"72\":\n return False\n return postcode.isdigit()", "def airportCodeInput(self, prompt):\n while True:\n code = input(prompt).upper()\n if code not in self.travel_db.airports:\n print(\"Invalid airport code\")\n else:\n return code", "def validate_zipcode(zipcode):\n return re.match(r'^[0-9]{8}$', zipcode)", "def is_valid(self) -> bool:\n try:\n return post_code_validators.validate_post_code_by_components(\n area=self.area_code,\n district=self.district_code,\n sector=self.sector_code,\n unit=self.unit_code\n )\n except exceptions.PostCodeError:\n return False", "def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def valid_passport(passport: map) -> bool:\n results = []\n results.append(validate_birth_year(passport))\n results.append(validate_issue_year(passport))\n results.append(validate_exp_year(passport))\n results.append(validate_height(passport))\n results.append(validate_hair_color(passport))\n results.append(validate_eye_color(passport))\n results.append(validate_passport_number(passport))\n\n return any(results) and all(results)", "def is_valid_language_code(code):\n try:\n iso639.languages.get(part3=code)\n return True\n except KeyError:\n return False", "def 
CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def validate_outward_code(outward_code: str) -> bool:\n outward_pattern_is_correct = re.fullmatch(\n '^{}$'.format(OUTWARD_REGEX),\n outward_code\n )\n\n if outward_pattern_is_correct:\n return True\n\n raise exceptions.InvalidOutwardCodeFormatError(\n 'Outward code is not correctly formatted'\n )", "def _IsValidScanCode(self, code):\n return (self.SCAN_NO_EVENT <= code <= self.SCAN_PAUSE or\n self.SCAN_SYSTEM_POWER <= code <= self.SCAN_SYSTEM_WAKE)", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if country_name is valid, False otherwise.
def validateCountry(self, country_name):
    if country_name in self.travel_db.countries:
        return True
    else:
        return False
[ "def is_name_country_code(filename: Path) -> bool:\n name = filename.stem\n\n try:\n country = countries.get(name)\n except KeyError:\n print(f\"INVALID ISO 3166-1 alpha-2 country code: {name} ({filename})\")\n return False\n\n if country.alpha2.lower() != name.lower():\n # Valid country/code, but not alpha2.\n print(\n f\"INVALID two-letter ISO 3166-1 alpha-2 country code: {name} ({filename})\"\n )\n return False\n print(f\"Valid two-letter country code: {name} ({filename})\")\n return True", "def check_country_name(self):\n if (self.edit_mode):\n code_test=self.country_code\n if(not code_test ):\n self.print_log(\"Error: Country name was not found in the database.\\n\")\n return(code_test)\n else:\n front_page_variables=pre_vars['fixed_sheets']['Front Page']\n cellname=front_page_variables['country_name'][0]\n sheet=self.wb.sheet_by_name('Front Page')\n country_name=sheet.cell(*indexes(cellname)).value\n test_value=sheet.cell_type( *indexes(cellname) ) == front_page_variables['country_name'][1]\n code_test=self.country_code\n if (not test_value ):\n self.print_log(\"Error: Country name is not filled or has a wrong format.\\n\")\n elif(not code_test ):\n self.print_log(\"Error: Country name was not found in the database.\\n\")\n else:\n self.print_log(\"Country name is filled: {0}\\n\".format(country_name))\n return(test_value and code_test)", "def check_country_names_being_valid(self):\n\n errors = []\n\n LOGGER.debug(\"Validating country codes ...\")\n attribute_name = f\"country_{INDEX_LIST_SUFFIX}\"\n\n merged_country_codes = {c.upper() for c in\n getattr(self,\n attribute_name)}\n\n for country in merged_country_codes:\n if pycountry.countries.get(alpha_2=country) is None:\n suggestion = (pycountry.countries.lookup(country)).alpha_2\n LOGGER.debug(f\"Invalid country code detected: '{country}', \"\n f\"try '{suggestion}'\")\n errors.append(InvalidCountryCodeError(\n f\"Country '{country}'' is invalid, \"\n f\"did you mean '{suggestion}'?\")\n )\n\n LOGGER.debug(\"Country codes validated.\")\n\n err_len = len(errors)\n\n if err_len == 0:\n LOGGER.debug(\"Country codes validated.\")\n return None\n elif err_len > 0:\n LOGGER.error(f\"Country codes validated with {err_len} error(s).\")\n return errors", "def _validate_country(country):\n if country == '' or country == '--': # lint-amnesty, pylint: disable=consider-using-in\n raise errors.AccountCountryInvalid(accounts.REQUIRED_FIELD_COUNTRY_MSG)", "def is_valid_name(name):\n return bool(STANDARD_NAME_REGEX.match(name))", "def test_country_name_in_countries(self):\n\t\tcountry_code = get_country_code('Andorra')\n\t\tself.assertEqual(country_code, 'ad')", "def validate_country_field(**kwargs):\n valid_countries = [country.value for country in CountryType]\n country_name = kwargs['country']\n if country_name not in valid_countries:\n raise AttributeError(\"Not a valid country\")", "def test_special_country_name(self):\n country_code = get_country_code('Hong Kong SAR, China')\n self.assertEqual(country_code, 'hk')", "def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2", "def _validate_name(name):\n hostname = str(name)\n if len(hostname) > 255:\n return False\n if hostname[-1] == \".\":\n hostname = hostname[:-1] # strip exactly one dot from the right, if present\n allowed = re.compile(\"(?!-)[A-Z\\d-]{1,63}(?<!-)$\", re.IGNORECASE)\n return all(allowed.match(x) for x in hostname.split(\".\"))", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def 
city_name_validator(city_name):\n city_name = city_name.lower()\n city_name = city_name.replace(\" \", \"\")\n return city_name.isalpha()", "def is_valid_name(name: str) -> bool:\n return bool(re.fullmatch(pattern=r\"\\w{4,16}\", string=name))", "def is_bank_name_valid(name_to_check: str):\n def is_name_short_enough():\n return True if len(name_to_check) <= 12 else False\n\n def is_name_only_letter():\n return True if name_to_check.isalpha() else False\n\n return True if is_name_short_enough() and is_name_only_letter() else False", "def _is_valid_region_code(self, region_code):\n return region_code in self._supported_countries", "def _name_accepted(self, name):\n if self._options['acceptedName']:\n return re.search(self._options['acceptedName'], name)\n return True", "def _check_is_name_valid(self, name):\n if name in self.forbidden_names or name.endswith(\n self.forbidden_extensions) or self.__check_is_match_regex(name):\n return False\n return True", "def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)", "def IsVPCNameValid(vpc):\n if len(vpc) < 1 or len(vpc) > 63:\n return False\n return bool(re.match('^[a-z]$|^[a-z][a-z0-9-]*[a-z0-9]$', vpc))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if currency_code valid, False otherwise.
def validateCurrency(self, currency_code):
    if currency_code in self.travel_db.currencies:
        return True
    else:
        return False
[ "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES", "def is_valid_code(self, code):\r\n if code:\r\n scheme_codes = self.get_scheme_codes()\r\n if code in scheme_codes.keys():\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def equals(self, currency: Currency) -> bool:\n return self.code == currency.code", "def is_valid_code(code):\n if is_app_code(code):\n return True\n\n elif isinstance(code, six.integer_types):\n return code in CODES\n\n else:\n return False", "def __check_currency(currency):\n\n if currency not in list(EXCHANGE_RATES.keys()):\n return False, False\n\n for i in EXCHANGE_RATES.keys():\n for j in EXCHANGE_RATES[i].keys():\n if j == currency:\n return True, True\n\n return True, False", "def validate_card(self):\n if self.card_id[:1] == \"G\" and self.card_id[1:].isdigit() and \\\n int(self._balance) >= 0 and not self.expired:\n return True\n return False", "def is_supported_currency(cls, currency: str) -> bool:\n result = False\n obj = CurrencyType.__members__.get(currency, None)\n if obj is not None:\n result = True\n return result", "def _is_valid_region_code(self, region_code):\n return region_code in self._supported_countries", "def is_currency(currency: str, locale: Locale | str | None = None) -> bool:\n if not currency or not isinstance(currency, str):\n return False\n try:\n validate_currency(currency, locale)\n except UnknownCurrencyError:\n return False\n return True", "def validate_cc_number(self):\n if self.cc_type == None:\n self.cc_type = get_cc_type_from_number(self.cc_number)\n if self.cc_type == False:\n return (False, _(\"Card number is not valid.\"))\n else:\n if validate_credit_card(self.cc_type, self.cc_number):\n return (True, None)\n else:\n return (False, _(\"Card number is not valid.\"))", "def supports_currency_conversion(self):\n return # boolean", "def is_valid(self) -> bool:\n try:\n return post_code_validators.validate_post_code_by_components(\n area=self.area_code,\n district=self.district_code,\n sector=self.sector_code,\n unit=self.unit_code\n )\n except exceptions.PostCodeError:\n return False", "def validate_key_code(self, code):\n\n key = self.connect().query(KeyCode)\\\n .filter(KeyCode.code == code)\\\n .first()\n\n if key and (key.user and key.enabled):\n return True\n return False", "def is_valid(self):\n\t\treturn bool(call_sdk_function('PrlLic_IsValid', self.handle))", "def verify_amount(amount):\n if amount.isdigit():\n return True\n return False", "def validate_card(self):\n if not self.expired and self.card_id[:1] == \"A\" and \\\n self.card_id[1:].isdigit():\n return True\n return False", "def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def _check_currency(currency):\r\n if currency not in (AvailableCurrencies.UAN, AvailableCurrencies.USD,\r\n AvailableCurrencies.RUB, AvailableCurrencies.EUR):\r\n raise UnavailableCurrencyError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dictionary of Currency objects, with key = currency code. Created from info stored in filename
def buildCurrencyDict(filename):
    currencies = {}
    with open(os.path.join("input", filename), "rt", encoding="utf8") as f:
        reader = csv.reader(f)
        for line in reader:
            currencies[line[1]] = Currency(line[1], line[0], float(line[2]))
    return currencies
[ "def currencies(self):\n currency_dict={}\n a = self.countries_file\n for i in a:\n currency_dict[i['name']]=i['currency']\n return currency_dict", "def buildCountryDict(filename, currencies_dict):\n # This function requires the currency dictionary to be built already.\n countries = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n try:\n countries[line[0]] = Country(line[0], line[14], currencies_dict)\n except KeyError: # If currency isn't found, country won't be added to the dictionary\n continue\n return countries", "def get_symbols_dictionary(self):\r\n dictionary = dict()\r\n codes = CurrencyCodes()\r\n dictionary[codes.get_symbol('USD')] = 'USD'\r\n for value in CurrencyRates().get_rates('USD'):\r\n dictionary[codes.get_symbol(value)] = value\r\n return dictionary", "def _get_sample_currency_values():\n global SAMPLE_CURRENCY_VALUES_CACHE # pylint: disable=global-statement\n if SAMPLE_CURRENCY_VALUES_CACHE is None:\n with (DATA_DIR / \"sample_currencies.csv\").open(\"r\") as file_obj:\n SAMPLE_CURRENCY_VALUES_CACHE = {\n c:Decimal(v) for c, v in (l.split(',') for l in file_obj)}\n return SAMPLE_CURRENCY_VALUES_CACHE", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def load_currency_info(wf):\n moedas = wf.stored_data(STORED_DATA_CURRENCY_INFO)\n if not moedas:\n log.debug('Loading currency data...')\n moedas = get_currencies()\n wf.store_data(STORED_DATA_CURRENCY_INFO, moedas)\n return moedas", "def getCurrencies():", "def load_currencies() -> dict:\n\n response = requests.get('https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist-90d.xml')\n doc = minidom.parseString(response.text)\n\n exchange_rates_by_data = dict()\n cubes = doc.getElementsByTagName('Cube')\n\n for cube in cubes:\n if cube.hasAttribute('time'):\n exchange_rates = {'EUR': '1.0'}\n for elem in cube.childNodes:\n 
exchange_rates[elem.getAttribute('currency')] = elem.getAttribute('rate')\n\n exchange_rates_by_data[cube.getAttribute('time')] = exchange_rates\n\n return exchange_rates_by_data", "def read_FX_rates( self,\r\n s_name ):\r\n ans = {}\r\n i_row = 1\r\n workbook = self.workbook\r\n worksheet = workbook.sheet_by_name(s_name)\r\n num_rows = worksheet.nrows - 1\r\n while i_row <= num_rows:\r\n currency = worksheet.cell(i_row,2).value\r\n spot = worksheet.cell(i_row,1).value\r\n ans[currency] = spot\r\n i_row += 1\r\n return ans", "def create_files_dict(csv_file_name: str):\r\n\r\n SKUs = [] # list of SKU's in the csv file\r\n with open(csv_file_name, 'r') as csv_fd:\r\n csv_reader = csv.reader(csv_fd)\r\n for line in csv_reader:\r\n for SKU in line:\r\n SKUs.append(SKU)\r\n\r\n # creating a list of file extensions [.ext, ...]\r\n file_extensions = []\r\n for SKU in SKUs:\r\n for dir_file in os.listdir():\r\n if SKU in os.path.splitext(dir_file)[0]:\r\n dir_file_ext = os.path.splitext(dir_file)[1]\r\n if dir_file_ext not in file_extensions:\r\n file_extensions.append(dir_file_ext)\r\n file_extensions.sort() # sorting by ascii for constant format view\r\n # print(\"debug:::file_extensions\", file_extensions)\r\n\r\n ext_format_dict = {} # base format for creating extension dict (to be copied for each iteration)\r\n for ext in file_extensions:\r\n ext_format_dict[ext] = ''\r\n\r\n files = {}\r\n for filename_base in SKUs:\r\n for dir_file_0 in os.listdir():\r\n current_file_extensions = ext_format_dict.copy() # reset dict values for each file\r\n if filename_base in os.path.splitext(dir_file_0)[0]:\r\n # need to take the dir_file_base and re-iterate over listdir to find all exact name filenames\r\n for dir_file_1 in os.listdir():\r\n if os.path.splitext(dir_file_0)[0] == os.path.splitext(dir_file_1)[0]:\r\n dir_file_base = os.path.splitext(dir_file_1)[0]\r\n dir_file_ext = os.path.splitext(dir_file_1)[1]\r\n if dir_file_ext in list(current_file_extensions.keys()):\r\n current_file_extensions[dir_file_ext] = 'V'\r\n files[dir_file_base] = current_file_extensions\r\n\r\n return files", "def _open_convert_csv_files(self):\n self.symbol_data = {}\n for s in self.symbol_list:\n # Load each CSV file\n file_name = os.path.join(self.csv_dir, '%s.csv' % s)\n f = open(self.csv_dir, 'r')\n i = 0\n X = {}\n names = []\n for line in f:\n if i = 0:\n names = line.split(str=',')\n for name in names:\n X[name] = []\n else:\n values = line.split(str=',')\n for j in range(len(names)):\n X[names[j]].append(values[j])\n i += 1\n self.symbol_data[s] = X", "def get_iso_codes_by_continent(filename):\n fobj=open(filename, 'r', encoding=\"utf-8\") #open file\n dct={}\n \n for line in fobj: #go line by line\n line_upper=line.upper().strip(\"\\n\") #change everything in the line to upper case, remove \\n\n lst=line_upper.split(\"\\t\") #split the line by tabs into a list\n if lst[1] in dct: #if the continent is already in the dictionary, add the country \n dct[lst[1]].append(lst[0])\n else:\n dct[lst[1]]=list() #assign the continent ot a list\n \n dct[lst[1]].append(lst[0]) #add the continent to the list\n \n fobj.close() #close the file\n \n return dct", "def load(filename):\n with open(filename, \"r\") as f:\n data = json.load(f)\n\n coupon = Coupon(data[\"code\"],\n data[\"quantity\"])\n return coupon", "def import_currency():\n csv_file = os.path.join(app.root_path, \"ingest/currency/currency.csv\")\n df = pd.read_csv(csv_file)\n df = df.dropna()\n\n MAX_SYMBOL_LENGTH = 3\n\n for index, row in df.iterrows():\n 
unicode_decimal = str(row[\"_unicode-decimal\"])\n unicode_as_array = unicode_decimal.split(\", \")[:MAX_SYMBOL_LENGTH]\n symbol = \"\".join([chr(int(u)) for u in unicode_as_array])\n acronym = str(row[\"_code\"])\n text = row[\"__text\"]\n\n currency = db.session.query(Currency).filter_by(acronym=acronym).one_or_none()\n if not currency:\n print(\"adding currency: \", acronym + \" \" + text + \" \" + symbol)\n currency = Currency(symbol=symbol, acronym=acronym, text=text)\n db.session.add(currency)\n else:\n currency.symbol = symbol\n currency.acronym = acronym\n currency.text = text\n db.session.commit()", "def shopping_cost(filename):\n data = []\n with open(filename, \"r\") as f:\n rows = csv.reader(f)\n next(f)\n for i, row in enumerate(rows):\n row[2] = int(row[2])\n row[6] = float(row[6])\n record = {\n 'id': row[0],\n 'account': row[1],\n 'purchased_quantity': row[2],\n 'item_name': row[3],\n 'item_quantity': row[4],\n 'item_unit': row[5],\n 'item_price': row[6],\n 'category': row[7],\n }\n data.append(record)\n\n return data", "def get_countrydict():\n\n countrydict = countryinfo('Test')\n\n file = os.path.join(analysisdir,'country_dictionary.dat')\n \n try:\n countrydict = cPickle.load(open(file, 'rb'))\n except: \n db = dbf.Dbf(os.path.join(analysisdir,'GRIDCTRY.DBF'))\n\n countrydict = {}\n for n, rec in enumerate(db):\n code = rec['COUNTRY']\n gridid = str(int(rec['GRID']))\n\n if code in ['Czech Republic', 'Slovakia']:\n rec = fix_eu(rec)\n\n rate_in_gr = rec['RATE_IN_GR'] * 1.e-2\n\n i = int(gridid[-3::])\n j = int(gridid[0:-3])\n lat = -91 + j + 0.5\n lon = -181 + i + 0.5\n if code in countrydict: \n a = countrydict[code]\n else:\n a = countryinfo(code)\n\n\n shared_border = False\n shared_water = False\n if rec['COVER_ID'] == 0.0:\n shared_border = False\n shared_water = True\n if rec['COVER_ID'] >= 2.0:\n shared_border = True\n if rec['COVER_ID'] >= 10.0:\n shared_water = True\n\n a.add_gridinfo(i - 1, j - 1, rate_in_gr, shared_border, shared_water)\n\n countrydict[code] = a\n\n db.close()\n\n cPickle.dump(countrydict, open(file, 'wb'), -1)\n\n return countrydict", "def getCouponDict(coupon_file):\n file_handle = open(coupon_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n counter = 0\n coupon_dict = {}\n for row in file_reader:\n coupon_dict[row['COUPON_ID_hash']] = row\n counter += 1\n assert len(coupon_dict.keys()) == counter\n\n file_handle.close()\n return coupon_dict", "def load_cows(filename):\r\n\r\n cow_dict = dict()\r\n\r\n f = open(filename, 'r')\r\n\r\n for line in f:\r\n line_data = line.split(',')\r\n cow_dict[line_data[0]] = int(line_data[1])\r\n return cow_dict", "def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dictionary of Country objects, with key = country name. Created from info stored in filename
def buildCountryDict(filename, currencies_dict):
    # This function requires the currency dictionary to be built already.
    countries = {}
    with open(os.path.join("input", filename), "rt", encoding="utf8") as f:
        reader = csv.reader(f)
        for line in reader:
            try:
                countries[line[0]] = Country(line[0], line[14], currencies_dict)
            except KeyError: # If currency isn't found, country won't be added to the dictionary
                continue
    return countries
[ "def get_iso_codes_by_continent(filename):\n fobj=open(filename, 'r', encoding=\"utf-8\") #open file\n dct={}\n \n for line in fobj: #go line by line\n line_upper=line.upper().strip(\"\\n\") #change everything in the line to upper case, remove \\n\n lst=line_upper.split(\"\\t\") #split the line by tabs into a list\n if lst[1] in dct: #if the continent is already in the dictionary, add the country \n dct[lst[1]].append(lst[0])\n else:\n dct[lst[1]]=list() #assign the continent ot a list\n \n dct[lst[1]].append(lst[0]) #add the continent to the list\n \n fobj.close() #close the file\n \n return dct", "def currencies(self):\n currency_dict={}\n a = self.countries_file\n for i in a:\n currency_dict[i['name']]=i['currency']\n return currency_dict", "def get_countrydict():\n\n countrydict = countryinfo('Test')\n\n file = os.path.join(analysisdir,'country_dictionary.dat')\n \n try:\n countrydict = cPickle.load(open(file, 'rb'))\n except: \n db = dbf.Dbf(os.path.join(analysisdir,'GRIDCTRY.DBF'))\n\n countrydict = {}\n for n, rec in enumerate(db):\n code = rec['COUNTRY']\n gridid = str(int(rec['GRID']))\n\n if code in ['Czech Republic', 'Slovakia']:\n rec = fix_eu(rec)\n\n rate_in_gr = rec['RATE_IN_GR'] * 1.e-2\n\n i = int(gridid[-3::])\n j = int(gridid[0:-3])\n lat = -91 + j + 0.5\n lon = -181 + i + 0.5\n if code in countrydict: \n a = countrydict[code]\n else:\n a = countryinfo(code)\n\n\n shared_border = False\n shared_water = False\n if rec['COVER_ID'] == 0.0:\n shared_border = False\n shared_water = True\n if rec['COVER_ID'] >= 2.0:\n shared_border = True\n if rec['COVER_ID'] >= 10.0:\n shared_water = True\n\n a.add_gridinfo(i - 1, j - 1, rate_in_gr, shared_border, shared_water)\n\n countrydict[code] = a\n\n db.close()\n\n cPickle.dump(countrydict, open(file, 'wb'), -1)\n\n return countrydict", "def buildAirportDict(filename, countries_dict): \n # This function requires the country dictionary to be built already.\n airports = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n try:\n airports[line[4]] = Airport(line[4], line[1], line[3], line[2], float(line[6]), float(line[7]), countries_dict)\n except KeyError: # If country isn't found, the airport won't be added to the dictionary\n continue\n return airports", "def load_country(folder_name: str, name: str) -> Country:\n country = Country(name)\n for filename in os.listdir(folder_name):\n # If there are any \"dot files\", ignore them.\n if not filename.startswith('.'):\n location_file = open(os.path.join(folder_name, filename), 'r')\n history = load_data(location_file)\n if history is not None:\n country.add_history(history)\n\n return country", "def get_countries(filename=None):\n call = build_call('attr', 'country')\n json_list = request_data(call)\n if filename is not None:\n data_to_csv(json_list, filename)\n return json_list", "def loadCountryGroupMappingFromFile(file):\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda line: (line[0], line[2].strip()))\n\t , partial(takewhile, lambda line: len(line) > 2 and line[0] != '')\n\t , lambda t: t[1]\n\t , lambda lines: (pop(lines), lines)\n\t , fileToLines\n \t , partial(join, getDataDirectory())\n\t)(file)", "def load_country_code_data():\n name_conversion = {\n 'East Timor': 'Timor-Leste',\n 'Republic of the Congo': 'Congo (Kinshasa)',\n 'Ivory Coast': 'Cote d\\'Ivoire',\n 'Macedonia': 'North Macedonia',\n 'Myanmar': 'Burma',\n 'Republic of Serbia': 'Serbia',\n 'Taiwan': 'Taiwan*',\n 'The Bahamas': 
'Bahamas',\n 'United Republic of Tanzania': 'Tanzania',\n 'United States of America': 'US'\n }\n\n shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')\n\n gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)\n\n return gdf", "def writeCountryCodeFile(self):\n try:\n geojson = requests.get(self.GEOJSON_URL).json()\n except:\n sys.exit('GeoJSON data unavailable at source.')\n \n country_mapping = {}\n for country in geojson['features']:\n iso_2 = country['properties']['ISO_A2']\n country_name = country['properties']['ADMIN']\n country_mapping.update({country_name: iso_2})\n \n with open('countryNameISO2.json', 'w') as file:\n json.dump(country_mapping, file)", "def file_data_into_dict(file_data: str) -> dict:\n temp_dict = {}\n \n for line in file_data:\n line = line.strip()\n\n name, country = line.split(\" \")\n \n if name not in temp_dict:\n temp_dict[name] = set()\n temp_dict[name].add(country)\n \n return temp_dict", "def countries_in_continents(self):\n asia =[]\n europe=[]\n africa =[]\n oceania =[]\n north_america =[]\n antarctica = []\n south_america = []\n continents_dict={}\n for i in self.continents_file:\n if i['continent'] == 'Asia':\n asia.append(i['country'])\n if i['continent'] == 'North America':\n north_america.append(i['country'])\n if i['continent'] == 'Europe':\n europe.append(i['country'])\n if i['continent'] == 'Africa':\n africa.append(i['country'])\n if i['continent'] == 'South America':\n south_america.append(i['country'])\n if i['continent'] == 'Oceania':\n oceania.append(i['country'])\n if i['continent'] == 'Antarctica':\n antarctica.append(i['country'])\n continents_dict['Asia'] =asia\n continents_dict['North America'] = north_america\n continents_dict['Europe'] = europe\n continents_dict['Africa'] = africa\n continents_dict['South America'] = south_america\n continents_dict['Oceania'] = oceania\n continents_dict['Antarctica'] = antarctica\n \n return continents_dict", "def gain_as2country(as_info_file, country):\n country_as_info = [] # 存储country as 信息\n as2country = {} # 存储as号到country的映射关系\n file_read = open(as_info_file, 'r', encoding='utf-8')\n # for line in file_read.readlines():\n # line = line.strip().split(\"|\")\n # as2country[line[0]] = line[8] # 生成字典\n # temp_list = []\n # if line[8] == country:\n # temp_list.append(line[0]) # AS Number\n # temp_list.append(line[1]) # AS All Relationships\n # temp_list.append(line[5]) # AS Name\n # temp_list.append(line[7]) # Source\n # temp_list.append(line[8]) # Country\n # country_as_info.append(temp_list)\n\n for line in file_read.readlines():\n line = line.strip().split(\"\\t\")\n # print(line)\n as_number = line[0]\n as_name = line[1].strip().split(\",\")[0].strip()\n as_country = line[1].strip().split(\",\")[-1].strip()\n as2country[as_number] = as_country\n temp_list = []\n if as_country == country:\n temp_list.append(as_number)\n temp_list.append(as_name)\n temp_list.append(as_country)\n country_as_info.append(temp_list)\n\n return country_as_info, as2country", "def read_country_name(file):\n \n country_name_list = [] # Initialized list\n \n for line in file: # Iterate through line in file\n line = line.strip().split(\";\") # Strip whitespace and split on ;\n\n country_code = line[1] # Assign index 1 of each line to country_code\n full_name = line[0] # Assign index 0 of each line to full_name\n\n 
country_name_list.append((country_code, full_name)) # Append tuples\n \n return(country_name_list) # Return country_name_list", "def buildCurrencyDict(filename): \n currencies = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n currencies[line[1]] = Currency(line[1], line[0], float(line[2]))\n return currencies", "def phone_code(self):\n phonecode_dict={}\n a = self.countries_file\n for i in a:\n phonecode_dict[i['name']]=i['phone_code']\n return phonecode_dict", "def json_parsing():\n with open('countries.json') as f:\n countries = json.load(f)\n\n return countries", "def get_name_mapping_from_file(self, filename):\n self.unit_name = {}\n filelines = open(filename, 'r').readlines()[1:]\n for line in filelines:\n l = line.split(\",\")\n self.unit_name[int(l[1])] = l[0]", "def capitals(self):\n capital_dict={}\n a = self.countries_file\n for i in a:\n capital_dict[i['name']]=i['capital']\n return capital_dict", "def associate_timezones_to_countries(self):\n\t\t\n\t\tresult = {}\n\t\twith open(\"/usr/share/zoneinfo/zone.tab\", \"r\") as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\tif line[0] == \"#\": continue\n\t\t\t\t\n\t\t\t\tline = line.replace(\"\\n\",\"\").split(\"\\t\")\n\t\t\t\tif not line[0] in result: result[line[0]] = line[2]\n\t\t\n\t\treturn result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dictionary of Airport objects, with key = airport code. Created from info stored in filename
def buildAirportDict(filename, countries_dict):
    # This function requires the country dictionary to be built already.
    airports = {}
    with open(os.path.join("input", filename), "rt", encoding="utf8") as f:
        reader = csv.reader(f)
        for line in reader:
            try:
                airports[line[4]] = Airport(line[4], line[1], line[3], line[2], float(line[6]), float(line[7]), countries_dict)
            except KeyError: # If country isn't found, the airport won't be added to the dictionary
                continue
    return airports
[ "def load_airport_info(airport_file):\n with open(airport_file, 'r') as file_handle:\n for line in file_handle:\n line = line.split(',')\n name, code, latitude, longitude = line[1].strip('\"'), line[4].strip('\"'), float(line[6]), float(line[7])\n AIRPORT_LOCATIONS[code] = (latitude, longitude)\n AIRPORT_CODES[code] = '{name} ({code})'.format(name=name, code=code)\n AIRPORT_CODES[name.upper()] = '{name} ({code})'.format(name=name, code=code)", "def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary", "def getCityCodeDict():\n \n filename = 'C:/Course/1905/Data/airports.csv'\n dictionary = {}\n for input in open(filename,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport(cityCode = input[0], city = input[1]) # create new object\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary", "def read_airports():\n with open('../cities_with_airports.json', 'r') as data:\n airport_file = json.load(data)\n airport_dictionary = {}\n for city in airport_file:\n airport_dictionary[city['city']] = {\n 'lat_lon': city['lat_lon'],\n 'connections': city['destination_cities']\n }\n return airport_dictionary", "def getAircraftCodeDict():\n\n dictionary = {}\n f = open(filename2,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def read_airports(airports_source: TextIO) -> AirportDict:\n #AirportDict = Dict[str, List[str]]\n \n airports_list = airports_source.readlines()\n d = {}\n iata_index = AIRPORT_DATA_INDEXES['IATA']\n \n i = 0\n while i < len(airports_list):\n num_comma = 0\n comma_index = 0 \n while num_comma < iata_index:\n comma_index = airports_list[i].find(',', comma_index)\n num_comma += 1\n comma_index += 1\n iata = airports_list[i][comma_index + 1: \\\n airports_list[i].find(',', comma_index) - 1]\n \n if iata != '\"\"' and iata != \"\\\\N\":\n d[iata] = [get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Airport ID']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Name']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['City']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Country']), get_airports_information(airports_list[i],\\\n AIRPORT_DATA_INDEXES['IATA']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['ICAO']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Latitude']), get_airports_information(airports_list[i],\\\n AIRPORT_DATA_INDEXES['Longitude']), get_airports_information(airports_list[i],\\\n AIRPORT_DATA_INDEXES['Altitude']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Timezone']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['DST']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Tz']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Type']), get_airports_information(airports_list[i], \\\n AIRPORT_DATA_INDEXES['Source'])]\n \n i 
+= 1\n \n return d", "def get_airports():\n iata_to_city = {}\n with open('./airports.txt') as f:\n for line in f.readlines():\n line = line.strip()\n\n if len(line) < 5:\n continue\n\n r = line.strip().split(',')[0]\n r = r.replace(' ', '')\n iata, city = r.split('-', 1)\n\n if iata_to_city.get(iata) is None:\n iata_to_city[iata] = city\n\n return iata_to_city", "def load_airports(csv_file_name):\n airports = {}\n with open(csv_file_name, newline='') as data_file:\n for entry in csv.reader(data_file):\n a = Airport(csv_entry=entry)\n airports[a.iata] = a\n return airports", "def getAircraftCodeDict():\n table = 'aircraft'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n aircraft = airlineClasses.Aircraft()\n aircraft.aircraftCode = row[0]\n aircraft.name = row[1]\n d[aircraft.aircraftCode] = aircraft\n \n curs.close()\n connection.close()\n return d", "def getFlightDict():\n\n dictionary = {}\n f = open(filename3,'rb') \n\n dictionary = pickle.load(f)\n f.close()\n return dictionary", "def process_file(f):\n data = []\n info = {}\n info[\"courier\"], info[\"airport\"] = f[:6].split(\"-\")\n \n with open(\"{}/{}\".format(datadir, f), \"r\") as html:\n\n soup = BeautifulSoup(html)\n tr = soup.find_all('tr',{'class':'dataTDRight'})\n for r in tr:\n td = r.find_all('td')\n if td[1].get_text() != \"TOTAL\":\n info[\"year\"] = int(td[0].get_text())\n info[\"month\"] = int(td[1].get_text())\n info[\"flights\"] = {\"domestic\": int(td[2].get_text().replace(\",\",\"\")),\n \"international\": int(td[3].get_text().replace(\",\",\"\"))}\n data.append(info)\n return data", "def extract_airports(filename, store):\n print filename\n f = open(filename, 'r')\n text = f.read()\n f.close()\n \n if store:\n ## Database connection, db, collection\n conn = pymongo.Connection()\n db=conn.flight_db\n ap = db.airports\n\n airport_list = []\n \n ## extract city,country,airport code\n #match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a></td>\\s+', text)\n match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a><span\\s*style=.*', text)\n if not match:\n print 'airport:rank not found...'\n exit(1)\n for tuples in match:\n if store:\n ap.insert({\n 'city':tuples[0],\n 'country':tuples[1],\n 'code':tuples[2]\n })\n airport_list.append(tuples[0] + ', ' + tuples[1] + ' - ' + tuples[2])\n if store:\n conn.disconnect()\n return airport_list", "def get_iso_codes_by_continent(filename):\n fobj=open(filename, 'r', encoding=\"utf-8\") #open file\n dct={}\n \n for line in fobj: #go line by line\n line_upper=line.upper().strip(\"\\n\") #change everything in the line to upper case, remove \\n\n lst=line_upper.split(\"\\t\") #split the line by tabs into a list\n if lst[1] in dct: #if the continent is already in the dictionary, add the country \n dct[lst[1]].append(lst[0])\n else:\n dct[lst[1]]=list() #assign the continent ot a list\n \n dct[lst[1]].append(lst[0]) #add the continent to the list\n \n fobj.close() #close the file\n \n return dct", "def read_airports(file_name):\n file = None\n try:\n file = open(file=file_name, mode='r', encoding='utf-8')\n line = file.readline()\n while line:\n line = file.readline()\n # remove all the double quotes\n stripped_line = 
line.replace(\"\\\"\", \"\")\n try:\n cols = stripped_line.split(\",\")\n if cols[2] == 'small_airport':\n SMALL_AIRPORTS.append(cols[3])\n elif cols[2] == 'large_airport':\n LARGE_AIRPORTS.append(cols[3])\n elif cols[2] == 'medium_airport':\n MEDIUM_AIRPORTS.append(cols[3])\n except Exception as exception_e:\n print(exception_e)\n print(\"Bad Record....\")\n finally:\n print('Done....')\n print(len(SMALL_AIRPORTS))\n print(len(LARGE_AIRPORTS))\n print(len(MEDIUM_AIRPORTS))\n except FileNotFoundError as fne:\n print(\"Check if you have really provided a correct file \")\n print(fne)\n finally:\n if file:\n file.close()", "def get_airports_codes():\n airports_codes = Airport.find({'icao_code': {\"$ne\": None}},\n {'code': 1, 'iata_code': 1, 'icao_code': 1, 'country': 1, 'state': 1,\n 'city': 1, 'name': 1, '_id': 0})\n return dict((i.icao_code, i) for i in airports_codes if i.icao_code)", "def get_airports_codes():\n airports_codes = Airport.find({'iata_code': {\"$ne\": None}},\n {'code': 1, 'iata_code': 1, 'icao_code': 1, 'country': 1, 'city': 1,\n 'name': 1, '_id': 0})\n return dict((i.iata_code, i) for i in airports_codes if i.iata_code)", "def load_airports():\n\n Airport.query.delete()\n\n with open('static/data/airports.csv', 'rb') as csvfile:\n next(csvfile)\n airports_reader = csv.reader(csvfile)\n for row in airports_reader:\n airport_id = row[0]\n name = row[3]\n code = row[13] # iata_code\n size = row[2]\n latitude_deg = row[4]\n longitude_deg = row[5]\n continent = row[7]\n iso_country = row[8]\n iso_region = row[9]\n municipality = row[10]\n\n # if iata_code is empty, try local_code\n if code == '':\n code = row[14]\n\n # if local_code is also empty, try gps_code\n if code == '':\n code = row[12]\n\n airport = Airport(airport_id=airport_id,\n name=name,\n code=code,\n size=size,\n latitude_deg=latitude_deg,\n longitude_deg=longitude_deg,\n continent=continent,\n iso_country=iso_country,\n iso_region=iso_region,\n municipality=municipality)\n\n db.session.add(airport)\n\n db.session.commit()", "def parse_airports(xml_file):\n\n tree = ET.XML(xml_file)\n airports = []\n for node in tree.getiterator('airportName'):\n code = node.attrib.get(\"code\")\n name = node.attrib.get(\"name\")\n airports.append(AirPort(code, name))\n\n return airports", "def get_airports_codes():\n airports_codes = Airport.find({'code': {\"$ne\": None}},\n {'code': 1, 'iata_code': 1, 'icao_code': 1, 'country': 1,\n 'city': 1, 'name': 1, '_id': 0})\n return dict((i.code, i) for i in airports_codes if i.code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of routes from a file, in the format [name, [airport code list]]. Return None if file not found.
def getRouteInputFile(filename):
    if filename[-4:] != ".csv": # Make sure the filename is a .csv
        return None
    routes = []
    try:
        with open(os.path.join("input", filename), "rt", encoding="utf8") as f:
            reader = csv.reader(f)
            for line in reader:
                try:
                    routes.append([line[0], line[1:]])
                except (UnicodeDecodeError, IndexError): # skip blank lines and lines with invalid characters
                    continue
    except (FileNotFoundError, OSError):
        return None
    return routes
[ "def get_flight_route_from_file(self):\n returnList = []\n with open(FILENAME, 'r', encoding=\"utf8\") as csvFile:\n csvReader = csv.DictReader(csvFile, delimiter=',')\n for line in csvReader:\n returnList.append(line)\n self.__dictList = returnList\n self.flightRouteList = []\n for dictionary in self.__dictList:\n flightRoute = FlightRoute(dictionary)\n self.flightRouteList.append(flightRoute)", "def get_paths(file):\n if file is None:\n return []\n else:\n ls = []\n with open(file) as f:\n ls.append(f.readline())\n return ls", "def parse_input_route(filename):\n\tf = open(filename, 'r')\n\troute = []\n\tdimension = -1 \n\tdimension_found = False\n\tnode_section_found = False\n\n\t# Parse header\n\tfor line in f:\n\t\tif \"DIMENSION\" in line:\n\t\t\ttokens = line.split()\n\t\t\tdimension = int(tokens[-1])\n\t\t\tdimension_found = True\n\t\tif \"NODE_COORD_SECTION\" in line:\n\t\t\tnode_section_found = True\n\t\t\tbreak\n\n\t# Check for parsing errors in header\n\tif not dimension_found:\n\t\tprint(\"99 TSP - Parsing error: DIMENSION not found\")\n\t\tf.close()\n\t\treturn None\n\telif not node_section_found:\n\t\tprint(\"99 TSP - Parsing error: NODE_COORD_SECTION header not found\")\n\t\tf.close()\n\t\treturn None\n\n\t# Parse nodes\n\tfor line in f:\n\t\tif \"EOF\" in line:\n\t\t\tbreak\n\t\tcoords = get_coords(line)\n\t\tif not coords:\n\t\t\tprint(\"99 TSP - Parsing error: Invalid node data found\")\n\t\t\tf.close()\n\t\t\treturn None\n\t\troute.append(Node(coords))\n\tf.close()\n\n\t# Check for parsing error with nodes\n\tif len(route) != dimension:\n\t\tprint(\"99 TSP - Parsing error: number of nodes found does not match dimension\")\n\t\treturn None\n\n\treturn route", "def getCustomRoutes():\n custom_routes = []\n with open(settings.KAM_CFG_PATH, 'r') as kamcfg_file:\n kamcfg_str = kamcfg_file.read()\n\n regex = r\"CUSTOM_ROUTING_START.*CUSTOM_ROUTING_END\"\n custom_routes_str = re.search(regex, kamcfg_str, flags=re.MULTILINE | re.DOTALL).group(0)\n\n regex = r\"^route\\[(\\w+)\\]\"\n matches = re.finditer(regex, custom_routes_str, flags=re.MULTILINE)\n\n for matchnum, match in enumerate(matches):\n if len(match.groups()) > 0:\n custom_routes.append(match.group(1))\n\n for route in custom_routes:\n print(route)\n return custom_routes", "def bus_routes():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file)\n reader = csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3]) #Journey ID field\n if route not in route_list and route!=\"\": #error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def get_routes(iface):\n if iface == \"routes\":\n path = _SUSE_NETWORK_ROUTES_FILE\n else:\n path = os.path.join(_SUSE_NETWORK_SCRIPT_DIR, \"ifroute-{}\".format(iface))\n return _read_file(path)", "def get_routers(filename):\n with open(filename, 'r') as json_file:\n add_list = json.loads(json_file.read())\n print(\"Addresses :\" + str(add_list))\n routers = [telnet_router.TN_ROUTER(router['device'], router['username'], \\\n router['password'], router['en_password']) for router in add_list]\n return routers", "def retrieveRouteData(filename=\"sampleroutes.txt\"):\n with open(filename, \"r\") as f:\n return f.read()", "def bus_routes_direction():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file) #useful for monitoring progress of function\n reader = csv.reader(open(file))\n for line in reader:\n route = extract_route_and_direction(line[3]) # Journey 
ID field\n if route not in route_list and route != \"\": # error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def get_routes(input_dir):\n print 'parsing shapes...'\n shapes = _parse_shapes(os.path.join(input_dir, 'shapes.txt'))\n print 'parsing stops...'\n stops = _parse_stops(os.path.join(input_dir, 'stops.txt'))\n print 'parsing routes...'\n routes = _parse_routes(os.path.join(input_dir, 'routes.txt'))\n print 'adding services and trips to routes...'\n _add_services_trips_to_routes(routes, os.path.join(input_dir, 'trips.txt'))\n print 'adding calendar to services...'\n _add_calendar_to_services(routes, os.path.join(input_dir, 'calendar.txt'))\n print 'adding calendar dates to services...'\n _add_calendar_dates_to_services(routes, os.path.join(input_dir, 'calendar_dates.txt'))\n print 'adding stop times to trips...'\n _add_stop_times_to_trips(routes, os.path.join(input_dir, 'stop_times.txt'))\n print 'adding shapes to routes...'\n _add_shapes_to_routes(routes, shapes, stops)\n\n return routes", "def load_rooms(self, filename):\n with open(filename, \"r\") as f:\n roomss = []\n for line in f:\n line = line.strip()\n\n # Add id, name and description to each room object\n if line.isdigit():\n id = line\n line = f.readline()\n line = line.strip()\n name = line\n line = f.readline()\n line = line.strip()\n description = line\n room = Room(id, name, description)\n roomss.append(room)\n\n # Add the connected routes to the room\n elif line.isupper():\n line = line.split()\n direction = line[0]\n room_number = line[1]\n\n # Add multiple routes to a direction if needed\n if not direction in roomss[-1].connection:\n roomss[-1].connection[direction] = [room_number]\n else:\n roomss[-1].connection[direction].append(room_number)\n return roomss", "def read_cities(file_name):\n road_map = []\n with open(file_name, 'r') as f:\n for line in f:\n tmp = line.split(\"\\t\")\n road_map.append((tmp[0], tmp[1], float(tmp[2]), float(tmp[3])))\n return road_map", "def mbta_route_list():\n f = open('complete_routes.txt', 'r')\n complete_routes = ast.literal_eval(f.read())\n\n #creates list of all route_ids in MBTA system\n subway_route_list = []\n for x in range(len(complete_routes['mode'])):\n if complete_routes['mode'][x]['mode_name'] == 'Subway':\n for y in range(len(complete_routes['mode'][x]['route'])):\n subway_route_list.append(complete_routes['mode'][x]['route'][y]['route_id'])\n\n #removes duplicates from list and returns\n return list(OrderedDict.fromkeys(subway_route_list))", "def get_routes():\n\n return Db().get_line_ids()", "def get_passports(filename):\n with open(filename) as f:\n content = f.read().split(\"\\n\\n\")\n passports = [dict(token_regex.findall(line)) for line in content]\n return passports", "def read_route_table():\n try:\n with open('/proc/net/route') as routing_table:\n return list(map(str.strip, routing_table.readlines()))\n except Exception as e:\n logger.error(\"Cannot read route table [{0}]\", ustr(e))\n\n return []", "def get_route_with_scariness_from_file(route_file_path):\n route = read_gpx.read_gpx(route_file_path)\n route = read_gpx.pad_gpx_dataframe(route)\n route_bounds = read_gpx.get_route_bounds(route)\n if not csp.check_route_bounds_fit_location_data(route_bounds):\n abort(400)\n altitudes_df = csp.get_complete_route_altitude_df(route_bounds)\n route = csp.calculate_route_scariness(route, altitudes_df)\n administer_route_database.insert_route_into_db_table(\n administer_route_database.prepare_route_for_insertion(route, 
route_file_path),\n administer_route_database.get_route_db_connection(), 'waypoints'\n )\n return route", "def get_rides_list(filepath: str) -> list:\n store = AddressStore()\n rides = []\n with open(filepath, 'r', newline='') as f:\n # skip header\n next(f)\n reader = csv.reader(f)\n for line in reader:\n rides.append(generate_ride(line, store))\n print(f'Rides parsed: {len(rides)}')\n return rides", "def read_file(self, path, route_id):\n\n #uncoded_route_id = route_id.decode(\"utf-8\")\n route_id = str(route_id)\n path += \"/line_\" + route_id + \".txt\"\n with io.open(path, encoding=\"utf-8\") as f:\n lines = f.readlines()\n\n stop_of_graph_list = list()\n\n # So here we're examining the lines of the file\n for line in lines[1:]:\n line = line.strip()\n\n if line != '':\n stop_of_graph_list.append(StopOfGraph.StopOfGraph(line))\n\n # We mustn't forget to give our bus line a name\n self.line_id = lines[0]\n return stop_of_graph_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a csv input file, given a list of routes. Routes are lists of names and airport codes.
def writeRoutesCSV(filename, routes):
    if filename[-4:] != ".csv": # Make sure the filename is a .csv
        filename += ".csv"
    try:
        with open(os.path.join("input", filename), "w", newline='') as f:
            writer = csv.writer(f, delimiter=",")
            writer.writerows(routes)
    except (OSError, FileNotFoundError):
        return False
    else:
        return True
[ "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def export_features_to_csv(split_route, out_csv_name):\n print(\"Appending information to route\")\n\n gps_fields = \";\".join((\"longitude\", \"latitude\", \"altitude\", \"speed\", \"hort_accur\",\n \"vert_accur\", \"started_at\", \"recorded_a\"))\n user_fields = \";\".join((\"app_user_i\", \"winter\", \"rider_hist\", \"workzip\", \"income\",\n \"cyclingfre\", \"age\", \"cycling_le\", \"gender\", \"rider_type\",\n \"schoolzip\", \"homezip\", \"cyclingexp\"))\n trip_fields = \";\".join((\"purpose\", \"FID\", \"Cumul_Mete\"))\n road_fields = \";\".join((\"LF_NAME\", \"ONE_WAY_DI\", \"sig_dist\", \"stop_dist\", \"SourceOID\",\n \"SLOPE_TF\", \"Shape_Leng\", \"RDCLASS\", \"Bike_Class\",\n \"Bike_Code\", \"EMME_MATCH\", \"EMME_CONTR\", \"link_dir\"))\n fields = \";\".join((gps_fields, user_fields, trip_fields, road_fields))\n arcpy.ExportXYv_stats(split_route, fields, \"COMMA\", out_csv_name, \"ADD_FIELD_NAMES\")", "def write_to_csv(all_roads, geo, out_fn):\n\n output_header = [\"road_id\", \"color\", \"origin_lon\",\n \"origin_lat\", \"dest_lon\", \"dest_lat\"]\n\n segments_written = 0\n with open(out_fn, 'w') as fout:\n csvwriter = csv.writer(fout)\n csvwriter.writerow(output_header)\n\n for color in ['green', 'yellow', 'red']:\n roads = all_roads[color]\n for road_id in roads:\n # road is a list of coordinates, {x:___, y:___}.\n # we want to encode each pair of coordinates as its\n # own row in the CSV.\n road = geo[road_id]\n for origin, dest in zip(road, road[1:]):\n origin_lon = origin['x']\n origin_lat = origin['y']\n dest_lon = dest['x']\n dest_lat = dest['y']\n\n row = [road_id, color, origin_lon, origin_lat,\n dest_lon, dest_lat]\n csvwriter.writerow(row)\n\n segments_written += 1\n if segments_written % 100 == 0:\n print(f\"Added {segments_written} segments so far.\")\n\n print(f\"Added all {color} roads.\")", "def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])", "def csv_file_creator(path, list_of_jobs):\n with open(path, \"wb\") as out_file:\n writer = UnicodeWriter(out_file, delimiter=',')\n for row in list_of_jobs:\n writer.writerow(row)", "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\tfor i in list_file:\n\t\t\twriter.writerow(i)", "def write_to_csv( list_of_rows, filename ):\n try:\n csvfile = open( filename, \"w\", newline='' )\n filewriter = csv.writer( csvfile, delimiter=\",\")\n for row in list_of_rows:\n filewriter.writerow( row )\n csvfile.close()\n\n except:\n print(\"File\", filename, \"could not be opened for writing...\")", "def csv_file_writer(list_of_links):\n with open(\"active_malwares_file.csv\", 'w', newline='') as f:\n writer = 
csv.writer(f)\n writer.writerow(['Links'])\n writer.writerows(list_of_links)", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(aList)\n\tw.close()", "def append_routes(metro_arch: ZipFile, gtfs_dir: str) -> List[str]:\n filename = \"routes.txt\"\n inserted_routes = []\n local_file = join(gtfs_dir, filename)\n\n # Get the header of local GTFS file\n header = peek_csv_header(local_file)\n\n # Open local file in read+write mode, open file form metro GTFS and create wrap it into text\n with open(local_file, \"a\", encoding=\"utf-8\", newline=\"\") as target_buff, \\\n metro_arch.open(filename, \"r\") as in_binary_buff, \\\n TextIOWrapper(in_binary_buff, encoding=\"utf-8\", newline=\"\") as in_txt_buff:\n\n # Pass file objects into csv readers/writers.\n reader = csv.DictReader(in_txt_buff)\n writer = csv.DictWriter(target_buff, fieldnames=header, extrasaction=\"ignore\")\n\n for row in reader:\n # Collect route_id\n inserted_routes.append(row[\"route_id\"])\n\n # Pass row to writer\n writer.writerow(row)\n\n target_buff.flush()\n\n return inserted_routes", "def csv_writer(list_,csv_name):\n with open(csv_name,\"wb\") as outfile:\n csv_writer = csv.writer(outfile)\n for element in list_:\n csv_writer.writerow(element)", "def write_csv(path, numbers):\n with open(path, \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow([\"number\", \"firstname\", \"lastname\", \"address\", \"city\", \"state\", \"zip\", \"latitude\", \"longitude\", \"vendor\", \"restricted\"])\n for n in numbers:\n if len(n.get(\"contacts\", [])) == 0:\n writer.writerow([n[\"number\"]])\n else:\n for c in n[\"contacts\"]:\n zipcode = c.get(\"zip\", \"\")\n if zipcode is None: zipcode = \"\"\n if len(zipcode) > 5:\n zipcode = \"{}-{}\".format(zipcode[:5], zipcode[5:])\n\n writer.writerow([\n n[\"number\"],\n c.get(\"firstname\", \"\"),\n c.get(\"lastname\", \"\"),\n c.get(\"address\", \"\"),\n c.get(\"city\", \"\"),\n c.get(\"state\", \"\"),\n zipcode,\n c.get(\"latitude\", \"\"),\n c.get(\"longitude\", \"\"),\n n.get(\"vendor\", \"-\").split(\"-\")[0],\n \"Y\" if \"restricted\" in n.get(\"vendor\", \"\") else \"N\"])", "def write_offices_to_csv(agencies):\n with open(\"/Users/jonathanrubin/dropbox/python/python(fromdesktop)/git/refactoredScrape3.csv\", \"w\") as nationwide_csv:\n print(\"Writing to CSV...\")\n writer = csv.writer(nationwide_csv)\n writer.writerow(['Agent Name', 'Office Name', 'Office Phone', 'Address', 'City', 'State', 'Zip Code'])\n for each_office in agencies:\n writer.writerow(each_office)", "def save_csv_file(self):\n filename = filedialog.asksaveasfilename(confirmoverwrite=True,initialdir=\"/\", title=\"Save Cheapest Route\", filetypes= ((\"CSV file\",\"*.csv\"),(\"All files\",\"*.*\")),defaultextension='.csv')\n if filename:\n with open(filename, \"w\") as csvFile:\n fieldnames = ['Aircraft Model','Departure', 'Arrival', 'Distance(Km)', 'Cost(Euro)', 'Fuel(L)']\n writer = csv.DictWriter(csvFile, fieldnames=fieldnames)\n writer.writeheader()\n for elem in range(len(self.list_text_boxes)):\n writer.writerow({fieldnames[0]: self.aircraft_var.get(),\n fieldnames[1]: self.list_text_boxes[elem][0].get(),\n fieldnames[2]: self.list_text_boxes[elem][1].get(),\n fieldnames[3]: self.list_text_boxes[elem][2].get(),\n fieldnames[4]: self.list_text_boxes[elem][3].get(),\n fieldnames[5]: self.list_text_boxes[elem][4].get()})\n writer.writerow({fieldnames[3]: self.list_total[0].get(),\n fieldnames[4]: self.list_total[1].get(),\n 
fieldnames[5]: self.list_total[2].get()})", "def csv_list2(request):\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=rnaforces.csv'\n csv_forces = Forces.objects.all()\n # Create the CSV writer using the HttpResponse as the \"file.\"\n writer = csv.writer(response)\n writer.writerow(['Step','Shift', 'Slide', 'Rise', 'Tilt', 'Roll', 'Twist'])\n for (ch) in csv_forces:\n writer.writerow([ch.step_id, ch.shift, ch.slide, ch.rise, ch.tilt, ch.roll, ch.twist])\n return response", "def write_csv(rows, output_path, delimiter=','):\n with open(output_path, 'w') as csvfile:\n out_writer = csv.writer(csvfile, delimiter=delimiter)\n for row in rows:\n out_writer.writerow(row)", "def create_csv(file_name, data):\n with open(file_name, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(data)", "def generate_csv(dict_of_lists, filename):\n with open(filename, 'w') as file:\n writer = csv.writer(file)\n for key in dict_of_lists:\n writer.writerow([key] + dict_of_lists[key])", "def write_dictList_to_file(self):\n with open(FILENAME, 'w', newline='', encoding='utf8') as csvfile:\n fieldnames = ['Flight route ID'\n ,'Country'\n ,'Airport'\n ,'Flight distance'\n ,'Travel time'\n ,'Emergency contact'\n ,'Emergency number']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for dictionary in self.__dictList:\n writer.writerow(dictionary)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write output .csv file for list of itineraries. Output file shows cheapest route and its cost.
def writeItineraryOutput(filename, itins):
    if filename[-4:] != ".csv": # Make sure the filename is a .csv
        filename += ".csv"
    try:
        with open(os.path.join("output", filename), "w", newline='') as f:
            writer = csv.writer(f, delimiter=",")
            firstline = ["Name", "Cost", "Home", "Dest 1", "Dest 2", "Dest 3", "Dest 4", "Dest 5", "Dest 6"]
            writer.writerow(firstline)
            for itinerary in itins:
                line = []
                line.append(itinerary.name)
                line.append(itinerary.cheapest_cost)
                line = line + itinerary.cheapest_route.getCodeList()
                writer.writerow(line)
    except (FileNotFoundError, OSError):
        return False
    else:
        return True
[ "def get_output(self, total_costs):\n with open(f\"./data/outputfiles/chip_{self.chip_id}_net_{self.netlist_id}.csv\", 'w') as file:\n \n # This other output file can be used for the check50\n # with open('./data/outputfiles/output.csv', 'w') as file:\n output = writer(file)\n output.writerow([\"net\", \"wires\"])\n\n for net in self.netlist:\n start_gate, end_gate = net.start.name, net.end.name\n route = tuple([int(start_gate),int(end_gate)])\n route_string = str(route).replace(\" \", \"\")\n routelist = net.show_route_coordinates()\n routelist_string = str(routelist).replace(\" \", \"\")\n output.writerow([route_string, f\"{routelist_string}\"])\n \n output.writerow([f\"chip_{self.chip_id}_net_{self.netlist_id},{total_costs}\"])", "def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])", "def csv_file_writer(list_of_links):\n with open(\"active_malwares_file.csv\", 'w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(['Links'])\n writer.writerows(list_of_links)", "def write_dictList_to_file(self):\n with open(FILENAME, 'w', newline='', encoding='utf8') as csvfile:\n fieldnames = ['Flight route ID'\n ,'Country'\n ,'Airport'\n ,'Flight distance'\n ,'Travel time'\n ,'Emergency contact'\n ,'Emergency number']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for dictionary in self.__dictList:\n writer.writerow(dictionary)", "def output_all_lines_to_csv(self):\n with open(cfg.OUTPUT_LOC+\"\\\\all_lines.csv\", \"wb\") as f:\n writer = csv.writer(f)\n writer.writerows(self.all_lines)", "def write_csv(weights):\n string=str(weights[0]) + \",\" +str(weights[1])+ \",\" +str(weights[2])+\"\\n\"\n fp = open(Weight_File, \"a+\")\n fp.write(string)\n fp.close()", "def export_features_to_csv(split_route, out_csv_name):\n print(\"Appending information to route\")\n\n gps_fields = \";\".join((\"longitude\", \"latitude\", \"altitude\", \"speed\", \"hort_accur\",\n \"vert_accur\", \"started_at\", \"recorded_a\"))\n user_fields = \";\".join((\"app_user_i\", \"winter\", \"rider_hist\", \"workzip\", \"income\",\n \"cyclingfre\", \"age\", \"cycling_le\", \"gender\", \"rider_type\",\n \"schoolzip\", \"homezip\", \"cyclingexp\"))\n trip_fields = \";\".join((\"purpose\", \"FID\", \"Cumul_Mete\"))\n road_fields = \";\".join((\"LF_NAME\", \"ONE_WAY_DI\", \"sig_dist\", \"stop_dist\", \"SourceOID\",\n \"SLOPE_TF\", \"Shape_Leng\", \"RDCLASS\", \"Bike_Class\",\n \"Bike_Code\", \"EMME_MATCH\", \"EMME_CONTR\", \"link_dir\"))\n fields = \";\".join((gps_fields, user_fields, trip_fields, road_fields))\n arcpy.ExportXYv_stats(split_route, fields, \"COMMA\", out_csv_name, \"ADD_FIELD_NAMES\")", "def write_csv(header, table_data, output_file):\r\n with open(output_file, \"a+\", newline=\"\") as file:\r\n writer = csv.writer(file)\r\n if not os.path.exists(output_file) or os.path.getsize(output_file) == 0:\r\n writer.writerow(header)\r\n writer.writerows(table_data)\r\n print()\r\n print(f\"File {output_file} was created.\")", "def write_to_csv(all_roads, geo, out_fn):\n\n output_header = [\"road_id\", \"color\", \"origin_lon\",\n \"origin_lat\", \"dest_lon\", \"dest_lat\"]\n\n segments_written = 0\n with open(out_fn, 'w') as fout:\n csvwriter = csv.writer(fout)\n 
csvwriter.writerow(output_header)\n\n for color in ['green', 'yellow', 'red']:\n roads = all_roads[color]\n for road_id in roads:\n # road is a list of coordinates, {x:___, y:___}.\n # we want to encode each pair of coordinates as its\n # own row in the CSV.\n road = geo[road_id]\n for origin, dest in zip(road, road[1:]):\n origin_lon = origin['x']\n origin_lat = origin['y']\n dest_lon = dest['x']\n dest_lat = dest['y']\n\n row = [road_id, color, origin_lon, origin_lat,\n dest_lon, dest_lat]\n csvwriter.writerow(row)\n\n segments_written += 1\n if segments_written % 100 == 0:\n print(f\"Added {segments_written} segments so far.\")\n\n print(f\"Added all {color} roads.\")", "def write_kpi_file(n_ships, total_time, max_wait, max_y, furthest_distance,avg_time):\n arg = list(map(lambda x : str(x),[n_ships, total_time, max_wait, max_y, furthest_distance,avg_time]))\n string = \"\\n\".join(arg)\n with open(\"kpi.csv\",\"w+\",encoding=\"utf-8\") as f:\n f.write(string)", "def write_offices_to_csv(agencies):\n with open(\"/Users/jonathanrubin/dropbox/python/python(fromdesktop)/git/refactoredScrape3.csv\", \"w\") as nationwide_csv:\n print(\"Writing to CSV...\")\n writer = csv.writer(nationwide_csv)\n writer.writerow(['Agent Name', 'Office Name', 'Office Phone', 'Address', 'City', 'State', 'Zip Code'])\n for each_office in agencies:\n writer.writerow(each_office)", "def csv_file(self):\n try:\n file = CSVOutput()\n file.write_to_csv_file(self.results)\n except Exception as e:\n print(\"Something went wrong.\")\n print(\"The computer provides this message: \" + e)\n print(\"Exiting.\")", "def write_csv(rows, output_path, delimiter=','):\n with open(output_path, 'w') as csvfile:\n out_writer = csv.writer(csvfile, delimiter=delimiter)\n for row in rows:\n out_writer.writerow(row)", "def write_to_csv( list_of_rows, filename ):\n try:\n csvfile = open( filename, \"w\", newline='' )\n filewriter = csv.writer( csvfile, delimiter=\",\")\n for row in list_of_rows:\n filewriter.writerow( row )\n csvfile.close()\n\n except:\n print(\"File\", filename, \"could not be opened for writing...\")", "def save_csv_file(self):\n filename = filedialog.asksaveasfilename(confirmoverwrite=True,initialdir=\"/\", title=\"Save Cheapest Route\", filetypes= ((\"CSV file\",\"*.csv\"),(\"All files\",\"*.*\")),defaultextension='.csv')\n if filename:\n with open(filename, \"w\") as csvFile:\n fieldnames = ['Aircraft Model','Departure', 'Arrival', 'Distance(Km)', 'Cost(Euro)', 'Fuel(L)']\n writer = csv.DictWriter(csvFile, fieldnames=fieldnames)\n writer.writeheader()\n for elem in range(len(self.list_text_boxes)):\n writer.writerow({fieldnames[0]: self.aircraft_var.get(),\n fieldnames[1]: self.list_text_boxes[elem][0].get(),\n fieldnames[2]: self.list_text_boxes[elem][1].get(),\n fieldnames[3]: self.list_text_boxes[elem][2].get(),\n fieldnames[4]: self.list_text_boxes[elem][3].get(),\n fieldnames[5]: self.list_text_boxes[elem][4].get()})\n writer.writerow({fieldnames[3]: self.list_total[0].get(),\n fieldnames[4]: self.list_total[1].get(),\n fieldnames[5]: self.list_total[2].get()})", "def write_log(self):\n with open(self.trav_stat_file, 'a') as stat_file:\n travel_writer = csv.writer(stat_file)\n # Every row starts with the start and destnation\n row = [self.start, self.dest]\n # This uses a static list so that the order is fixed\n for state in [\"waiting\", \"riding\", \"transferring\"]:\n state_total = sum(self.time_record[state])\n row.append(state_total)\n travel_writer.writerow(row)", "def route_information(th_object, 
topology_info, file_name, node1, node2, path):\n save_path = path + node1 + \"_\" + node2 + \"_vs_t2.csv\"\n route_data = th_object.get_node_len_etx(topology_info, node1, node2)\n with open(save_path, \"w+\") as f_name:\n f_name.write(\"Time,No_hopes,Cost\\n\")\n cc = 0\n for k in file_name:\n f_name.write(str(k)[11:-7] + \",\" + str(route_data[cc]['hopes_count']) + \",\" + str(route_data[cc]['cost']) +\n \"\\n\")\n cc += 1\n print(node1 + \" \" + node2 + \" route information exported\")", "def _log_as_csv(self):\n route_filename = f'routes/route_{self.route_id}_{self.user_id}.csv'\n route_log_exists = os.path.isfile(self.gps_logs_dir + route_filename)\n with open(self.gps_logs_dir + route_filename, 'a') as route_log:\n headers = ['Timestamp', 'Latitude', 'Longitude', 'Total_Distance']\n writer = csv.DictWriter(route_log, delimiter=',',\n lineterminator='\\n',\n fieldnames=headers)\n if not route_log_exists:\n writer.writeheader()\n writer.writerow({\n 'Timestamp': self.timestamp,\n 'Latitude': self.lat,\n 'Longitude': self.lon,\n 'Total_Distance': self.total_distance,\n })\n route_log.flush()\n os.fsync(route_log)", "def writeRoutesCSV(filename, routes):\n if filename[-4:] != \".csv\": # Make sure the filename is a .csv\n filename += \".csv\"\n try:\n with open(os.path.join(\"input\", filename), \"w\", newline='') as f:\n writer = csv.writer(f, delimiter=\",\")\n writer.writerows(routes)\n except (OSError, FileNotFoundError):\n return False\n else:\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an input file with randomly generated routes for num_people.
def generateRandomInput(filename, num_people, travel_db):
    import random
    routes = []
    for i in range(num_people):
        route = travel_db.randomRoute()
        route.insert(0,"Person " + str(i)) # Add a name for each route.
        routes.append(route)
    if FileHandler.writeRoutesCSV(filename,routes): # If it's successful writing the file
        print("File {0} created successfully with {1} people.".format(filename, num_people))
    else:
        print("File {0} could not be created.".format(filename))
[ "def generer(nombre, distance):\n with open(\"test/{}.pts\".format(nombre), \"w\") as file:\n file.write(\"{}\\n\".format(distance))\n for _ in range(nombre):\n point = random(), random()\n file.write(\"{}, {}\\n\".format(point[0], point[1]))\n file.close()", "def genpoints(n, f, path):\n try:\n os.makedirs(path)\n except OSError:\n pass\n for i in range(f):\n out = open('{}/out_{}'.format(path,i), 'w')\n for j in range(n):\n x = random.random()\n y = random.random()\n out.write(str(x) + ',' + str(y) + '\\n')\n out.close()", "def generate_file(n_lines, file_name):\n with open('top_pwd_list.txt') as input_file, open(f'sample_input_files/{file_name}.txt', 'w') as output_file:\n word_list = [line.rstrip('\\n') for line in input_file]\n for i in range(0, n_lines):\n line_words = random.sample(\n population=word_list,\n k=random.randrange(1,200)\n )\n output_file.write(' '.join(line_words) + '\\n')", "def routes_gen(num) -> Generator[Route, None, None]:\n with open(f'data/route-costs-{num}.txt', 'rb') as routes:\n for route in routes:\n prefix, cost = route[:-1].split(b',')\n yield (prefix, float(cost))", "def generate_nums(filename, n):\n text = ''\n for i in range(n):\n num = random.randrange(0, 100)\n text += (str(num) + '\\n')\n f = open(filename, 'w')\n f.write(text)\n f.close()\n return", "def _make_random_file(self, dir, num_chars=10000):\n filename = os.path.join(dir, \"f-%d\" % random.randint(1, 2**63 - 1))\n content = \"\".join([random.choice(\"0123456789abcdefghijklmnopqrstuvwxyz\\n\") for _ in range(num_chars)])\n with open(filename, \"w\") as f:\n f.writelines(content)\n return filename", "def generate_hosts_file(n, path=\"./tests/fixtures\"):\n if not os.path.isdir(path):\n os.mkdir(path)\n with open(f\"{path}/hosts.txt\", \"w\") as f:\n for i in range(n):\n f.write(f\"{i},localhost,127.0.0.1,{5000+i}\\n\")", "def program_routes(self):\n with open(self.filename, \"w\") as fn:\n for ip_nhop in self.ip_nhops:\n\n ip_route = \"sudo {} ip route add {}\".format(\n self.asic.ns_arg, ip_nhop.prefix\n )\n ip_nhop_str = \"\"\n\n for ip in ip_nhop.nhop:\n ip_nhop_str += \"nexthop via {} \".format(ip)\n\n ip_cmd = \"{} {}\".format(ip_route, ip_nhop_str)\n fn.write(ip_cmd+ \"\\n\")\n\n # copy file to DUT and run it on DUT\n self.duthost.copy(src=self.filename, dest=self.filename, mode=0755)\n result = self.duthost.shell(self.filename)\n pytest_assert(\n result[\"rc\"] == 0,\n \"IP add failed on duthost:{}\".format(self.filename)\n )", "def test_print_allocations_to_file_takes_valid_file_name(self):\n self.dojo.create_room(\"office\", [\"Blue\", \"Green\", \"Pink\"])\n self.dojo.create_room(\"living_space\", [\"A\", \"B\", \"C\"])\n self.dojo.load_people(\"inputs.txt\")\n self.assertRaises(ValueError, self.dojo.print_allocations, 877762)", "def build_routes_file(routes, name):\n top = dict()\n top[\"file-type\"] = \"routes\"\n top[\"name\"] = name\n top[\"routes\"] = routes\n return top", "def generate_random_input(n, p, fileName):\n\n\tmax_x = 1000\n\tL = []\n\tH = []\n\tE = []\n\tx = [] #non negative x-coordinate of vertices\n\tfor i in range(n):\n\t\tL.append('location' + str(i))\n\t\trand = round(random.random() * max_x) + 1\n\t\twhile rand in x:\n\t\t\trand = round(random.random() * max_x) + 1\n\t\tx.append(rand)\n\tfor i in range(n):\n\t\tif random.random() < p and len(H) < n / 2: #vertex is a home with probability p\n\t\t\tH.append(i)\n\tfor i in range(n):\n\t\tE.append([])\n\t\tfor j in range(0, i):\n\t\t\tE[i].append(abs(x[i] - x[j])) #E[i][j] = absolute value of difference in 
x-coordinates of vertex i and vertex j as weight to ensure triangular inequality\n\t\tE[i].append('x') #no self-edges\n\tfor i in range(n):\n\t\tfor j in range(i+1, n):\n\t\t\tE[i].append(E[j][i])\n\tstarting_index = int((random.random() * (len(L) - 1)) // 1)\n\ts = L[starting_index]\n\tprint_input(L, E, H, s, fileName)", "def generate_teams_data(numberOfRows, output_filename='teams.csv'):\n \n ID_set=set() # storing the ID's since primary key should be unique.\n list_of_rows=[]\n with open(output_filename, mode='w', newline='') as player_file:\n writer = csv.writer(player_file, delimiter=',')\n \n for _ in range(numberOfRows):\n TeamID = randint(1, 1000000000)\n while(TeamID in ID_set):\n TeamID = randint(1, 1000000000)\n ID_set.add(TeamID)\n Team_name = random.choice(NFL_teams)\n city = random.choice(NFL_city)\n \n list_of_rows.append([TeamID, Team_name, city])\n writer.writerows(list_of_rows)", "def generate_files(folder, n):\n try:\n os.mkdir(folder)\n except FileExistsError:\n pass\n\n for current in range(n):\n name = '{:0>10d}'.format(current)\n print(name)\n with open(os.path.join(folder, name), 'w') as file:\n file.write(name)", "def generate_random_data():\n size = convert_file_size(request.args.get(\"size\"))\n datagen, cache, fwriter, status = get_dependencies(\"dg\", \"ch\", \"fw\", \"st\")\n path = make_file_path()\n report = DataReport()\n data_stream = datagen.generate_randoms(hook=report.update, suffix=\", \")\n\n def write_file_async():\n nonlocal fwriter, path, data_stream, size, report, status\n file_name = get_file_name(path)\n status.update_status(file_name, \"WAITING\")\n fwriter.write(path, data_stream, max_size=size)\n cache.save_data(file_name, report.dict())\n status.update_status(file_name, \"FINISH\")\n\n Thread(target=write_file_async).start()\n return GenDataAPIResponse(path=path, size=size).json()", "def fillFile(self, len=100):\n if not os.path.exists(TESTFILES):\n os.mkdir(TESTFILES)\n fd, path = tempfile.mkstemp(dir=TESTFILES)\n self.path = path\n os.write(fd, os.urandom(len))\n os.close(fd)", "def create_room(self):\r\n room_number = randrange(1, AMOUNT) # random number\r\n if isFile(FILENAME): # file Exist\r\n with open(FILENAME, 'rt') as f: # for opening `FILENAME`\r\n for line in f.readlines(): # assign line to lines of the file\r\n _, _, id_number, _, roomnum = line.split(\r\n ' ') # get information from line of file\r\n roomnum = roomnum[-1] # bray pak kardan \"\\n\"\r\n # check if room number of this line from file equal to ranrange(1, ROOM_AMOUNT)\r\n if roomnum == room_number:\r\n self.create_number() # go back to start method `Recursion`\r\n return room_number", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def create_attack_files(source_directory, files_amount, route_name):\n\n for index in range(files_amount):\n create_directories(f'{source_directory}/{route_name}')\n for attack in ['Constant Attack', 'Height Attack', 'Velocity Attack', 'Mixed Attack']:\n create_directories(f'{source_directory}/{route_name}/{attack}')", "def _gen_test_file(self, 
range_num=1000000):\n file_path_name = self._hd_path + '/' + self._file_name\n try:\n os.remove(file_path_name)\n except OSError: #Não existia o arquivo\n pass\n #Gerando um arquivo.\n test_file = open(file_path_name, 'w')\n rand = int(random.random()*range_num)\n test_file.write(str(range(rand, rand + range_num)))\n test_file.close()\n return self._get_checksum()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests API call to fetch multiple NS descriptor resources
def test_get_ns_descriptors(get_ns_descriptors_keys):
    sonata_nsd = SONATAClient.Nsd(HOST_URL)
    sonata_auth = SONATAClient.Auth(HOST_URL)
    _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
    _token = json.loads(_token["data"])

    response = json.loads(sonata_nsd.get_ns_descriptors(
        token=_token["token"]["access_token"], limit=1000))
    response = json.loads(response["data"])

    assert isinstance(response, list)
    if len(response) > 0:
        assert set(get_ns_descriptors_keys).issubset(
            response[0].keys()), "All keys should be in the response"
[ "def get_many_descriptors(self, uuids):", "def test_get_list(self):\n for dataset_attr in self.dataset_attrs:\n self.datasets.append(create_external_dataset(**dataset_attr))\n self.story.datasets.add(self.datasets[0], self.datasets[1])\n self.story.save()\n self.assertEqual(len(self.story.datasets.all()), 2)\n uri = '/api/0.1/datasets/stories/%s/' % (self.story.story_id)\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 2)\n for resp_obj in self.deserialize(resp)['objects']:\n attrs = self.filter_dict(self.dataset_attrs, 'title',\n resp_obj['title'])[0]\n for key, value in attrs.items():\n if key != 'owner':\n self.assertEqual(resp_obj[key], value)", "def test_multiple_gets(uris):\n\n for uri in uris:\n print('='*10 + ' Try uri : {uri} '.format(uri=uri) + '='*10)\n resp = get_api_url(uri)\n print(resp)\n try:\n pprint(resp.json())\n except Exception as e:\n print(resp.text)", "def test_get_ns_descriptors_nsdinfoid():\r\n sonata_nsd = SONATAClient.Nsd(HOST_URL)\r\n sonata_auth = SONATAClient.Auth(HOST_URL)\r\n _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))\r\n _token = json.loads(_token[\"data\"])\r\n _nsd_list = json.loads(sonata_nsd.get_ns_descriptors(\r\n token=_token[\"token\"][\"access_token\"]))\r\n _nsd_list = json.loads(_nsd_list[\"data\"])\r\n Helpers._upload_test_nsd(_token=_token[\"token\"][\"access_token\"])\r\n\r\n for _n in _nsd_list:\r\n if \"sonata-demo\" == _n['nsd']['name']:\r\n _nsd = _n['uuid']\r\n\r\n response = json.loads(sonata_nsd.get_ns_descriptors_nsdinfoid(\r\n token=_token[\"token\"][\"access_token\"], nsdinfoid=_nsd))\r\n\r\n Helpers._delete_test_nsd(_token=_token[\"token\"][\"access_token\"])\r\n if response[\"error\"]:\r\n return True\r\n else:\r\n return False", "def test_get_api_resources(self):\n pass", "def test_v1get_api_sub_resources(self):\n pass", "def test_discovery_apis_get(self):\n pass", "def test_discovery_oauthresources_get(self):\n pass", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def test_get_list_own(self):\n self.dataset_attrs[0]['status'] = 'draft'\n self.dataset_attrs[2]['status'] = 'draft'\n self.dataset_attrs[2]['owner'] = self.user2 \n for dataset_attr in self.dataset_attrs:\n self.datasets.append(create_external_dataset(**dataset_attr))\n self.story.datasets.add(self.datasets[0], self.datasets[1])\n self.story.save()\n self.api_client.client.login(username=self.username,\n password=self.password)\n uri = '/api/0.1/datasets/stories/%s/' % (self.story.story_id)\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 2)\n for resp_obj in self.deserialize(resp)['objects']:\n attrs = self.filter_dict(self.dataset_attrs, 'title',\n resp_obj['title'])[0]\n for key, value in attrs.items():\n if key != 'owner':\n self.assertEqual(resp_obj[key], value)\n self.assertEqual(len(self.filter_dict(\n self.deserialize(resp)['objects'],\n 'title', \"Illinois Neighborhood Boundaries\")), 0)", "def test_v1alpha3get_api_sub_resources(self):\n pass", "async def test_1() -> None:\n LOG.debug(\"Test info endpoint\")\n async with aiohttp.ClientSession() as session:\n async with session.get(\"http://localhost:5050/\") as resp:\n data = await resp.json()\n if \"datasets\" in data and len(data[\"datasets\"]) > 0:\n for data_ids in 
data[\"datasets\"]:\n # In info endpoint we get all dataset ids be them PUBLIC, REGISTERED or CONTROLLED\n assert data_ids[\"id\"] in DATASET_IDS_LIST, \"Dataset ID Error or not in list.\"\n else:\n sys.exit(\"Info Endpoint Error!\")", "def test_discovery_swagger_apis_get(self):\n pass", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = client.get(e)\n assert r.status_code == 200", "def test_get_cloud_resources(self):\n pass", "def test_retrieve_list(self):\n pass", "def test_nic_list(self):\n\n partition1 = self.urihandler.get(self.hmc, '/api/partitions/1', True)\n\n # the function to be tested:\n nic_uris = partition1.get('nic-uris', [])\n\n exp_nic_uris = [\n '/api/partitions/1/nics/1',\n ]\n assert nic_uris == exp_nic_uris", "def test_get_resource(self):\n for i in range(11):\n self.app.post(f'/v1/resource/{ResourceTypeName.get()}', data=json.dumps({'actions': ['tr:action1']}),\n headers=admin_headers)\n self._test_paging('/v1/resources', admin_headers, 10, 'resources')", "async def test_datasets_access_call_multiple(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'},\n {'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], ['mock:controlled:id']))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests API call to read information about an NS descriptor resource
def test_get_ns_descriptors_nsdinfoid():
    sonata_nsd = SONATAClient.Nsd(HOST_URL)
    sonata_auth = SONATAClient.Auth(HOST_URL)
    _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
    _token = json.loads(_token["data"])
    _nsd_list = json.loads(sonata_nsd.get_ns_descriptors(
        token=_token["token"]["access_token"]))
    _nsd_list = json.loads(_nsd_list["data"])
    Helpers._upload_test_nsd(_token=_token["token"]["access_token"])

    for _n in _nsd_list:
        if "sonata-demo" == _n['nsd']['name']:
            _nsd = _n['uuid']

    response = json.loads(sonata_nsd.get_ns_descriptors_nsdinfoid(
        token=_token["token"]["access_token"], nsdinfoid=_nsd))

    Helpers._delete_test_nsd(_token=_token["token"]["access_token"])
    if response["error"]:
        return True
    else:
        return False
[ "def test_get_ns_descriptors(get_ns_descriptors_keys):\r\n sonata_nsd = SONATAClient.Nsd(HOST_URL)\r\n sonata_auth = SONATAClient.Auth(HOST_URL)\r\n _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))\r\n _token = json.loads(_token[\"data\"])\r\n\r\n response = json.loads(sonata_nsd.get_ns_descriptors(\r\n token=_token[\"token\"][\"access_token\"], limit=1000))\r\n response = json.loads(response[\"data\"])\r\n\r\n assert isinstance(response, list)\r\n if len(response) > 0:\r\n assert set(get_ns_descriptors_keys).issubset(\r\n response[0].keys()), \"All keys should be in the response\"", "def test_discovery_apis_get(self):\n pass", "def test_get_info(self):\n pass", "def test_get_access_resource(self):\n pass", "def test_get_call_args(self):\n from puresnmp.x690.types import Integer, OctetString, Sequence, ObjectIdentifier\n from puresnmp.pdu import GetRequest\n from puresnmp.const import Version\n data = readbytes('get_sysdescr_01.hex') # any dump would do\n packet = Sequence(\n Integer(Version.V2C),\n OctetString('public'),\n GetRequest(0, ObjectIdentifier(1, 2, 3))\n )\n with patch('puresnmp.send') as mck, patch('puresnmp.get_request_id') as mck2:\n mck2.return_value = 0\n mck.return_value = data\n get('::1', 'public', '1.2.3')\n mck.assert_called_with('::1', 161, bytes(packet))", "def test_inheritedDescriptors(self):\n sddaemon = self.getDaemon(7, 3)\n self.assertEqual([7, 8, 9], sddaemon.inheritedDescriptors())", "def test_get_disc_usage(self):\n res = self.api.GetDiscUsage()\n res = res.to_dict()\n\n self.assertTrue('Code' in res)\n self.assertTrue('Status' in res)\n self.assertTrue('DiscUsage' in res)\n\n self.assertTrue(isinstance(res['Code'], int))\n self.assertTrue(isinstance(res['Status'], (str, unicode)))\n self.assertTrue(isinstance(res['DiscUsage'], dict))\n\n self.assertTrue('TotalSize' in res['DiscUsage'])\n self.assertTrue('UsedSize' in res['DiscUsage'])\n\n self.assertTrue(isinstance(res['DiscUsage']['TotalSize'], (int, float, long)))\n self.assertTrue(isinstance(res['DiscUsage']['UsedSize'], (int, float, long)))\n\n self.assertEqual(res['Code'], 200)\n self.assertEqual(res['Status'].upper(), \"OK\")\n print(res)", "async def test_get_ac_descr(test_db):\n resp = await test_db.get_ac_descr(\"NC_000007.13\")\n assert resp is not None\n\n resp = await test_db.get_ac_descr(\"NC_000007.14\")\n assert resp is None", "def test_get_api_resources(self):\n pass", "def test_discovery_oauthresources_get(self):\n pass", "def test_discovery_swagger_apis_get(self):\n pass", "def test_get_serviceinfo():\n global DEVICE_OBJECT\n dobj = DEVICE_OBJECT\n services = dobj.get_services()\n\n svc_spec = 'urn:schemas-upnp-org:service:AVTransport:1'\n assert svc_spec in services\n\n actions = dobj.get_actions(svc_spec)\n assert 'Play' in actions\n\n a_params = dobj.get_action_parameters(svc_spec, 'Play')\n assert 'InstanceID' in a_params\n\n a_pinfo = dobj.get_parameter_info(svc_spec, 'Play', 'InstanceID')\n assert 'direction' in a_pinfo.keys()", "def test_get_disc_usage(self):\n response = self.api.get_disc_usage()\n self.assertTrue(response)", "def describe(ctx):", "def test_get_resource(app): # pylint: disable=redefined-outer-name\n response = app.get('/artist/1')\n\n assert response.status_code == 200\n resource = json.loads(response.get_data(as_text=True))\n assert resource['Name'] == 'AC/DC'", "def test_get_api_resources_kubevirt_io_v1(self):\n pass", "def GetResourceSample():\n client = CreateClient()\n for e1 in client.GetResources(limit=5).entry:\n e2 = 
client.GetResource(e1)\n print 'Refetched: ', e2.title.text, e2.resource_id.text", "def test_nic_list(self):\n\n partition1 = self.urihandler.get(self.hmc, '/api/partitions/1', True)\n\n # the function to be tested:\n nic_uris = partition1.get('nic-uris', [])\n\n exp_nic_uris = [\n '/api/partitions/1/nics/1',\n ]\n assert nic_uris == exp_nic_uris", "def test_get_md_description_parameters(self):\r\n cl_get_service = self.sdc_client.client('Get')\r\n message_data = cl_get_service.get_md_description(['not_existing_handle'])\r\n node = message_data.p_msg.msg_node\r\n print(etree_.tostring(node, pretty_print=True))\r\n descriptors = list(node[0]) # that is /m:GetMdDescriptionResponse/m:MdDescription/*\r\n self.assertEqual(len(descriptors), 0)\r\n existing_handle = '0x34F05500'\r\n message_data = cl_get_service.get_md_description([existing_handle])\r\n node = message_data.p_msg.msg_node\r\n self.assertTrue(existing_handle.encode('utf-8') in message_data.p_msg.raw_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests API call to delete NS descriptor resources
def test_delete_ns_descriptors_nsdinfoid(delete_ns_descriptors_nsdinfoid_keys):
    sonata_vnfpkgm = SONATAClient.VnfPkgm(HOST_URL)
    sonata_nsd = SONATAClient.Nsd(HOST_URL)
    sonata_auth = SONATAClient.Auth(HOST_URL)
    _token = json.loads(sonata_auth.auth(username=USERNAME, password=PASSWORD))
    _token = json.loads(_token["data"])
    _nsd_list = json.loads(sonata_nsd.get_ns_descriptors(
        token=_token["token"]["access_token"]))
    _nsd_list = json.loads(_nsd_list["data"])

    _nsd = None
    for _n in _nsd_list:
        if "sonata-demo" == _n['nsd']['name']:
            _nsd = _n['uuid']

    time.sleep(10) # Wait for NSD onboarding
    response = json.loads(sonata_nsd.delete_ns_descriptors_nsdinfoid(
        token=_token["token"]["access_token"], nsdinfoid=_nsd))

    assert isinstance(response, dict)
    assert response["data"] == "{\"error\":\"The NSD ID None does not exist\"}"

    time.sleep(2) #Wait for NSD onboarding
    _vnfd_list = json.loads(sonata_vnfpkgm.get_vnf_packages(
        token=_token["token"]["access_token"]))
    _vnfd_list = json.loads(_vnfd_list["data"])

    _vnfd = None
    for _v in _vnfd_list:
        if "vnfd_example" == _v['uuid']:
            _vnfd = _v['uuid']

    response = None
    if _vnfd:
        response = json.loads(sonata_vnfpkgm.delete_vnf_packages_vnfpkgid(
            token=_token["token"]["access_token"], vnfPkgId=_vnfd))

    assert isinstance(response, dict)
    assert response["data"] == ""
[ "def test_delete_descritors(self, nsd_proxy, vnfd_proxy):\n nsds = nsd_proxy.get(\"/rw-project:project[rw-project:name='default']/nsd-catalog/nsd\", list_obj=True)\n for nsd in nsds.nsd:\n xpath = \"/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]\".format(quoted_key(nsd.id))\n nsd_proxy.delete_config(xpath)\n\n nsds = nsd_proxy.get(\"/rw-project:project[rw-project:name='default']/nsd-catalog/nsd\", list_obj=True)\n assert nsds is None or len(nsds.nsd) == 0\n\n vnfds = vnfd_proxy.get(\"/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd\", list_obj=True)\n for vnfd_record in vnfds.vnfd:\n xpath = \"/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]\".format(quoted_key(vnfd_record.id))\n vnfd_proxy.delete_config(xpath)\n\n vnfds = vnfd_proxy.get(\"/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd\", list_obj=True)\n assert vnfds is None or len(vnfds.vnfd) == 0", "def test_delete_access_resource(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_delete_on_background_response_descriptor_projects_release_release_resource(self):\n pass", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete_namespaced_ingress(self):\n pass", "def test_delete_on_background_response_descriptor_projects_project_trigger_project_trigger_project_trigger_resource_spaces(self):\n pass", "def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200", "def test_delete_service(self):\n query_string = [('name', 'name_example'),\n ('httpMethod', 'httpMethod_example')]\n response = self.client.open(\n '/api/webservices',\n method='DELETE',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_delete_collection_namespaced_ingress(self):\n pass", "def test_delete_api_resource(self, mock_delete: Mock, mock_set_token: Mock) -> None:\n exonet_client = ExonetClient(\"kaSD0ffAD1ldSA92A0KODkaksda02KDAK\")\n exonet_client.delete_api_resource(\n ApiResource({\"type\": \"dns_records\", \"id\": \"qjJWA0Km8xgw\"})\n )\n\n # Check mock calls.\n assert mock_delete.call_count == 1\n assert mock_set_token.call_count == 1\n\n # Check call args.\n assert mock_set_token.call_args[0][0] == \"kaSD0ffAD1ldSA92A0KODkaksda02KDAK\"", "def test_delete_DNE(self):\n response = getWithReport('delete-supplement',self,{'assessIR':4242,'pk':8842},\"\")\n self.assertEquals(response.status_code,404)", "def test_delete_device(self):\n pass", "def test_delete_canary_config_using_delete(self):\n pass", "def test_mdt_z_proper_removing(self):\n url = reverse('api_mdt')\n response = self.client.get(url, {\"docrule_id\": \"10000\"})\n data = json.loads(str(response.content))\n for key, value in data.iteritems():\n mdt_id = data[key][\"mdt_id\"]\n response = self.client.delete(\n url,\n json.dumps({\"mdt_id\": mdt_id}),\n content_type='application/x-www-form-urlencoded',\n )\n self.assertEqual(response.status_code, 204)", "def test_lsd_delete_verify(self):\n\n new_ldap_srv_def_input = {\n 'name': 'ldap_srv_def_X',\n 'description': 'LDAP Srv Def #X',\n }\n\n # Create the LDAP Srv Def\n resp = self.urihandler.post(\n self.hmc, '/api/console/ldap-server-definitions',\n new_ldap_srv_def_input, True, True)\n\n new_ldap_srv_def_uri = resp['element-uri']\n\n # 
Verify that it exists\n self.urihandler.get(self.hmc, new_ldap_srv_def_uri, True)\n\n # the function to be tested:\n self.urihandler.delete(self.hmc, new_ldap_srv_def_uri, True)\n\n # Verify that it has been deleted\n with pytest.raises(InvalidResourceError):\n self.urihandler.get(self.hmc, new_ldap_srv_def_uri, True)", "def test_delete(self):\n return self._test(\n self.proto.delete(b\"bar\"), b\"delete bar\\r\\n\", b\"DELETED\\r\\n\", True\n )", "def delete_resources():\n #\n # Create all clients with an Application (service principal) token provider\n #\n print('Grabbing Credentials!')\n credentials, subscription_id = get_credentials()\n resource_client = ResourceManagementClient(credentials, subscription_id)\n\n resource_list = []\n\n try:\n\n test = resource_client.resources.list_by_resource_group(GROUP_NAME)\n\n for item in test:\n temp_dict = {}\n test_item_type = item.type\n test_item_provider = test_item_type.split('/')\n temp_dict['provider'] = test_item_provider[0]\n temp_dict['parent'] = test_item_provider[1]\n temp_dict['name'] = item.name\n temp_dict['id'] = item.id\n resource_list.append(temp_dict)\n\n #print(resource_list)\n print('\\n')\n\n for i, item in enumerate(resource_list):\n if item['parent'] == 'virtualMachines':\n async_delete_item = resource_client.resources.delete_by_id(item['id'], \\\n COMPUTE_API_VERSION)\n async_delete_item.wait()\n print('VM Deleted: ' + item['name'])\n del resource_list[i]\n\n for i, item in enumerate(resource_list):\n if item['parent'] == 'networkInterfaces':\n async_delete_item = resource_client.resources.delete_by_id(item['id'], \\\n NETWORK_API_VERSION)\n async_delete_item.wait()\n print('Network Interface Deleted: ' + item['name'])\n del resource_list[i]\n\n for i, item in enumerate(resource_list):\n if item['parent'] == 'virtualNetworks':\n async_delete_item = resource_client.resources.delete_by_id(item['id'], \\\n NETWORK_API_VERSION)\n async_delete_item.wait()\n print('Network Deleted: ' + item['name'])\n del resource_list[i]\n\n for i, item in enumerate(resource_list):\n if item['parent'] == 'publicIPAddresses':\n async_delete_item = resource_client.resources.delete_by_id(item['id'], \\\n NETWORK_API_VERSION)\n async_delete_item.wait()\n print('Public IP Deleted: ' + item['name'])\n del resource_list[i]\n\n for i, item in enumerate(resource_list):\n if item['parent'] == 'disks':\n async_delete_item = resource_client.resources.delete_by_id(item['id'], \\\n COMPUTE_API_VERSION)\n async_delete_item.wait()\n print('Disk Deleted: ' + item['name'])\n del resource_list[i]\n\n for i, item in enumerate(resource_list):\n if item['parent'] == 'storageAccounts':\n #DO NOT DELETE STORAGE ACCOUNT...TAKES FOREVER TO RECREATE\n #async_delete_item = resource_client.resources.delete_by_id(item['id'],\n # STORAGE_API_VERSION)\n #async_delete_item.wait()\n print('DO NOT DELETE STORAGE ACCOUNT...TAKES FOREVER TO RECREATE...')\n print('Storage Account NOT Deleted: ' + item['name'])\n del resource_list[i]\n\n for item in resource_list:\n #\n if item['provider'] == \"Microsoft.Network\":\n async_delete_item = resource_client.resources.delete_by_id(item['id'], \\\n NETWORK_API_VERSION)\n async_delete_item.wait()\n else:\n async_delete_item = resource_client.resources.delete_by_id(item['id'], \\\n COMPUTE_API_VERSION)\n async_delete_item.wait()\n print('Item Deleted: ' + item['name'])\n\n except CloudError:\n print('A VM operation failed:', traceback.format_exc(), sep='\\n')\n else:\n print('All operations completed successfully!')", "def 
test_delete_metadata(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns halo (row of data) given a ``nodeIndex``
def get_halo(self, index):
    try:
        halo = self.data.loc[index]
    except KeyError:
        raise IndexError(
            "Halo id %d not found in %s" % (index, self.filename)
        )
    return halo
[ "def halo_host(self, index):\n halo = self.get_halo(index)\n return (\n halo\n if halo.name == halo[\"hostIndex\"]\n else self.halo_host(self.get_halo(halo[\"hostIndex\"]).name)\n )", "def __get_node_by_index(root, index):\n if isinstance(root, TreePlanBinaryNode):\n node = TreePlanBuilder.__get_node_by_index(root.left_child, index)\n if node is None:\n node = TreePlanBuilder.__get_node_by_index(root.right_child, index)\n return node\n elif isinstance(root, TreePlanUnaryNode):\n return TreePlanBuilder.__get_node_by_index(root.child, index)\n elif isinstance(root, TreePlanNestedNode):\n return root if root.nested_event_index == index else None\n elif isinstance(root, TreePlanLeafNode):\n return root if root.event_index == index else None\n else:\n raise Exception(\"Illegal Root\")", "def row(self, index):\n return self.data[index - 1]", "def get_node_by_index(self, index):\n\n current = self.head\n i = 0\n\n while (current is not None):\n if i == index:\n return current.data\n else:\n current = current.next\n i += 1\n\n return \"Index not found\"", "def __getnode(self, index):\n if (index >= self.size) or (abs(index) > self.size):\n raise IndexError(\"Index out of bounds\")\n elif index >= 0:\n to_go = index\n node = self.sentinel.next\n while to_go > 0:\n node = node.next\n to_go -= 1\n return node\n\n elif index < 0:\n to_go = abs(index)\n node = self.sentinel\n while to_go > 0:\n node = node.prev\n to_go -= 1\n return node", "def nthnode(self, nl_p=None, index=0):\n # TODO: create a method called nthnodename\n if not nl_p:\n nl_p = self.getnetnodes()\n # (const nodelist_bn* nodes, int index)\n cnetica.NthNode_bn.argtypes = [c_void_p, c_int]\n cnetica.NthNode_bn.restype = c_void_p\n return cnetica.NthNode_bn(nl_p, index) # node_p", "def get_node(self, index):\n return self.__nodes[index]", "def right_child(self, index):\n return 2 * index + 2", "def _get_node_at(self, index):\n assert isinstance(index, int)\n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n return node", "def get_index(self, index):\n return self.get_node_from_index(index).data", "def get(self, index):\n return self._get_node(index)", "def getNeuron(self, index):\n\t\treturn self.loader.getNeuron(index)", "def _get_node_by_index(self, node, i):\n if not node:\n return\n left_size = self._get_size(node.left)\n\n if left_size == i:\n return node\n\n if i < left_size:\n return self._get_node_by_index(node.left, i)\n\n return self._get_node_by_index(node.right, i-left_size-1)", "def subtree_at_index(node, index):\n\n if index == 0:\n return node\n\n # Subtract 1 for the current node\n index -= 1\n\n # Go through each child of the node, and find the one that contains this index\n for child in node.children:\n child_size = child.size_of_subtree()\n if index < child_size:\n return subtree_at_index(child, index)\n index -= child_size\n\n return \"INDEX {} OUT OF BOUNDS\".format(index)", "def TestWay(index):\n if index < -len(data) or index > len(data) - 1:\n return None\n return data[index]", "def left_child(self, index):\n return 2 * index + 1", "def get_children(self, index):\r\n first_child = index * self._n + 1\r\n return range(first_child, min(first_child + self._n, len(self._hl)))", "def get_row(self, node):\n row = self.model.get_path(node)\n return row[0]", "def FindLeafNode(self, node, index):\n if node.start 
> index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds indices of all progenitors of a halo, recursively.
def halo_progenitor_ids(self, index):
    _progenitors = []

    def rec(i):
        _progenitor_ids = self.data[self.data["descendantHost"] == i][
            "hostIndex"
        ].unique()
        logging.debug("Progenitors recursion: %d > %d (%d progenitors)", index, i, len(_progenitor_ids))
        if len(_progenitor_ids) == 0:
            return
        for _progenitor_id in _progenitor_ids:
            # if _progenitor_id not in _progenitors:  # TODO: this only eliminates fly-byes
            _progenitors.append(_progenitor_id)
            rec(_progenitor_id)

    rec(index)

    logging.info(
        "%d progenitors found for halo %d", len(_progenitors), index
    )
    return _progenitors
[ "def halo_progenitor_ids(self, index):\n _progenitors = []\n\n def rec(i):\n _progenitor_ids = self.data[self.data[\"descendantHost\"] == i][\n \"hostIndex\"\n ].unique()\n logging.debug(\n \"Progenitors recursion: %d > %d (%d progenitors)\",\n index,\n i,\n len(_progenitor_ids),\n )\n if len(_progenitor_ids) == 0:\n return\n for _progenitor_id in _progenitor_ids:\n # TODO: this only eliminates fly-bys:\n # if _progenitor_id not in _progenitors:\n _progenitors.append(_progenitor_id)\n rec(_progenitor_id)\n\n rec(index)\n\n logging.info(\n \"%d progenitors found for halo %d\", len(_progenitors), index\n )\n return _progenitors", "def get_pt_id_targets(part, h, halo_inds, r_frac=1):\n assert(np.size(halo_inds) > 0)\n buffer_frac = 1.3 * np.sqrt(r_frac)\n # search radii larger than r_vir will result in more particles.\n npart_tot_guess = np.math.ceil(sum(h.data.np[halo_inds]) * buffer_frac)\n idlist = np.zeros(npart_tot_guess, dtype=np.int32)\n x = part.x\n y = part.y\n z = part.z\n print(\"Expected # of particles:\", npart_tot_guess)\n nhalo = len(halo_inds)\n halo_range_list = np.zeros((nhalo) + 1, dtype=np.int32)\n len_idlist = len(idlist)\n for i, ihalo in enumerate(halo_inds):\n print(\"# part in this halo\", h.data.np[ihalo])\n xr = [h.data.x[ihalo] - h.data.rvir[ihalo] * r_frac,\n h.data.x[ihalo] + h.data.rvir[ihalo] * r_frac]\n yr = [h.data.y[ihalo] - h.data.rvir[ihalo] * r_frac,\n h.data.y[ihalo] + h.data.rvir[ihalo] * r_frac]\n zr = [h.data.z[ihalo] - h.data.rvir[ihalo] * r_frac,\n h.data.z[ihalo] + h.data.rvir[ihalo] * r_frac]\n ind_x = np.where( (x > xr[0]) & (x < xr[1]))[0]\n # out of 8.1GB particle information()\n if len(ind_x) > 0:\n ind_y = np.where((y[ind_x] > yr[0]) & (y[ind_x] < yr[1]))[0]\n if len(ind_y) > 0:\n ind_z = np.where((z[ind_x[ind_y]] > zr[0]) & (z[ind_x[ind_y]] < zr[1]))[0]\n # If array is not large enough, append it.\n halo_range_list[i + 1] = halo_range_list[i] + len(ind_z)\n if halo_range_list[i + 1] > len_idlist:\n if i + 1 == nhalo:\n # If it's the last halo, append by exact difference.\n npart_more = halo_range_list[i + 1] - len_idlist\n else:\n # Otherwise, guess from previous halos.\n # 1.5 * mean npart so far * number of remaining halos\n npart_more = int(1.5 * (halo_range_list[i] / (i + 1)) * (nhalo - i))\n print(\"increase the array size by {:d} from {:d}\".format(npart_more, len_idlist))\n idlist = np.append(idlist, np.zeros(npart_more, dtype=np.int32))\n len_idlist= len(idlist)\n\n idlist[halo_range_list[i]:halo_range_list[i+1]] = part.id[ind_x[ind_y[ind_z]]]\n print(halo_range_list[i+1], len(idlist))\n \n return halo_range_list, idlist", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def neighborhood(G,n,o):\n base = G[n]\n neighbors = {}\n neighbors[n] = 0\n newNodes = set(neighbors.keys())\n for i in range(1,o+1):\n #for node in neighbors.keys():\n nodes = newNodes.copy()\n newNodes = set()\n for node in nodes:\n branch = G[node]\n for node in branch:\n if node not in neighbors:\n newNodes.add(node)\n neighbors[node]=i\n return neighbors", "def iter_all_orbit_indices(self,**kwargs):\n\t\tfor i in range(self.n_orbits):\n\t\t\tfirst_north = np.flatnonzero(self.oi==i)[0]\n\t\t\tfirst_south = np.flatnonzero(self.oi==-1*i)[0]\n\t\t\tif first_north < first_south:\n\t\t\t\themi_order = ['N','S']\n\t\t\telse:\n\t\t\t\themi_order = 
['S','N']\n\t\t\t\t\n\t\t\tfor hemi in hemi_order:\n\t\t\t\tyield i,hemi", "def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0", "def iter_all_hypo_isomorphic(hypo_indicator, nhypo):\n hypo_ind = [i for i in range(nhypo)]\n for permuted in uperm(hypo_ind):\n perm_hypo_indicator = []\n for li in hypo_indicator:\n if len(li) >= 1:\n perm_li = [permuted[v] for v in li]\n perm_hypo_indicator.append(sorted(perm_li))\n elif len(li) == 0:\n perm_hypo_indicator.append(li)\n yield perm_hypo_indicator", "def h_index(self):\n\n def hindex(ls):\n if ls:\n ls.sort() # 排序算法 最耗时的部分\n h = 1 if ls[-1] > 0 else 0\n small = ls[-1]\n for i in ls[-2::-1]:\n if i == small and i > h:\n h += 1\n elif i > h:\n h += 1\n small = i\n else:\n break\n return h\n else:\n return 0\n\n try:\n degrees = dict(self.in_degree() if self.G.is_directed() else self.degree())\n neighbors = self.in_neighbor() if self.G.is_directed() else self.neighbor()\n h_index = {node: hindex([degrees[i] for i in neighbors[node]]) for node in self.G}\n return self.order_dict(h_index, index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))", "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def indices(self):", "def iter_hypo_indicator(nhypo, n_pattern, n_overlap):\n # for i, x in enumerate(iter_hypo_indicator(2,6,5)):\n # print(i, x)\n base_bag = [[]]\n base_count = 0\n additional_bag =[[]]\n additional_count = 0\n for hypo_base in pattern_hypo_product_space(nhypo, n_pattern):\n if hypo_indicator_filter(hypo_base, nhypo, base_bag):\n base_bag.append([])\n base_count += 1\n base_bag[base_count] = hypo_base\n # print(base_bag)\n for hypo_overlap in pattern_powerhypo_product_space(nhypo-1, n_pattern):\n if overlap_filter(hypo_overlap, n_overlap):\n hypo_overlap = remap_overlap_indicator(hypo_overlap, hypo_base, nhypo)\n hypo_indicator = concatenate_hypo_indicators(hypo_base, hypo_overlap)\n if not is_hypobag_isomorphic(additional_bag, hypo_indicator, nhypo):\n additional_bag.append([])\n additional_count += 1\n additional_bag[additional_count] = hypo_indicator\n # print(additional_bag)\n yield hypo_indicator", "def _natural_indices(self):\n r = self.num_rings\n if self.num_axial is None:\n for a in range(-r + 1, r):\n for x in range(-r + 1, r):\n idx = (x, a)\n if self.is_valid_index(idx):\n yield idx\n else:\n for z in range(self.num_axial):\n for a in range(-r + 1, r):\n for x in range(-r + 1, r):\n idx = (x, a, z)\n if self.is_valid_index(idx):\n yield idx", "def hierarchy_indices(query, divisors):\n indices = []\n for d in divisors:\n #print(\"{0} = {1} * {2} + {3} \".format(query, d, query//d, query%d))\n indices.append(query//d)\n query = query % d\n indices.append(0) \n return indices", "def query_halo(self, 
halo):\n ind = np.nonzero(self.haloes == halo)[0]\n return self.mass[ind], self.birth_times[ind], self.radii[ind]", "def childWellIndices(self):\n return self._wellIndices", "def get_indexes(prog):\n # 'Nice' one-liner that iterates through prog, reads each line and stores if it's jmp or nop command.\n return {idx:val for idx,val in enumerate(prog) if read_line(val)[0] in ['jmp','nop']}", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def build_hals_main_branch_indices(self, do_host=False):\n\n def get_mmp_index(my_id, previous_hal, mmp_prop='vel.circ.max'):\n # first get the indices in the previous catalog where the descendant is my ID\n if not len(previous_hal):\n return -2**31\n\n progenitor_indices = np.where(\n previous_hal.prop('descendant.id') == my_id)[0]\n if not progenitor_indices.size:\n return -2**31\n\n # then get the sorting values of the progenitor halos\n progenitor_mmp_prop = previous_hal.prop(\n mmp_prop, progenitor_indices)\n\n # then return the index of the one that's the biggest\n return progenitor_indices[np.argmax(progenitor_mmp_prop)]\n\n assert self.hals is not None\n if do_host == True or do_host == 'host' or do_host == 'host1':\n starting_index = self.hals[-1].prop('host.index')[0]\n store_name = 'host_hals_mb_indices'\n elif do_host == 'host2' or do_host == '2':\n starting_index = self.hals[-1].prop('host2.index')[0]\n store_name = 'host2_hals_mb_indices'\n else:\n assert self.index is not None\n starting_index = self.index\n store_name = 'hals_mb_indices'\n\n res = np.empty(len(self.hals), dtype=int)\n res.fill(-2**31)\n\n current_snapshot_index = len(self.hals) - 1\n my_index = starting_index\n while my_index >= 0:\n res[current_snapshot_index] = my_index\n\n my_id = self.hals[current_snapshot_index].prop('id', my_index)\n my_index = get_mmp_index(\n my_id, self.hals[current_snapshot_index-1])\n current_snapshot_index -= 1\n\n self.__dict__[store_name] = res\n return res", "def vitoria_1(tab,jog):\r\n for i in range(1,4):\r\n win = [(0,jog,jog), (jog,0,jog), (jog,jog,0)]\r\n coluna = obter_coluna(tab, i)\r\n linha = obter_linha(tab, i) \r\n if coluna in win:\r\n return i+3*win.index(coluna)\r\n elif linha in win:\r\n return 3*i-2+win.index(linha) \r\n if i!=3:\r\n diagonal = obter_diagonal(tab, i)\r\n if diagonal in win:\r\n if i==1:\r\n return i+4*win.index(diagonal)\r\n\r\n else:\r\n return 7-2*win.index(diagonal)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the host of a halo. Recursively continues until it hits the main halo, in case of multiply embedded subhaloes.
def halo_host(self, index):
    halo = self.get_halo(index)
    return (
        halo
        if halo.name == halo["hostIndex"]
        else self.halo_host(self.get_halo(halo["hostIndex"]).name)
    )
[ "def extract_halos( bricks, grid, halo_type, sides='all' ):\n assert grid.halo_shape.any(), \"Grid has no halo!\"\n assert halo_type in ('outer', 'inner')\n assert sides in ('lower', 'upper', 'all')\n\n def zero_fill(vol, box, full_box):\n \"\"\"\n Given a volume, it's corresponding box, and a 'full box' that encompasses it,\n Return a volume that fills the full box, padding with zeros if necessary.\n \"\"\"\n if (box == full_box).all():\n return vol\n else:\n full_vol = np.zeros(full_box[1] - full_box[0], vol.dtype)\n overwrite_subvol(full_vol, box - full_box[0], vol)\n return full_vol\n\n def _extract_subbrick(brick, box):\n \"\"\"\n Given a brick and the box to extract from it,\n return a new Brick with the same logical_box as the original brick,\n but only containing the subvolume corresponding to the given box.\n\n If necessary, the returned subbrick will be zero-padded to full\n the entirety of the given box.\n \"\"\"\n box_clipped = box_intersection(box, brick.physical_box)\n if (box_clipped[1] - box_clipped[0] <= 0).any():\n return None\n\n subvol = extract_subvol(brick.volume, box_clipped - brick.physical_box[0])\n full_subvol = zero_fill(subvol, box_clipped, box)\n\n # FIXME: Should we bother with location_id?\n # (If we don't, realign operations won't work,\n # but it's not clear what that would mean for halos anyway)\n subbrick = Brick(brick.logical_box, box, full_subvol, compression=brick.compression)\n return subbrick\n\n def _extract_halo_sides(brick):\n \"\"\"\n For the given brick, extract a halo from each side.\n For example, if the brick is 3D and sides='all', extract 6 halos.\n If sides='lower', only extract 3 halos: from the lower Z-face, lower\n Y-face, and lower (left) X-face.\n\n Each halo will have have the same dimensions as the brick's logical box,\n except for the axis from which the halo was extracted.\n\n For example, if a Brick's logical box is ``[[10,20], [20,40]]`` and a physical\n box 1 pixel wider in each dimension, its 'outer' halos along the Y axis would have\n physical boxes of ``[[9,10], [20,40]]`` and ``[[10,11], [20,40]]``.\n\n If the brick's physical_box is smaller than its logical_box along any axis,\n the halos for that axis will be zero-padded to ensure that the halo dimensions\n always correspond to the brick's logical box.\n\n The resulting halos are returned as bricks whose logical_box matches the original\n brick (indicating where they came from), but whose physical boxes correspond to\n the exact region they occupy in space.\n \"\"\"\n halo_bricks = []\n for axis, halo in enumerate(grid.halo_shape):\n logical_lower, logical_upper = brick.logical_box[:,axis]\n\n if sides in ('lower', 'all'):\n lower_halo_box = brick.logical_box.copy()\n if halo_type == 'outer':\n lower_halo_box[:, axis] = (logical_lower - halo, logical_lower)\n else:\n lower_halo_box[:, axis] = (logical_lower, logical_lower + halo)\n\n lower_halo_brick = _extract_subbrick(brick, lower_halo_box)\n if lower_halo_brick is not None:\n halo_bricks.append(lower_halo_brick)\n\n if sides in ('upper', 'all'):\n upper_halo_box = brick.logical_box.copy()\n if halo_type == 'outer':\n upper_halo_box[:, axis] = (logical_upper, logical_upper + halo)\n else:\n upper_halo_box[:, axis] = (logical_upper - halo, logical_upper)\n\n upper_halo_brick = _extract_subbrick(brick, upper_halo_box)\n if upper_halo_brick is not None:\n halo_bricks.append(upper_halo_brick)\n\n return halo_bricks\n\n halo_bricks = bricks.map(_extract_halo_sides).flatten()\n return halo_bricks", "def 
updateSubhalos(host,file, host2sub):\n if not (host.ID in host2sub):\n return\n g = open(file,'r')\n for posn in host2sub[host.ID]:\n g.seek(posn)\n line = g.readline()\n sub = MTH.MTHalo(line)\n if sub.pid != host.ID:\n print 'WARNING: ERROR: halo not sub of host! Proceeding anyway'\n tree = MT.MergerTree(file,sub.ID)\n tree.haloList.append(sub)\n if sub.num_prog==0:\n tree.progenitors.append(sub)\n # Now deal with all other halos in the tree\n index = 1\n line = g.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = g.readline()\n index += 1\n host.subhalos.append(sub)\n g.close()", "def find_host_for_osd(osd, osd_status):\n\n for obj in osd_status['nodes']:\n if obj['type'] == 'host':\n if osd in obj['children']:\n return obj['name']\n\n return 'unknown'", "def updateSubhalos_old(host, file):\n f = open(file, 'r')\n line = f.readline()\n i = 0\n while line != '':\n if line[0:5] == \"#tree\":\n #if i%10000 == 0:\n #print 'subhalo finder scanned ', i, ' trees'\n i+=1\n num = int(line[6::])\n # Deal with a=0 halo independently\n line = f.readline()\n sub = MTH.MTHalo(line)\n if sub.pid == host.ID: # not upid. only subhalos, not subsub etc.\n #build tree, add to subhalo list of host\n tree = MT.MergerTree(file, num)\n tree.haloList.append(sub)\n if sub.num_prog ==0:\n tree.progenitors.append(sub)\n\n # Now deal with all other halos in the tree\n index = 1\n line = f.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = f.readline()\n index +=1\n # add a=1 subhalo to subhalo list of host (maybe should add tree?)\n host.subhalos.append(sub)\n else:\n line = f.readline()\n else:\n line = f.readline()\n f.close()", "def find_host(self, hostname):\n for host in self.net.hosts:\n if host.name == hostname:\n return host\n return None", "def host_network(self, host):\n for network in self.config['network']:\n if host in network['hosts']:\n return network['label']\n return None", "def build_hals_main_branch_indices(self, do_host=False):\n\n def get_mmp_index(my_id, previous_hal, mmp_prop='vel.circ.max'):\n # first get the indices in the previous catalog where the descendant is my ID\n if not len(previous_hal):\n return -2**31\n\n progenitor_indices = np.where(\n previous_hal.prop('descendant.id') == my_id)[0]\n if not progenitor_indices.size:\n return -2**31\n\n # then get the sorting values of the progenitor halos\n progenitor_mmp_prop = previous_hal.prop(\n mmp_prop, progenitor_indices)\n\n # then return the index of the one that's the biggest\n return progenitor_indices[np.argmax(progenitor_mmp_prop)]\n\n assert self.hals is not None\n if do_host == True or do_host == 'host' or do_host == 'host1':\n starting_index = self.hals[-1].prop('host.index')[0]\n store_name = 'host_hals_mb_indices'\n elif do_host == 'host2' or do_host == '2':\n starting_index = self.hals[-1].prop('host2.index')[0]\n store_name = 'host2_hals_mb_indices'\n else:\n assert self.index is not None\n starting_index = self.index\n store_name = 'hals_mb_indices'\n\n res = np.empty(len(self.hals), dtype=int)\n res.fill(-2**31)\n\n current_snapshot_index = len(self.hals) - 1\n my_index = starting_index\n while my_index >= 0:\n res[current_snapshot_index] = my_index\n\n my_id = self.hals[current_snapshot_index].prop('id', 
my_index)\n my_index = get_mmp_index(\n my_id, self.hals[current_snapshot_index-1])\n current_snapshot_index -= 1\n\n self.__dict__[store_name] = res\n return res", "def _find_host_id(self, object_):\n if object_.getParentType() == 'Host':\n return object_.getParent()", "def check_main_branches(df):\n if not df.scale.is_monotonic_decreasing:\n raise RuntimeError(\"`df.scale` is not descending.\")\n\n # First halo as no descendants and the last halo has no progenitor.\n desc_ids = df.desc_id.values[1:]\n halo_ids = df.id.values[:-1]\n # The desc_id of the last halo should be the next halo_id\n if np.all(desc_ids == halo_ids):\n return None\n else:\n # Return ID of the last correct halo\n return df.id.values[np.argmin(desc_ids == halo_ids)]", "def host_of_ob(self, obhost: ObservedHost) -> Host:\n for x in Host.objects():\n if x.fqdn == obhost.fqdn:\n return x\n raise Exception(\"Could not find Host\")", "def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i + 1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i + 1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host + 1].properties['children'].append(i + 1)\n except KeyError:\n pass", "def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i+1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i+1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host+1].properties['children'].append(i+1)\n except KeyError:\n pass", "def get_host_structure(impurity_workflow_or_calc):\n #TODO extract host parent no from input but take into account calculation of host GF from inside kkrimp full workflow\n print(\n f'This is line in the combine impurity tool files at:: /opt/aiida-kkr/aiida_kkr/tools for deburging the line',\n end=' '\n )\n print(f'impurity_workflow_or_calc: {impurity_workflow_or_calc}')\n if impurity_workflow_or_calc.process_class == KkrimpCalculation:\n host_parent = impurity_workflow_or_calc.inputs.host_Greenfunction_folder\n # Here 'impurity_workflow_or_calc.process_class== combine_imps_wc' occurs circular import with this present module\n elif impurity_workflow_or_calc.process_class.__name__ == 'combine_imps_wc':\n imp_sub_wc = impurity_workflow_or_calc.get_outgoing(node_class=kkr_imp_sub_wc).first().node\n kkr_imp_calc = imp_sub_wc.get_outgoing(node_class=KkrimpCalculation).all()[-1].node\n host_parent = kkr_imp_calc.inputs.host_Greenfunction_folder\n elif impurity_workflow_or_calc.process_class == kkr_imp_sub_wc:\n kkr_imp_calc = impurity_workflow_or_calc.get_outgoing(node_class=KkrimpCalculation).all()[-1].node\n host_parent = kkr_imp_calc.inputs.host_Greenfunction_folder\n elif 'remote_data' in impurity_workflow_or_calc.inputs:\n # this is the case if impurity_workflow_or_calc workflow is kkr_imp_sub\n host_parent = impurity_workflow_or_calc.inputs.remote_data\n elif 'remote_data_gf' in impurity_workflow_or_calc.inputs:\n host_parent = impurity_workflow_or_calc.inputs.remote_data_gf\n else:\n host_parent = impurity_workflow_or_calc.inputs.remote_data_host\n host_structure, _ = VoronoiCalculation.find_parent_structure(host_parent)\n\n return host_structure", "def _calculate_host_barycenter(\n option: ProductionOption,\n hierarchy: ProductionApplicationHierarchy\n ) -> (float, float):\n num_elements = 0\n x = 0\n y = 0\n for daughter_element in option.mapping.values():\n host_element = hierarchy.map(daughter_element, 'D', 'H')\n if isinstance(host_element, Vertex):\n num_elements += 
1\n host_x = float(host_element.attr['x'])\n host_y = float(host_element.attr['y'])\n x += host_x\n y += host_y\n log.debug(f' Host vertex at position {(host_x, host_y)}.')\n elif isinstance(host_element, Edge):\n mother_element = hierarchy.map(daughter_element, 'D', 'M')\n if mother_element.vertex1 is not None:\n host_vertex1 = hierarchy.map(mother_element.vertex1, 'M', 'H')\n host_x = float(host_vertex1.attr[\"x\"])\n host_y = float(host_vertex1.attr[\"y\"])\n log.debug(f' Host vertex1 position: '\n f'{(host_x, host_y)} {host_vertex1}.')\n num_elements += 1\n x += host_x\n y += host_y\n if mother_element.vertex2 is not None:\n host_vertex2 = hierarchy.map(mother_element.vertex2, 'M', 'H')\n host_x = float(host_vertex2.attr[\"x\"])\n host_y = float(host_vertex2.attr[\"y\"])\n log.debug(f' Host vertex2 position: '\n f'{(host_x, host_y)} {host_vertex2}.')\n num_elements += 1\n x += host_x\n y += host_y\n #TODO: Handle the case without any vertices\n if num_elements == 0:\n for mother_element in option.mother_graph:\n host_element = hierarchy.map(mother_element, 'M', 'H')\n if isinstance(host_element, Vertex):\n num_elements += 1\n host_x = float(host_element.attr['x'])\n host_y = float(host_element.attr['y'])\n x += host_x\n y += host_y\n log.debug(f' Host vertex at position {(host_x, host_y)}.')\n elif isinstance(host_element, Edge):\n if host_element.vertex1 is not None:\n host_vertex1 = host_element.vertex1\n host_x = float(host_vertex1.attr[\"x\"])\n host_y = float(host_vertex1.attr[\"y\"])\n log.debug(f' Host vertex1 position: '\n f'{(host_x, host_y)} {host_vertex1}.')\n num_elements += 1\n x += host_x\n y += host_y\n if host_element.vertex2 is not None:\n host_vertex2 = host_element.vertex2\n host_x = float(host_vertex2.attr[\"x\"])\n host_y = float(host_vertex2.attr[\"y\"])\n log.debug(f' Host vertex2 position: '\n f'{(host_x, host_y)} {host_vertex2}.')\n num_elements += 1\n x += host_x\n y += host_y\n x /= num_elements\n y /= num_elements\n return x, y", "def find_sandwich_bottom(blk):\n # Always follow the main branch of a flow: the last connection.\n _blk = blk.connections[len(blk.connections) - 1]\n while _blk is not None:\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return None\n if _blk.name in COLLAPSIBLE:\n return _blk\n _blk = _blk.connections[len(_blk.connections) - 1]\n return None", "def plotHostSubHalos(pos_z_cen_halo, pos_z_sat_halo, pos_z_AGN): \n ra_cen, dec_cen = pos_z_cen_halo[0], pos_z_cen_halo[1]\n ra_sat, dec_sat = pos_z_sat_halo[0], pos_z_sat_halo[1]\n \n fig, ax = plt.subplots(1,1,figsize=(9,8)) \n # plotting host halos\n host_halos = ax.plot(ra_cen, dec_cen, '.', color= 'k', markersize=0.06, label=r'Host-halos $P_{id}=-1$', alpha=0.4)\n \n # plotting sat halos\n sat_halos = ax.plot(ra_sat, dec_sat, 'o', color='#07d9f5', markersize=0.07, label=r'Satellite halos $P_{id} \\neq -1$', alpha=0.7)\n \n # plotting AGNs\n agn = ax.plot(pos_z_AGN[0], pos_z_AGN[1], '*', color='#fff717', markersize=6.5, label=r'AGN', markeredgecolor='w', markeredgewidth=0.4)\n\n # labeling axes and defining limits\n xlim = [np.min(pos_z_AGN[0]), np.max(pos_z_AGN[0])]\n ylim = [np.min(pos_z_AGN[1]), np.max(pos_z_AGN[1])]\n setLabel(ax, 'R.A. (deg)', 'Dec (deg)', '', xlim, ylim, legend=True) \n \n print('AGNs: %d, Host (central) halos: %.2e, Sattelite halos: %.2e'%(len(pos_z_AGN[0]), len(ra_cen), len(ra_sat)))\n return", "def find_host(sources, initialGuess=(2090/2.0, 2108/2.0), searchRadius=200):\n #todo(this fails. 
It is not selecting the right thing at all.)\n # Where, in array `soucres`, are the objects close to initialGuess?\n\n #make a holding varriable\n centerIDs = []\n #loop through `sources` and save if its center is inside search area\n for i, x in enumerate(sources['x']):\n if ((x-initialGuess[0])**2 + (sources['y'][i]-initialGuess[1])**2) < searchRadius**2:\n centerIDs.append(i)\n \n #check to see if something is found, or else `np.argmax` fails badly\n if len(centerIDs) == 0:\n warnings.warn(\"This SN can't be found in initial search\")\n centerIDs = [] #just play it safe\n\n #increase search area till you find somthing. \n while len(centerIDs) == 0: \n #increase from 200 -> 500 for hst or propotinally\n searchRadius *= 2.5\n print('searchRadius: ', searchRadius)\n print('initialGuess: ', initialGuess)\n for i, x in enumerate(sources['x']):\n if ((x-initialGuess[0])**2 + (sources['y'][i]-initialGuess[1])**2) < searchRadius**2:\n centerIDs.append(i)\n if searchRadius > 1000:\n from sys import exit; exit('You fail and got a search radius of {}.'.format(searchRadius))\n \n # Select largest of the center objects, but save the ID\n #this selects the centerID associated with the max (in size) of the central cources\n idx = centerIDs[np.argmax(sources['npix'][centerIDs])]\n #todo(add error for too many found)\n #todo(implement a way to fail greacefully if argmax is empty)\n\n host = sources[['npix', 'x', 'y', 'a', 'b', 'theta']][idx]\n return host", "def hill_climbing_search(problem, h=None, display=False):\n\texpanded_nodes = 0\n\tfood_nodes = 0\n\th = memoize(h or problem.h, 'h')\n\tnode = Node(problem.initial)\n\tfinished = False\n\texplored = set()\n\ttam = 0\n\n\twhile not finished:\n\t\tif problem.goal_test(node.state):\n\t\t\tif display:\n\t\t\t\tprint(\"Goal found \", len(explored), \" paths have been expanded.\")\n\t\t\treturn node, expanded_nodes, food_nodes, tam\n\t\texpanded_nodes += 1\n\t\tfood_nodes += problem.check_food(node.state)\n\t\texplored.add(node.state)\n\n\t\tnext_node = node\n\t\tbest = []\n\t\tfor child in node.expand(problem):\n\t\t\tif h(child) < h(next_node):\n\t\t\t\tbest.clear()\n\t\t\t\tnext_node = child\n\t\t\t\tbest.append(child)\n\t\t\telif h(child) == h(next_node):\n\t\t\t\tbest.append(child)\n\t\t\t\n\t\tif tam < len(best):\n\t\t\ttam = len(best)\n\n\t\tif next_node == node:\n\t\t\tfinished = True\n\t\telse:\n\t\t\tnode = random.choice(best)\n\n\tif display:\n\t\tprint(\"Goal was not found, \", len(explored), \" paths have been expanded\")\n\treturn node, expanded_nodes, food_nodes, tam", "def find_attached_h(atom_id, rdmol, my_mol=None):\r\n # Get all the bonds\r\n bonds = rdmol.GetAtomWithIdx(atom_id).GetBonds()\r\n # List for the output to go\r\n attached_hs = []\r\n met_c = 0\r\n his_c = 0\r\n for bond in bonds:\r\n end_id = bond.GetEndAtomIdx()\r\n start_id = bond.GetBeginAtomIdx()\r\n # Check it's not the start atom\r\n if end_id != atom_id:\r\n if bond.GetEndAtom().GetSmarts() == \"[H]\":\r\n attached_hs.append(bond.GetEndAtom())\r\n elif bond.GetEndAtom().GetSmarts() == \"C\":\r\n met_c += 1\r\n elif bond.GetEndAtom().GetSmarts() == \"c\":\r\n his_c += 1\r\n elif start_id != atom_id:\r\n if bond.GetBeginAtom().GetSmarts() == \"[H]\":\r\n attached_hs.append(bond.GetBeginAtom())\r\n elif bond.GetBeginAtom().GetSmarts() == \"C\":\r\n met_c += 1\r\n elif bond.GetBeginAtom().GetSmarts() == \"c\":\r\n his_c += 1\r\n if attached_hs: \r\n return attached_hs\r\n elif met_c == 3:\r\n #print \"METHYLATED AMINE\"\r\n return \"METHYLATED\"\r\n elif his_c == 2:\r\n 
#print \"WRONG HISTIDINE\"\r\n return \"HISTIDINE\"\r\n elif my_mol:\r\n return my_mol\r\n else:\r\n print atom_id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds mass of central halo and all subhaloes.
def halo_mass(self, index): return self.data[self.data["hostIndex"] == index][ "particleNumber" ].sum()
[ "def get_halfmass_radius(halo):\n # generate a profile for the halo's stellar particles\n with pynbody.analysis.halo.center(halo, mode=\"pot\"):\n p = pynbody.analysis.profile.Profile(halo.s, nbins=1000, ndim=2)\n\n M_star = sum(halo.s[\"mass\"])\n\n # Find the radius at which half the stellar mass is contained\n # Interpolate between the point before we exceed half the mass and the point after\n for i, mass_enc in enumerate(p[\"mass_enc\"]):\n if mass_enc > 0.5 * M_star:\n return np.mean(p[\"rbins\"][i-1:i+1])", "def center_of_mass(entity, geometric=False):\n \n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Residues, added 2018-03-17 by Nathan\n elif hasattr(entity, '__iter__') and [x for x in entity if x.level == 'R']:\n atom_list = []\n for res in entity:\n atom_list.extend(list(res.get_atoms()))\n # List of Atoms\n elif hasattr(entity, '__iter__') and [x for x in entity if x.level == 'A']:\n atom_list = entity\n else: # Some other weirdo object\n raise ValueError(\"Center of Mass can only be calculated from the following objects:\\n\"\n \"Structure, Model, Chain, Residue, list of Atoms.\")\n \n masses = []\n positions = [ [], [], [] ] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] ]\n \n for atom in atom_list:\n masses.append(atom.mass)\n \n for i, coord in enumerate(np.array(atom.coord).tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if 'ukn' in set(masses) and not geometric:\n raise ValueError(\"Some Atoms don't have an element assigned.\\n\"\n \"Try adding them manually or calculate the geometrical center of mass instead.\")\n \n if geometric:\n return [sum(coord_list)/len(masses) for coord_list in positions]\n else: \n w_pos = [ [], [], [] ]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index]*atom_mass)\n w_pos[1].append(positions[1][atom_index]*atom_mass)\n w_pos[2].append(positions[2][atom_index]*atom_mass)\n\n return [sum(coord_list)/sum(masses) for coord_list in w_pos]", "def get_mass(H, a=0.5):\n\n\t# Computation of the asteroid diameter [m]\n\tD = 1000 * 1329 / np.sqrt(a) * 10 ** (- 0.2 * H)\n\n\t# Computation of the asteroids mass [kg] assuming a mean volumic mass\n\t# rho = 2600 kg/m^3 and a spherical shape for the body\n\tm = 4 / 3 * np.pi * (0.5 * D)**3 * 2600\n\n\treturn m", "def getHMMass(self):\n nucs = []\n for nucName in self.getNuclides():\n if nucDir.isHeavyMetal(nucName):\n nucs.append(nucName)\n mass = self.getMass(nucs)\n return mass", "def query_halo(self, halo):\n ind = np.nonzero(self.haloes == halo)[0]\n return self.mass[ind], self.birth_times[ind], self.radii[ind]", "def get_center_of_mass_allies(self,obs):", "def center_of_mass(self, entity, geometric=False):\n\n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Atoms\n elif hasattr(entity, \"__iter__\") and [x for x in entity if x.level == \"A\"]:\n atom_list = entity\n # Some other weirdo object\n else:\n raise ValueError(\n f\"Center of Mass can only be calculated from the following objects:\\n\"\n f\"Structure, Model, Chain, Residue, list of Atoms.\"\n )\n\n masses = []\n positions = [[], [], []] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] 
]\n\n for atom in atom_list:\n masses.append(atom.mass)\n\n for i, coord in enumerate(atom.coord.tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if \"ukn\" in set(masses) and not geometric:\n raise ValueError(\n f\"Some atoms don't have an element assigned.\\n\"\n f\"Try adding them manually or calculate the geometrical center of mass instead.\"\n )\n\n if geometric:\n return [sum(coord_list) / len(masses) for coord_list in positions]\n else:\n w_pos = [[], [], []]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index] * atom_mass)\n w_pos[1].append(positions[1][atom_index] * atom_mass)\n w_pos[2].append(positions[2][atom_index] * atom_mass)\n\n return [sum(coord_list) / sum(masses) for coord_list in w_pos]", "def get_halfmass_disc(halo):\n r = get_halfmass_radius(halo)\n cen = pynbody.analysis.halo.center(halo, mode=\"pot\", retcen=True)\n with pynbody.analysis.angmom.faceon(halo):\n return pynbody.filt.Disc(r, height=conf.SLICE_DISC_HEIGHT, cen=cen)", "def get_mass(self) -> float:\n mass = 0\n if hasattr(self, \"SOLUTEATOM\"):\n for i in self.SOLUTEATOM.content:\n mass += i.MASS\n return mass", "def center_of_mass(self):\n masses = cctk.OneIndexedArray([get_avg_mass(z) for z in self.atomic_numbers]).reshape(-1,1)\n return np.sum(masses * self.geometry, axis=0) / np.sum(masses)", "def gas_mass(cell, info):\n msun = 1.98892e33 # solar mass in gram.\n return (cell['rho'] * info.unit_d) * (cell['dx'] * info.unit_l)**3 / msun", "def get_center_of_mass_enemies(self,obs):", "def test_molar_mass():\n first = molar_mass({\"H\":2, \"O\":1})\n assert first == approx(18.01528)\n second = parse_formula(\"C6H6\")\n test2 = molar_mass(second)\n assert test2 == approx(78.11184)\n third = parse_formula(\"PO4H2(CH2)12CH3\")\n test3 = molar_mass(third)\n assert test3 == approx(280.34072)", "def get_mass(self, total=False):\n if self.n == 0:\n return 0.0\n\n grid_cid0 = self.grid.get_positions()\n p1 = grid_cid0[self.node_ids[:, 0]]\n p2 = grid_cid0[self.node_ids[:, 1]]\n L = p2 - p1\n rho = self.model.Materials.get_rho(self.material_id)\n mass = norm(L, axis=1) * self.A * rho + self.nsm\n if total:\n return mass.sum()\n else:\n return mass", "def getHMMoles(self):\n return (\n self.getHMDens()\n / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n * self.getVolume()\n * self.getSymmetryFactor()\n )", "def calc_mass(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] +\n 4 *\n star.rho[i + 1, j, k]\n + star.rho[i + 2, j, k])\n\n return 2 * sum\n\n def Q2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (Q1(j, k) + 4 * Q1(j + 1, k) + Q1(j + 2, k))\n\n return 2 * sum\n\n mass = 0\n\n for k in range(0, N - 2, 2):\n mass += (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * Q2(k) +\n 4 * r[k + 1]**2 * Q2(k + 1) +\n r[k + 2]**2 * Q2(k + 2))\n\n return mass", "def _get_molecule_center_of_mass(self):\n center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass", "def half_mass_radius(subhalo, catalogue, rad_key):\n temp = subhalo.copy(deep=True)\n 
temp.sort_values(by=\"r\", inplace=True) #sort particles by radius\n temp = temp.reset_index(drop=True)\n temp_mass = 0\n #Start adding the masses of all particles starting with smallest radius\n for j in range(len(temp[\"r\"])):\n #Check if total mass is less than half total particle mass\n if temp_mass < (catalogue[\"SubhaloMassStellar\" + rad_key][0]/2):\n temp_mass = temp_mass + temp[\"Masses\"][j] #Add mass of next particle\n else:\n #Add half mass radius.\n #Some uncertainty on calculation method here, now center of mass between particle j and j-i.\n m1 = temp[\"Masses\"][j-1]\n m2 = temp[\"Masses\"][j]\n M = m1+m2\n halfmass_rad = (m1*temp[\"r\"][j-1] + m2*temp[\"r\"][j])/M\n break #stop loop\n halfmass_rad = temp[\"r\"][j]\n catalogue[\"SubhaloHalfmassRadStellar\" + rad_key] = halfmass_rad #save to catalogue\n return catalogue", "def calculate_mass(self):\n sum_protein_mass = 0\n\n # Sum of proteins\n for p in self.protein_species:\n protein_name = '_'.join(p.split('_')[1:])\n protein_length = self.params['n_' + protein_name]\n\n protein_mass = protein_length * self.species_values[p]\n sum_protein_mass += protein_mass\n\n # Sum of mrna bound ribosome\n for c in self.c_mrna_species:\n sum_protein_mass += self.species_values[c] * self.params['n_r']\n \n return sum_protein_mass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the mass assembly history for a given halo. The tree-based approach has been abandoned for performance reasons.
def collapsed_mass_history(self, index, nfw_f): logging.debug("Looking for halo %d", index) halo = self.get_halo(index) if halo["hostIndex"] != halo.name: raise ValueError("Not a host halo!") m_0 = self.halo_mass(index) progenitors = pd.concat( [ self.data.loc[index], self.data.loc[self.halo_progenitor_ids(index)], ] ) logging.debug( "Built progenitor sub-table for halo %d of mass %d with %d members", index, m_0, progenitors.size, ) progenitors = progenitors[progenitors["particleNumber"] > nfw_f * m_0] cmh = progenitors.groupby("snapshotNumber", as_index=False)[ "particleNumber" ].sum() cmh["nodeIndex"] = index logging.info( "Aggregated masses of %d valid progenitors of halo %d", progenitors.size, index, ) return cmh
[ "def collapsed_mass_history(self, index, nfw_f):\n\n logging.debug(\"Looking for halo %d\", index)\n halo = self.get_halo(index)\n if halo[\"hostIndex\"] != halo.name:\n raise ValueError(\"Not a host halo!\")\n m_0 = self.halo_mass(index)\n\n progenitors = pd.concat(\n [\n self.data.loc[index],\n self.data.loc[self.halo_progenitor_ids(index)],\n ]\n )\n logging.debug(\n \"built prog sub-table [%d] (m=%d, %d progs)\",\n index,\n m_0,\n progenitors.size,\n )\n\n progenitors = progenitors[progenitors[\"particleNumber\"] > nfw_f * m_0]\n cmh = progenitors.groupby(\"snapshotNumber\", as_index=False)[\n \"particleNumber\"\n ].sum()\n cmh[\"nodeIndex\"] = index\n logging.info(\n \"Aggregated masses of %d valid progenitors of halo %d\",\n progenitors.size,\n index,\n )\n\n return cmh", "def history(self, hash):\n txs = self._t.get(hash, max_transactions=10000)['transactions']\n tree = defaultdict(list)\n number_editions = 0\n\n for tx in txs:\n _tx = self._t.get(tx['txid'])\n txid = _tx['txid']\n verb_str = BlockchainSpider.check_script(_tx['vouts'])\n verb = Spoolverb.from_verb(verb_str)\n from_address, to_address, piece_address = BlockchainSpider._get_addresses(_tx)\n timestamp_utc = _tx['time']\n action = verb.action\n\n edition_number = 0\n if action != 'EDITIONS':\n edition_number = verb.edition_number\n else:\n number_editions = verb.num_editions\n\n tree[edition_number].append({'txid': txid,\n 'verb': verb_str,\n 'from_address': from_address,\n 'to_address': to_address,\n 'piece_address': piece_address,\n 'timestamp_utc': timestamp_utc,\n 'action': action,\n 'number_editions': number_editions,\n 'edition_number': edition_number})\n\n # lets update the records with the number of editions of the piece since we do not know\n # this information before the EDITIONS transaction\n for edition, chain in tree.items():\n [d.update({'number_editions': number_editions}) for d in chain]\n return dict(tree)", "def computation(self):\n # In 2015 alimony was moved to other income\n if self.year == 2015:\n self.cps['alm_val'] = np.where(self.cps['oi_off'] == 20,\n self.cps['oi_val'], 0.)\n\n # start by looping through each household\n for num in tqdm(self.h_nums):\n self.nunits = 0\n # clear house_units list to avoid double counting tax units\n del self.house_units[:]\n # self.house_units.clear() for when we move to 3.6 officially\n # only use the households with that h_seq\n household = self.cps[self.cps['h_seq'] == num]\n household = household.sort_values('a_lineno', kind='mergesort')\n house_dicts = household.to_dict('records')\n head = house_dicts[0] # head record for the household\n\n # determine household type to determine how unit will be created\n # TODO: why not also include h_type = 3 and 4 in single?\n single = ((head['h_type'] == 6 or\n head['h_type'] == 7) and\n head['h_numper'] == 1)\n group = head['h_type'] == 9\n # TODO: why not include h_type == 8 in group? 
define nonfamily\n # single persons living alone\n if single:\n self.house_units.append(self.create(head, house_dicts))\n # create a unit for each person in a group household\n elif group:\n for person in house_dicts:\n self.house_units.append(self.create(person, house_dicts))\n else: # all other household types\n for person in house_dicts:\n # only create a new unit if that person is not flagged\n not_flagged = (not person['h_flag'] and\n not person['s_flag'] and\n not person['d_flag'])\n if not_flagged:\n self.house_units.append(self.create(person,\n house_dicts))\n\n # check if the person is a dependent and must file\n if not person['s_flag'] and person['d_flag']:\n if self.must_file(person):\n self.house_units.append(self.create(person,\n house_dicts))\n\n # check for dependents in the household\n if self.nunits > 1:\n self.search()\n\n # check head of household status\n map(self.hhstatus, self.house_units)\n\n # add units to full tax unit list\n for unit in self.house_units:\n if unit['t_flag']:\n self.tax_units.append(self.output(unit, house_dicts))\n\n final_output = pd.DataFrame(self.tax_units)\n num_units = len(final_output)\n print('There are {:,} tax units in the {} file'.format(num_units,\n self.year))\n return(final_output)", "def metric_halstats(hal_filename, reference_id=\"ref\"):\n \n # Get the list of dicts of per-genome stats.\n status_list = get_halstats_stats(hal_filename)\n \n # Throw out non-leaves\n status_list = [entry for entry in status_list if entry[\"NumChildren\"] == 0]\n \n # Grab all the genome names\n genome_names = [entry[\"GenomeName\"] for entry in status_list]\n \n # Get the dict from genome name to total bases from that genome aligned to\n # the reference at all, and the dict of N compositions, in parallel.\n coverage_dict, basecomp_dict = in_parallel(\n lambda: get_halstats_coverage(hal_filename, genome_names, reference_id),\n lambda: get_halstats_basecomps(hal_filename, genome_names))\n \n for entry in status_list:\n # For each genome, we want the coverage against the reference.\n \n # Grab the genome name\n genome_name = entry[\"GenomeName\"]\n \n if not coverage_dict.has_key(genome_name):\n # This is probably the root sequence and didn't get a coverage for\n # some reason. 
At any rate, the root sequence would be all Ns\n continue\n \n # Figure out how much of it is not Ns\n non_n = basecomp_dict[genome_name]\n \n # How many bases are eligible?\n eligible = float(entry[\"Length\"] * non_n)\n \n if eligible == 0:\n # No coverage is defined\n entry[\"Coverage\"] = float(\"NaN\")\n continue\n \n # Compute and save the coverage for each entry, by dividing bases\n # aligned by bases eligible.\n entry[\"Coverage\"] = coverage_dict[genome_name] / eligible\n \n # Return the results\n return status_list", "def MAH(self, mobs, zobs, mseed=None, set_history=False):\n lgmobs = log10(mobs)\n lgzobs = log10(zobs+1.)\n if ((lgmobs > self.lgmmax) or \n (lgmobs < self.lgmmin)):\n print(\"lgmobs beyond range lgmobs %10.3f lgmmin %10.3f lgmmax %10.3f \"%(lgmobs, self.lgmmin, self.lgmmax))\n raise cex.ParameterOutsideDefaultRange(mobs)\n if ((lgzobs > self.lgzmax) or \n (lgzobs < self.lgzmin)):\n raise cex.ParameterOutsideDefaultRange(zobs)\n # starting mass\n if mseed is None:\n lgmseed = lgmobs - 2.0\n else:\n lgmseed = log10(mseed)\n if (lgmseed < self.lgmmin):\n print(\"lgmmin too large lgmseed %10.3f lgmmin %10.3f\"%(lgmseed, self.lgmmin))\n raise cex.ParameterOutsideDefaultRange(mseed)\n # for concentration\n m_magic = mobs*self.frac_magic\n lgmmagic = log10(m_magic)\n if (lgmmagic < lgmseed):\n raise cex.ParameterOutsideDefaultRange(m_magic)\n\n lgz_magic, lgm_history, lgz_history = self._MAH_lg(\n lgmobs, lgzobs, lgmseed, lgmmagic)\n t_magic = self.age(10.**lgz_magic-1.)\n t_obs = self.age(zobs)\n# t_magic = self._age(10.**lgz_magic-1.)\n# t_obs = self._age(zobs)\n cvir = self._cvir_fit(t_magic, t_obs)\n if set_history:\n m_history = np.power(10., lgm_history)\n z_history = np.power(10., lgz_history)-1.\n mah_history = np.vstack([m_history, z_history])\n return(cvir, mah_history)\n else:\n return(cvir)", "def bill_history(bill):\n history = []\n\n events = bill.get('events', [])\n events.sort(key=lambda e: [e['date'], get_location(e), get_agent(e, bill)])\n\n for location, location_events in groupby(events, get_location):\n location_history = []\n\n for agent, agent_events in groupby(location_events, lambda e: get_agent(e, bill)):\n info = {'events': list(agent_events)}\n info.update(agent)\n location_history.append(info)\n\n info = {'events': location_history}\n info.update(location)\n history.append(info)\n\n return history", "def __build_history(self, obj: Object) -> dict:\n previous_history = obj.history\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def __build_history(self, obj: Object) -> dict:\n previous_history = dict(obj.history)\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def test_find_homologs(self):\r\n\r\n formatdb_cmd = 'formatdb -p F -o T -i %s' % self.subjectdb_fp\r\n system(formatdb_cmd)\r\n self._paths_to_clean_up.append(\"formatdb.log\")\r\n for suffix in [\"nhr\", \"nin\", \"nsd\", \"nsi\", \"nsq\"]:\r\n self._paths_to_clean_up.append(\".\".join(\r\n [self.subjectdb_fp, suffix]))\r\n\r\n blast_output, hit_ids, removed_hit_ids =\\\r\n find_homologs(self.query_fp, self.subjectdb_fp, e_value=1e-4,\r\n max_hits=100, working_dir=\"./\", blast_mat_root=None,\r\n wordsize=28, percent_aligned=0.98, DEBUG=False)\r\n\r\n self.assertEqual(hit_ids, set([\"bth:BT_0001\", \"hsa:8355\"]))\r\n self.assertEqual(removed_hit_ids, set())\r\n\r\n i = 0\r\n for line in blast_output:\r\n\r\n if line.startswith(\"#\"):\r\n i += 1\r\n continue # 
depends on tmpfilename, skip testing\r\n\r\n self.assertEqual(blast_output[i], EXP_BLAST_OUTPUT[i])\r\n i += 1\r\n\r\n # Ensure low % alignment seqs are removed\r\n blast_output, hit_ids, removed_hit_ids =\\\r\n find_homologs(self.query2_fp, self.subjectdb_fp,\r\n e_value=1e-4, max_hits=100, working_dir=\"./\",\r\n blast_mat_root=None, wordsize=28, percent_aligned=1.00,\r\n DEBUG=False)\r\n\r\n self.assertEqual(hit_ids, set([\"bth:BT_0001\"]))\r\n self.assertEqual(removed_hit_ids, set([\"hsa:8355_tweaked\"]))\r\n\r\n # Ensure high % alignment seqs are not removed\r\n blast_output, hit_ids, removed_hit_ids =\\\r\n find_homologs(self.query2_fp, self.subjectdb_fp,\r\n e_value=1e-4, max_hits=100, working_dir=\"./\",\r\n blast_mat_root=None, wordsize=28, percent_aligned=0.75,\r\n DEBUG=False)\r\n\r\n self.assertEqual(hit_ids, set([\"bth:BT_0001\", \"hsa:8355_tweaked\"]))\r\n self.assertEqual(removed_hit_ids, set())", "def buildCache(ham: Dict[str, Any]) -> None:\n\n # Initialize the Hamiltonian\n clearCache(ham)\n\n # Build operators and sequences\n buildOperatorCache(ham)\n buildSequenceCache(ham)", "def compute_ballotHistory_distance(bh1, bh2):\n # import pdb; pdb.set_trace()\n assert(len(bh1)==len(bh2))\n n = len(bh1)\n assert(n >0 )\n dist = sum([(compute_ballot_distance(bh1[i],bh2[i]))**2 for i in range(n)])\n # normalize\n dist = dist/float(n)\n return dist", "def calc_returns(self):\n # Convert all the arrays to numpy arrays.\n self.trajectory_states = np.asarray(\n self.trajectory_states, dtype=self.env.observation_space.dtype)\n self.trajectory_actions = np.asarray(\n self.trajectory_actions, dtype=np.float32)\n self.trajectory_values = np.asarray(\n self.trajectory_values, dtype=np.float32)\n self.trajectory_neg_logprobs = np.asarray(\n self.trajectory_neg_logprobs, dtype=np.float32)\n self.trajectory_means = np.asarray(self.trajectory_means, dtype=np.float32)\n self.trajectory_logstds = np.asarray(\n self.trajectory_logstds, dtype=np.float32)\n self.trajectory_per_episode_rewards = np.asarray(\n self.trajectory_per_episode_rewards, dtype=np.float32)\n self.trajectory_per_episode_lengths = np.asarray(\n self.trajectory_per_episode_lengths, dtype=np.float32)\n self.trajectory_dones = np.asarray(self.trajectory_dones, dtype=np.bool)\n self.trajectory_per_step_rewards = np.asarray(\n self.trajectory_per_step_rewards, dtype=np.float32)\n\n # Perform calculation.\n mb_returns = np.zeros_like(self.trajectory_per_step_rewards)\n mb_advs = np.zeros_like(self.trajectory_per_step_rewards)\n lastgaelam = 0\n for t in reversed(range(len(self.trajectory_per_step_rewards))):\n if t == len(self.trajectory_per_step_rewards) - 1:\n nextnonterminal = 1.0 - self._last_done\n nextvalues = self._last_value\n else:\n nextnonterminal = 1.0 - self.trajectory_dones[t + 1]\n nextvalues = self.trajectory_values[t + 1]\n delta = self.trajectory_per_step_rewards[t] + (\n self._gamma * nextvalues * nextnonterminal) - (\n self.trajectory_values[t])\n mb_advs[t] = lastgaelam = delta + (\n self._gamma * self._lam * nextnonterminal * lastgaelam)\n mb_returns = mb_advs + self.trajectory_values\n self.trajectory_returns = mb_returns", "def process_histone_modifications(self):\n mod_arrays = np.zeros((NUM_HISTONE_MODS, len(self.seq)))\n for histone, start, end in self.histone_mods:\n mod_arrays[histone,start:end] = 1\n return mod_arrays", "def EstimateStateHistory(self, data):\n\n # normalize\n data= ((data.T-self._ave)/self._std).T\n\n # initilize state history matrix\n # first dimension is primitive index, second 
dimension is time\n self.h = np.zeros((self.K.shape[0], data.shape[1]))\n\n #for t in range(0, self._past):\n # self.h[:, t] = self.EstimateState(data[:, 0:t])\n\n for t in range(self._past, data.shape[1]):\n _Xp = np.reshape(data[:, t-self._past:t].T, (-1, 1))\n self.h[:, t] = np.dot(self.K, _Xp).flatten()", "def agg_history(self):\n cd_list, cr_list = zip(*self._history)\n return pd.concat(cd_list), pd.concat(cr_list)", "def get_history(size):\n \n actions = [seq for seq in itertools.product((0,1), repeat=size)]\n history = []\n \n for action_group in actions:\n # Get a new MDP\n mdp = blind_cliff_walk(size)\n for action in action_group:\n # Take action and record transition\n state = mdp.state\n state_prime,reward = mdp.step(action)\n history.append([state,action,reward,state_prime])\n if state_prime == -1:\n mdp.state = 0\n break\n return history", "def get_mergers_of_major_progenitor(input_halo):\n redshift = []\n ratio = []\n halo = []\n while input_halo is not None:\n mergers = db.relation_finding.MultiHopMostRecentMergerStrategy(input_halo, order_by='weight').all()\n if len(mergers)>0 :\n for m in mergers[1:]:\n redshift.append(mergers[0].timestep.next.redshift)\n halo.append((mergers[0], m))\n ratio.append(float(mergers[0].NDM)/m.NDM)\n input_halo = mergers[0]\n else:\n input_halo = None\n\n return np.array(redshift), np.array(ratio), halo", "def calc_history_average(self):\n\n s = [float(0)] * self.number_of_parameter\n for t in self.history:\n s = list(map(lambda x: x[0] + x[1], zip(s, t)))\n return list(map(lambda x: x / len(self.history), s))", "def saveHabFile(self):\n habMatrix = []\n # Making this generalizeable for preferential looking studies.\n if 'sumOnL' in self.dataMatrix[0].keys():\n sumFields = ['sumOnL', 'numOnL', 'sumOnR', 'numOnR', 'sumOff', 'numOff']\n else:\n sumFields = ['sumOnA', 'numOnA', 'sumOffA', 'numOffA', 'sumOnB', 'numOnB', 'sumOffB', 'numOffB']\n for i in range(0, len(self.dataMatrix)):\n if isinstance(self.dataMatrix[i]['habTrialNo'], int):\n tempType = deepcopy(self.dataMatrix[i]['trialType'])\n tempType = tempType[4:] # to remove 'hab.'\n if tempType in self.calcHabOver: # If not, this should specifically be ignored.\n tempNo = self.dataMatrix[i]['habTrialNo']\n addTo = False\n addIndex = -1\n tempLine = deepcopy(self.dataMatrix[i])\n tempLine['trialType'] = 'Hab'\n for j in range(0, len(habMatrix)):\n if habMatrix[j]['habTrialNo'] == tempNo:\n addTo = True\n addIndex = deepcopy(j)\n if addTo:\n habMatrix[addIndex]['stimName'] = habMatrix[addIndex]['stimName'] + '+' + tempLine['stimName']\n for z in range(0, len(sumFields)):\n habMatrix[addIndex][sumFields[z]] = habMatrix[addIndex][sumFields[z]] + tempLine[sumFields[z]]\n else:\n habMatrix.append(tempLine)\n else:\n pass\n else: # For all non-habituation trials.\n habMatrix.append(deepcopy(self.dataMatrix[i]))\n for i in range(0, len(habMatrix)):\n habMatrix[i]['trial'] = i+1\n return habMatrix" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Number of progenitors at a given snapshot z1
def find_progenitors_at_z(self, SH, mtree, z1, z2): for ss in range(z1, z2): # nodes at redshift ss ss_indx = np.where(mtree.data.snapshotNumber.values == ss) nodeID = mtree.data.index.values[ss_indx] nodeID_desc = mtree.data.descendantIndex.values[ss_indx] # find number of progenitors for nodes at redshift ss if ss != z1: _progcounts = np.zeros(len(nodeID)) for ii in range(len(nodeID_past_desc)): if nodeID_past_desc[ii] in nodeID: indx = np.where(nodeID == nodeID_past_desc[ii]) _progcounts[indx] = count[ii] nodeID_desc_unique, count = np.unique(nodeID_desc, return_counts=True) nodeID_desc_unique=nodeID_desc_unique[1:]; count=count[1:] nodeID_past = nodeID nodeID_past_desc = nodeID_desc_unique if ss != z1: _progcounts_past = _progcounts print('_progcounts', _progcounts)
[ "def num_pauses(self):\r\n objects = self.__get_objects()\r\n z1 = str(objects[1]).strip().split()\r\n return int(z1[1])", "def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass", "def get_num_photo1(self):\n return self.photo1.get_num_photo()", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def z_count(self):\n r = self._countZ\n self._countZ = 0\n return r", "def num_profiles(self):\n pass # pragma: no cover", "def __determine_number_of_plates(self):\n number_members = len(self.__pool_set)\n number_positions = len(self.__base_layout)\n number_plates = ceil(float(number_members) / number_positions)\n self.__number_plates = int(number_plates)", "def Nprofiles(self):\n return self._nprofiles", "def get_n(self, z):\n n = 1\n while n * z < self.thr:\n n += 1\n return n - 1", "def get_profibus_slot_count(self):\n return self.query(\"PROFINUM?\")", "def numero_occorenze(lista_generi):\n count = 0\n for occ in lista_generi:\n count = count + occ[1]\n return count", "def get_marble_count(self):", "def __computer_number_of_samples__(self):\n\n slice_count = []\n for ii in self.imgs_list:\n with h5py.File(ii, 'r') as f:\n aux = f['data'].shape[0]\n slice_count.append(aux - (self.crop[1] + self.crop[0]))\n\n slice_count = np.array(slice_count)\n return slice_count.sum(),slice_count.cumsum()", "def get_controls_snapshots_count(selenium, src_obj):\n controls_ui_service = webui_service.ControlsService(selenium)\n return {\n \"controls_tab_count\": controls_ui_service.get_count_objs_from_tab(\n src_obj=src_obj),\n \"controls_count\": len(controls_ui_service.get_list_objs_from_tree_view(\n src_obj=src_obj))}", "def numProcs(reportname):\n with open(reportname, \"rb\") as f:\n data = json.load(f)\n numProcesses = len(data[\"behavior\"][\"processes\"])\n return numProcesses", "def getUnseenCount():", "def count(self, val):\r\n return sum(1 for e in self.frontierpq if e[0] == val)", "def GetPolyCount(self) -> int:\n ...", "def Count(self) -> int:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Schedule WB category export on Scrapinghub.
def category_export(url: str, chat_id: int, spider='wb', priority=2) -> str: logger.info(f'Export {url} for chat #{chat_id}') client, project = init_scrapinghub() scheduled_jobs = scheduled_jobs_count(project, spider) max_scheduled_jobs = env('SCHEDULED_JOBS_THRESHOLD', cast=int, default=1) if priority < 3 and scheduled_jobs > max_scheduled_jobs: raise Exception('Spider wb has more than SCHEDULED_JOBS_THRESHOLD queued jobs') job = project.jobs.run(spider, priority=priority, job_args={ 'category_url': url, 'callback_url': env('WILDSEARCH_JOB_FINISHED_CALLBACK') + f'/{spider}_category_export', 'callback_params': f'chat_id={chat_id}', }) logger.info(f'Export for category {url} will have job key {job.key}') return 'https://app.scrapinghub.com/p/' + job.key
[ "def _download_categories_csv(self, filename):\n\n # Login if necessary.\n self._login()\n\n # Log time consuming step.\n LOGGER.info(\"Downloading categories\")\n\n # Load the export page.\n url = (\n self._admin_url +\n \"?m=ajax_export&instance=categories&checkAccess=categories\"\n )\n self._browser.open(url)\n\n # Call the doExport function.\n self._do_export(url, filename)", "def run(self):\n page = self.fetch_data(self.url)\n stock_list = self.pop_stock_list(page)\n self.write_csv(stock_list)", "def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))", "def build_export(self,bulk_api_url,data):\n bulk_end_point = \"activities/exports\"\n return self.req(bulk_api_url+bulk_end_point,method = 'post',data = data)", "def download_multimonth_csv(out_dir, year_start, month_start, year_end, month_end, station, product='water_level', datum='STND', time_zone='GMT'):\n\n # add trailing slash to directory name, if necessary\n if out_dir[-1] is not ('/' or '\\\\'):\n out_dir = out_dir+'/'\n\n # create directory if necessary\n if not os.path.isdir(out_dir):\n os.mkdir(out_dir)\n\n file_prefix = station+'_'+product+'_'\n\n years = range(int(year_start),int(year_end)+1)\n\n for year in years:\n if year == years[0]:\n mon_range = range(month_start,13)\n elif year == years[-1]:\n mon_range = range(1,month_end+1)\n else:\n mon_range = range(1,13)\n\n for mon in mon_range:\n mon_str = str(mon).zfill(2)\n\n # file to write\n filename = file_prefix+str(year)+mon_str+'.csv'\n out_file = os.path.join(out_dir,filename)\n\n # determine number of days in month\n ndays = monthrange(year, mon)[1]\n ndays_str = str(ndays).zfill(2)\n\n begin_date = str(year)+mon_str+'01'\n end_date = str(year)+mon_str+ndays_str\n\n download_6min_csv(out_file, begin_date, end_date, station, product, datum, time_zone)", "def final_series():\n tickers = pd.read_excel(os.path.abspath(os.path.dirname(__file__)) +\"./codigos.xlsx\", \n header=[0]).values.flatten()\n # tickers = pd.read_excel(\"./codigos.xlsx\", \n # header=[0]).values.flatten()\n ls = fetch_series(list(set(tickers)))\n net_series = [s for s in ls if _cleasing(s, [\"D\", \"M\"]) is not None]\n p = os.path.abspath(os.path.dirname(__file__))\n with open(p + \"/series_bcb\", \"wb\") as f:\n pickle.dump(net_series, f)\n # with open(\"./series_bcb\", \"wb\") as f:\n # pickle.dump(net_series, f) ", "def main():\n df_cars = scrape_summary_data()\n df_cars['detail_link'] = df_cars.apply(build_detail_link, axis=1)\n df_cars = scrape_detail_pages(df_cars)\n df_cars.to_hdf('data/cars_'+str(pd.Timestamp.today().date())+'.h5',\n key='df_cars',\n mode='w')", "def export_bn_in_background(bv):\n options = GetOptions(True)\n background_task = ExportBNInBackground(bv, options)\n background_task.start()", "def mass_download(self, start, final, delay=5, verbose=True):\n\t\tstart = datetime.strptime(start, '%Y-%m-%d')\n\t\tfinal = datetime.strptime(final, '%Y-%m-%d')\n\t\tself._create_folders(start, final)\n\t\tfor d in datastore.DataStore._daterange(start, final):\n\t\t\tstart = d.replace(hour=00, minute=00, second=00).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\tfinal = d.replace(hour=23, minute=59, second=59).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\tfdate = d.strftime('%Y-%m-%d')\n\t\t\tfname = 'stocks{}/'.format(self._extract_year(fdate)) + self._extract_month(fdate) + '/' + fdate + '-stocks.csv'\n\t\t\tself.get_by_range(start, final, delay, fname, 
verbose)", "def _scrape_category(collection_value, category_value):\n for page in range(400):\n try:\n results = play_scraper.collection(collection=collection_value, category=category_value, detailed=True,\n page=page)\n for result in results:\n url = result.get('developer_url')\n pp_url = result.get('developer_pp_url')\n\n # Replaces ' with '' to support strings with ' in SQL queries\n db_row = [str(result.get('app_id')).replace('\\'', '\\'\\''),\n str(result.get('developer_id')).replace('\\'', '\\'\\''), result.get('category')[0], url, pp_url]\n\n print('INSERTED {}'.format(db_row))\n crawler_db_handler.insert_to_application_table(db_row)\n except:\n # Must NOT print anything to stdout from thread\n pass", "def test_search_category_schedule_transaction(self):\n pass", "def russell3000_tickers():\n ## Get current date in dd/mm/yyyy format\n date = time.strftime(\"%m_%d_%Y\")\n tickers = russell3000_helper_fun()\n tickers.to_csv('russell3000_tickers_'+ date+'.csv', index = False)", "def download_all():\n task = load_jt('task.json')\n data = load_jt('data.json')\n spider = Crawler()\n \n for _, v in task.iteritems():\n disease_name = v['data']['disease_name']\n data.setdefault(disease_name, {})\n for url, v1 in ignore_iteritems(v, ignore = ['data']):\n print url\n html = spider.html(url)\n if html:\n soup = BS4(html)\n div = soup.find('div', id='main-content')\n data[disease_name].setdefault(v1['data']['category'], str(div))\n dump_jt(data, 'data.json', fastmode = True, replace = True)", "def newscrawler(hour_offset, filter_by, file_to_write, output_type=\"json\"):\n\n rss_feeds = [\n {\n \"link\": \"https://www.abc.net.au/news/feed/8055316/rss.xml\",\n \"name\": \"ABC News\",\n },\n {\n \"link\": \"https://www.abc.net.au/news/feed/8057136/rss.xml\",\n \"name\": \"ABC News\",\n },\n {\n \"link\": \"https://www.abc.net.au/news/feed/8053540/rss.xml\",\n \"name\": \"ABC News\",\n },\n {\n \"link\": \"https://www.abc.net.au/news/feed/8054562/rss.xml\",\n \"name\": \"ABC News\",\n },\n {\n \"link\": \"https://www.abc.net.au/news/feed/8057540/rss.xml\",\n \"name\": \"ABC News\",\n },\n {\n \"link\": \"https://www.abc.net.au/news/feed/8057648/rss.xml\",\n \"name\": \"ABC News\",\n },\n {\n \"link\": \"https://www.abc.net.au/news/feed/8057096/rss.xml\",\n \"name\": \"ABC News\",\n },\n {\"link\": \"https://www.watoday.com.au/rss/national.xml\", \"name\": \"WA Today\"},\n {\n \"link\": \"https://www.theguardian.com/australia-news/rss\",\n \"name\": \"The Guardian\",\n },\n {\n \"link\": \"https://www.health.gov.au/news/rss.xml/\",\n \"name\": \"Australian Government Department of Health\",\n },\n {\n \"link\": \"https://www2.health.vic.gov.au/rss/health-alerts-and-advisories\",\n \"name\": \"Victoria State Government\",\n },\n {\n \"link\": \"https://www2.health.vic.gov.au/rss/News\",\n \"name\": \"Victoria State Government\",\n },\n {\"link\": \"https://thewest.com.au/news/wa/rss\", \"name\": \"The West Australian\"},\n {\n \"link\": \"https://theconversation.com/au/covid-19/articles.atom\",\n \"name\": \"The Conversation\",\n },\n {\n \"link\": \"http://newsroom.nt.gov.au/api/RSS/NewsroomIndex\",\n \"name\": \"Northern Territory Government\",\n },\n {\n \"link\": \"https://www.brisbanetimes.com.au/rss/national/queensland.xml\",\n \"name\": \"Brisbane Times\",\n },\n {\n \"link\": \"https://www.canberratimes.com.au/rss.xml\",\n \"name\": \"The Canberra Times\",\n },\n {\n \"link\": \"https://www.sbs.com.au/news/topic/australia/feed\",\n \"name\": \"SBS News\",\n }\n ]\n\n now = 
datetime.now().astimezone(tz.gettz(\"Australia/Melbourne\"))\n\n ##### Get the current news related to the filters ####\n news_articles = {\n \"updatedTime\": now.strftime(\"%I:%M%p %d %B %Y\"),\n \"news\": [],\n }\n\n # create json object\n for feed in rss_feeds:\n d = feedparser.parse(feed[\"link\"])\n\n author = feed[\"name\"]\n \n for entry in d.entries:\n \n # filter by date\n article_date = dateparser.parse(entry.updated).astimezone(\n tz.gettz(\"Australia/Melbourne\")\n )\n if now - timedelta(hours=hour_offset) < article_date and entry.has_key(\"summary\") :\n \n if any(x in entry.title.lower() for x in filter_by) or any(\n x in entry.summary.lower() for x in filter_by\n ):\n print_date = article_date.strftime(\"%d %B %Y\")\n print_time = article_date.time().strftime(\"%I:%M%p\")\n news_articles[\"news\"].append(\n {\n \"url\": entry.link,\n \"title\": entry.title,\n \"source\": author,\n \"date\": print_date,\n \"time\": print_time,\n }\n )\n\n # sort by date\n news_articles[\"news\"] = sorted(\n news_articles[\"news\"],\n key=lambda i: (\n datetime.strptime(i[\"date\"], \"%d %B %Y\"),\n datetime.strptime(i[\"time\"], \"%I:%M%p\"),\n ),\n )\n news_articles[\"news\"].reverse()\n print(len(news_articles[\"news\"]))\n i = 0\n while i < len(news_articles[\"news\"]) - 1:\n if news_articles[\"news\"][i][\"url\"] == news_articles[\"news\"][i + 1][\"url\"]:\n news_articles[\"news\"].pop(i + 1)\n\n elif news_articles[\"news\"][i][\"title\"] == news_articles[\"news\"][i + 1][\"title\"]:\n news_articles[\"news\"].pop(i + 1)\n\n else:\n i = i + 1\n\n news_articles[\"news\"] = news_articles[\"news\"][:45]\n\n if output_type == \"json\":\n json_string = dumps(news_articles, indent=4)\n out_json = open(file_to_write, \"w\")\n out_json.write(json_string)\n\n out_json.close()\n else: # csv\n out_csv = open(file_to_write, \"w\")\n header_list = list(news_articles[\"news\"][0].keys())\n writer = csv.DictWriter(out_csv, fieldnames=header_list)\n writer.writerow(\n {\n \"url\": \"Last Updated\",\n \"title\": news_articles[\"updateTime\"][0][\"time\"],\n \"source\": \"\",\n \"date\": \"\",\n \"time\": \"\",\n }\n )\n\n writer.writeheader()\n\n for diction in news_articles[\"news\"]:\n writer.writerow(diction)", "def click_report_export_to_csv_button(self):\n self.set_existing_handles()\n try:\n if self.is_element_present(self.report_export_to_csv_button_locator):\n self.click_element(self.report_export_to_csv_button_locator)\n time.sleep(10)\n self.switch_to_window()\n self.close_browser()\n except:\n pass\n finally:\n self.switch_to_previous_window()", "def test_scrape(self):\n simfiles = scrape_category.get_category_from_ziv(\"category_test\", self.CATEGORY_URL)\n compare_simfile_records(simfiles, EXPECTED_SIMFILES)", "def downloadBLS(self):\n seriesList = self._getBLSSeriesList()\n n = 50 # Chunk size\n seriesListChunk = [seriesList[i*n: (i+1)*n]\n for i in range((len(seriesList)+n-1)//n)]\n # Creating counties class to hold class\n geoClassDict = {}\n for areaID in self.fipsList:\n name = self.config.Global.FIPS_CODE[areaID]\n geoClassDict[areaID] = GeoDataFrame(name, dataset=\"BLS\")\n # Initialize session and default data\n s = requests.Session()\n s.headers = {'Content-type': 'application/json'}\n fipsLength = len(self.fipsList[0])\n p = re.compile(r\"[A-Z]+\\d{\"+ str(fipsLength) +\"}\")\n for srsList in tqdm(seriesListChunk, desc=\"Download BLS data\"):\n data = json.dumps({\"seriesid\": srsList,\n \"startyear\": self.config.BLS.START_YEAR,\n \"endyear\": self.config.BLS.END_YEAR,\n 
\"registrationkey\": self.config.BLS.API_KEY,\n \"calculations\": \"true\",\n \"annualaverage\": self.config.BLS.ANNUAL_AVERAGE})\n r = s.post(\n 'https://api.bls.gov/publicAPI/v2/timeseries/data/',\n data=data)\n n_retry = 0\n while r.status_code != requests.codes.ok and n_retry < 10:\n warnings.warn(\n f\"Request server fail with error code {str(r.status_code)}, sleep 10 sec\",\n ResourceWarning)\n time.sleep(10)\n r = s.post(\n 'https://api.bls.gov/publicAPI/v2/timeseries/data/',\n data=data)\n n_retry += 1\n json_data = r.json()\n for seriesResult in json_data[\"Results\"][\"series\"]:\n m = p.match(seriesResult[\"seriesID\"])\n areaCode = m.group()[-fipsLength:]\n geoClassDict[areaCode].load(seriesResult, fips=areaCode)\n # geoClassDict[areaCode].DataFrame = geoClassDict[areaCode].DataFrame.rename(\n # self.config.BLS.MEASURE_CODE, axis=1)\n return geoClassDict", "def save_new_rss_subscription_task(feed_obj):\n save_new_rss_subscription(feed_obj)\n logger.info(\"Entries for new Feed subcription\")", "def start_crawler_schedule(self, CrawlerName: str) -> Dict:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train a simple conv net.
img_h = sentence length (padded where necessary)
img_w = word vector length (300 for word2vec)
filter_hs = filter window sizes
hidden_units = [x, y]; x is the number of feature maps (per filter window), and y is the penultimate layer
sqr_norm_lim = s^2 in the paper
lr_decay = adadelta decay parameter
def train_conv_net(datasets,datasets_weights, U, U_Topical, img_w=300, filter_hs=[3,4,5], hidden_units=[100,2], dropout_rate=[0.5], shuffle_batch=True, n_epochs=25, batch_size=50, lr_decay = 0.95, conv_non_linear="relu", use_valid_set=True, show_states=False, activations=[Iden], sqr_norm_lim=9, non_static=True): rng = np.random.RandomState(3435) img_h = len(datasets[0][0])-1 U_Topical.dtype = "float32" (num_topics,topic_dim) = U_Topical.shape word_w = img_w img_w = int(img_w + num_topics*topic_dim) filter_w = img_w feature_maps = hidden_units[0] filter_shapes = [] pool_sizes = [] for filter_h in filter_hs: filter_shapes.append((feature_maps, 1, filter_h, filter_w)) # 100 1 3 300 pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1)) # size of words samples one parameters = [("image shape",img_h,img_w),("filter shape",filter_shapes), ("hidden_units",hidden_units), ("dropout", dropout_rate), ("batch_size",batch_size),("non_static", non_static), ("learn_decay",lr_decay), ("conv_non_linear", conv_non_linear), ("non_static", non_static) ,("sqr_norm_lim",sqr_norm_lim),("shuffle_batch",shuffle_batch)] #print parameters #define model architecture index = T.lscalar() x = T.matrix('x') y = T.ivector('y') x_topic = T.tensor3('x_topic') Words = theano.shared(value = U, name = "Words") Topics = theano.shared(value=U_Topical,name="Topics") zero_vec_tensor = T.vector() zero_vec = np.zeros(word_w, dtype='float32') set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))]) layer0_input_words = Words[T.cast(x.flatten(),dtype="int32")].reshape((x.shape[0],1,x.shape[1],Words.shape[1])) layer0_inputs_topics = [] for i in range(num_topics): sin_topic = x_topic[:,:,i] Topic = Topics[i].reshape((1,Topics[i].shape[0])) weights = sin_topic.flatten() weights = weights.reshape((weights.shape[0],1)) layer0_inputs_topics.append(T.dot(weights, Topic)) layer0_input_topics = T.concatenate(layer0_inputs_topics,1) layer0_input_topics = layer0_input_topics.reshape((x_topic.shape[0],1,x_topic.shape[1],num_topics*topic_dim)) layer0_input = T.concatenate([layer0_input_words,layer0_input_topics],3) conv_layers = [] layer1_inputs = [] for i in xrange(len(filter_hs)): filter_shape = filter_shapes[i] pool_size = pool_sizes[i] conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,image_shape=(batch_size, 1, img_h, img_w), filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear) layer1_input = conv_layer.output.flatten(2) conv_layers.append(conv_layer) layer1_inputs.append(layer1_input) layer1_input = T.concatenate(layer1_inputs,1) hidden_units[0] = feature_maps*len(filter_hs) classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate) #define parameters of the model and update functions using adadelta params = classifier.params for conv_layer in conv_layers: params += conv_layer.params if non_static: #if word vectors are allowed to change, add them as model parameters params += [Words] #params are model parameters params += [Topics] #Topics embedding are adjusted cost = classifier.negative_log_likelihood(y) dropout_cost = classifier.dropout_negative_log_likelihood(y) grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim) #shuffle dataset and assign to mini batches. 
if dataset size is not a multiple of mini batches, replicate #extra data (at random) np.random.seed(3435) if datasets[0].shape[0] % batch_size > 0: extra_data_num = batch_size - datasets[0].shape[0] % batch_size random_index = np.random.permutation(np.arange(datasets[0].shape[0])) random_index.astype('int32') train_set = datasets[0][random_index,:] train_set_weights = datasets_weights[0][random_index,:,:] extra_data = train_set[:extra_data_num] extra_data_weights = train_set_weights[:extra_data_num] new_data=np.append(datasets[0],extra_data,axis=0) new_data_weights = np.append(datasets_weights[0],extra_data_weights,axis = 0) else: new_data = datasets[0] new_data_weights = datasets_weights[0] random_index = np.random.permutation(np.arange(new_data.shape[0])) random_index.astype('int32') new_data = new_data[random_index] new_data_weights = new_data_weights[random_index] n_batches = new_data.shape[0]/batch_size n_train_batches = int(np.round(n_batches*0.9)) test_set_x = np.asarray(datasets[1][:,:img_h] ,"float32") test_set_x_topic = np.asarray(datasets_weights[1][:,:img_h,:] ,"float32") test_set_y = np.asarray(datasets[1][:,-1],"int32") if use_valid_set: train_set = new_data[:n_train_batches*batch_size,:] train_set_weights = new_data_weights[:n_train_batches*batch_size,:,:] val_set = new_data[n_train_batches*batch_size:,:] val_set_weights = new_data_weights[n_train_batches*batch_size:,:,:] train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1])) val_set_x, val_set_x_topic, val_set_y = shared_dataset((val_set[:,:img_h],val_set_weights,val_set[:,-1])) n_val_batches = n_batches - n_train_batches val_model = theano.function([index], classifier.errors(y), givens={ x: val_set_x[index * batch_size: (index + 1) * batch_size], x_topic: val_set_x_topic[index * batch_size: (index + 1) * batch_size], y: val_set_y[index * batch_size: (index + 1) * batch_size]}) else: train_set = new_data[:,:] train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1])) #make theano functions to get train/val/test errors test_model = theano.function([index], classifier.errors(y), givens={ x: train_set_x[index * batch_size: (index + 1) * batch_size], x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size], y: train_set_y[index * batch_size: (index + 1) * batch_size]}) train_model = theano.function([index], cost, updates=grad_updates, givens={ x: train_set_x[index*batch_size:(index+1)*batch_size], x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size], y: train_set_y[index*batch_size:(index+1)*batch_size]}) test_pred_layers = [] test_size = test_set_x.shape[0] test_layer0_input_words = Words[T.cast(x.flatten(),dtype="int32")].reshape((test_size,1,img_h,Words.shape[1])) test_layer0_inputs_topics = [] for i in range(num_topics): sin_topic = x_topic[:,:,i] Topic = Topics[i].reshape((1,Topics[i].shape[0])) weights = sin_topic.flatten() weights = weights.reshape((weights.shape[0],1)) test_layer0_inputs_topics.append(T.dot(weights, Topic)) test_layer0_input_topics = T.concatenate(test_layer0_inputs_topics,1) test_layer0_input_topics = test_layer0_input_topics.reshape((test_size,1,img_h,num_topics*topic_dim)) test_layer0_input = T.concatenate([test_layer0_input_words,test_layer0_input_topics],3) for conv_layer in conv_layers: test_layer0_output = conv_layer.predict(test_layer0_input, test_size) test_pred_layers.append(test_layer0_output.flatten(2)) test_layer1_input = 
T.concatenate(test_pred_layers, 1) test_y_pred = classifier.predict(test_layer1_input) test_error = T.mean(T.neq(test_y_pred, y)) test_model_all = theano.function([x,x_topic,y], test_error) #start training over mini-batches print '... training' epoch = 0 best_val_perf = 0 val_perf = 0 test_perf = 0 cost_epoch = 0 while (epoch < n_epochs): epoch = epoch + 1 if shuffle_batch: for minibatch_index in np.random.permutation(range(n_train_batches)): cost_epoch = train_model(minibatch_index) set_zero(zero_vec) else: for minibatch_index in xrange(n_train_batches): cost_epoch = train_model(minibatch_index) set_zero(zero_vec) train_losses = [test_model(i) for i in xrange(n_train_batches)] train_perf = 1 - np.mean(train_losses) if use_valid_set: val_losses = [val_model(i) for i in xrange(n_val_batches)] val_perf = 1- np.mean(val_losses) if val_perf >= best_val_perf: params_conv = [] params_output = {} test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) test_perf = 1- test_loss best_val_perf = val_perf for conv_layer in conv_layers: params_conv.append(conv_layer.get_params()) params_output = classifier.get_params() word_vec = Words.get_value() Topic_vec = Topics.get_value() else : val_perf = 0 if show_states: print('epoch %i, train perf %f %%, val perf %f' % (epoch, train_perf * 100., val_perf*100.)) if not use_valid_set: params_conv = [] params_output = {} test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) test_perf = 1- test_loss for conv_layer in conv_layers: params_conv.append(conv_layer.get_params()) params_output = classifier.get_params() word_vec = Words.get_value() Topic_vec = Topics.get_value() return test_perf, [params_conv, params_output, word_vec,Topic_vec]
[ "def train(self, x_train, y_train, w2v_size=300, w2v_window=5, w2v_min_count=1,\n w2v_epochs=100, k_max_sequence_len=500, k_batch_size=128, k_epochs=32, k_lstm_neurons=128,\n k_hidden_layer_neurons=(128, 64, 32), verbose=1):\n # Set variables\n self.w2v_size = w2v_size\n self.w2v_window = w2v_window\n self.w2v_min_count = w2v_min_count\n self.w2v_epochs = w2v_epochs\n self.k_max_sequence_len = k_max_sequence_len\n self.k_batch_size = k_batch_size\n self.k_epochs = k_epochs\n self.k_lstm_neurons = k_lstm_neurons\n self.k_hidden_layer_neurons = k_hidden_layer_neurons\n\n # split text in tokens\n x_train = [gensim.utils.simple_preprocess(text) for text in x_train]\n\n logging.info(\"Build & train Word2Vec model\")\n self.w2v_model = gensim.models.Word2Vec(min_count=self.w2v_min_count, window=self.w2v_window,\n size=self.w2v_size,\n workers=multiprocessing.cpu_count())\n self.w2v_model.build_vocab(x_train)\n self.w2v_model.train(x_train, total_examples=self.w2v_model.corpus_count, epochs=self.w2v_epochs)\n w2v_words = list(self.w2v_model.wv.vocab)\n logging.info(\"Vocabulary size: %i\" % len(w2v_words))\n logging.info(\"Word2Vec trained\")\n\n logging.info(\"Fit LabelEncoder\")\n self.label_encoder = LabelEncoder()\n y_train = self.label_encoder.fit_transform(y_train)\n self.num_classes = len(self.label_encoder.classes_)\n y_train = utils.to_categorical(y_train, self.num_classes)\n\n logging.info(\"Fit Tokenizer\")\n self.tokenizer = Tokenizer()\n self.tokenizer.fit_on_texts(x_train)\n x_train = keras.preprocessing.sequence.pad_sequences(self.tokenizer.texts_to_sequences(x_train),\n maxlen=self.k_max_sequence_len)\n num_words = len(self.tokenizer.word_index) + 1\n logging.info(\"Number of unique words: %i\" % num_words)\n\n logging.info(\"Create Embedding matrix\")\n word_index = self.tokenizer.word_index\n vocab_size = len(word_index) + 1\n embedding_matrix = np.zeros((vocab_size, self.w2v_size))\n for word, idx in word_index.items():\n if word in w2v_words:\n embedding_vector = self.w2v_model.wv.get_vector(word)\n if embedding_vector is not None:\n embedding_matrix[idx] = self.w2v_model.wv[word]\n logging.info(\"Embedding matrix: %s\" % str(embedding_matrix.shape))\n\n logging.info(\"Build Keras model\")\n logging.info('x_train shape: %s' % str(x_train.shape))\n logging.info('y_train shape: %s' % str(y_train.shape))\n\n self.k_model = Sequential()\n self.k_model.add(Embedding(vocab_size,\n self.w2v_size,\n weights=[embedding_matrix],\n input_length=self.k_max_sequence_len,\n trainable=False))\n self.k_model.add(LSTM(self.k_lstm_neurons, dropout=0.5, recurrent_dropout=0.2))\n for hidden_layer in self.k_hidden_layer_neurons:\n self.k_model.add(Dense(hidden_layer, activation='relu'))\n self.k_model.add(Dropout(0.2))\n if self.num_classes > 1:\n self.k_model.add(Dense(self.num_classes, activation='softmax'))\n else:\n self.k_model.add(Dense(self.num_classes, activation='sigmoid'))\n\n self.k_model.compile(loss='categorical_crossentropy' if self.num_classes > 1 else 'binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n logging.info(self.k_model.summary())\n\n # Callbacks\n early_stopping = EarlyStopping(monitor='acc', patience=6, verbose=0, mode='max')\n rop = ReduceLROnPlateau(monitor='acc', factor=0.1, patience=3, verbose=1, epsilon=1e-4, mode='max')\n callbacks = [early_stopping, rop]\n\n logging.info(\"Fit Keras model\")\n self.k_model.fit(x_train, y_train,\n batch_size=self.k_batch_size,\n epochs=self.k_epochs,\n callbacks=callbacks,\n verbose=verbose)\n\n 
logging.info(\"Done\")", "def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))", "def create_cnn():\r\n\r\n # 1. 生成[词向量个数, 300维]的随机均匀分布\r\n word2vec_random = np.random.uniform(-1.0, 1.0, [len(DICTIONARY), EMBEDDING_SIZE])\r\n # 2. 使用预训练好的词向量替换掉随机生成的分布\r\n get_word2vec(DICTIONARY, word2vec_random, WORD2VEC_DIC)\r\n # 3. 
使用此分布创建Tensor对象\r\n phalanx = tf.Variable(initial_value=word2vec_random, dtype=tf.float32)\r\n\r\n embedded_chars = tf.nn.embedding_lookup(phalanx, x)\r\n embedded_expanded = tf.expand_dims(embedded_chars, -1)\r\n\r\n pooled_out = []\r\n for filter_window in FILTERS:\r\n # conv2d\r\n filter = tf.Variable(tf.random_normal([filter_window, EMBEDDING_SIZE, 1, NUM_FILTERS], stddev=0.1))\r\n \"\"\"\r\n 第2个维度为EMBEDDING_SIZE,是为了保证卷积是针对词与词之间的,而不会将词向量卷积掉。\r\n 例如输入为\r\n 我 [[1,0,5,9]\r\n 今 [7,6,0,5]\r\n 天 [5,6,8,8]\r\n 真 [9,0,1,5]\r\n 帅 [6,0,7,4]\r\n 。 [0,0,4,6]]\r\n EMBEDDING_SIZE等于4表示,我们是拿着一个[x,4]的框去框选这个方阵,来实现卷积的,可以保证词向量不会被分裂看待。\r\n \"\"\"\r\n conv = tf.nn.conv2d(embedded_expanded,\r\n filter,\r\n strides=[1, 1, 1, 1],\r\n padding=\"VALID\")\r\n b1 = tf.Variable(tf.constant(0.1, shape=[NUM_FILTERS]))\r\n l = tf.nn.relu(tf.nn.bias_add(conv, b1))\r\n\r\n # maxpooling\r\n pooled = tf.nn.max_pool(\r\n l,\r\n ksize=[1, INPUT_SIZE-filter_window+1, 1, 1],\r\n strides=[1, 1, 1, 1],\r\n padding='VALID'\r\n )\r\n pooled_out.append(pooled)\r\n\r\n # 拼合3个pooling的结果,准备输出给全连接层\r\n pooled_out_sum = tf.concat( pooled_out, 3)\r\n cnn_out = tf.reshape(pooled_out_sum, [-1, NUM_FILTERS*len(FILTERS)])\r\n # dropout\r\n cnn_out = tf.nn.dropout(cnn_out, dropout_keep_prob)\r\n\r\n # 添加3个全连接层\r\n w1 = tf.get_variable('w1', shape=[NUM_FILTERS*len(FILTERS), HIDDEN_DIM],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n if USE_L2:\r\n tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(L2_LAMBDA)(w1))\r\n b1 = tf.Variable(tf.constant(0.1, shape=[HIDDEN_DIM]))\r\n wb1 = tf.matmul(cnn_out, w1)+b1\r\n l1 = tf.nn.relu(wb1)\r\n\r\n w2 = tf.get_variable('w2', shape=[HIDDEN_DIM, HIDDEN_DIM],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n if USE_L2:\r\n tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(L2_LAMBDA)(w2))\r\n b2 = tf.Variable(tf.constant(0.1, shape=[HIDDEN_DIM]))\r\n wb2 = tf.matmul(l1, w2) + b2\r\n l2 = tf.nn.relu(wb2)\r\n\r\n w3 = tf.get_variable('w3', shape=[HIDDEN_DIM, OUTPUT_SIZE],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n if USE_L2:\r\n tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(L2_LAMBDA)(w3))\r\n b3 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_SIZE]))\r\n y = tf.matmul(l2, w3) + b3\r\n\r\n # loss\r\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\r\n tf.add_to_collection('losses', loss)\r\n total_loss = tf.add_n(tf.get_collection('losses'))\r\n optimizer = tf.train.AdamOptimizer(1e-3).minimize(total_loss)\r\n\r\n # 准确率\r\n correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n acc = tf.reduce_mean(tf.cast(correct, tf.float32))\r\n\r\n return [loss, optimizer, acc]", "def createConvoLayer(window_size,nb_lines,nb_classes,\n nb_filters_firstlayer = 250, kernel_size = 20,\n hidden_dims = 120,reg_coef_filter=0,reg_coef_dense=0):\n branch = Sequential()\n\n # TODO : since no column can have more than one line with a value for any\n # given column, why don't we simply concatenate the matrix as a line ?\n\n # The first layer will learn filters at the base TF level\n branch.add(Conv2D(filters = nb_filters_firstlayer, # Number of filters\n kernel_size=(kernel_size,nb_lines), # Filter shape in (width,height) order\n # IMPORTANT : The number of lines MUST be equal to the height of your matrix so as to perform UNIDIMENSIONAL CONVOLUTION\n input_shape=(nb_lines,window_size,1), # Shape of our data (rows,columns,c); c is the number of channels, here equal to 1\n activation='relu',\n padding='same',\n 
kernel_regularizer=regularizers.l2(reg_coef_filter),\n # Regularization is important both to prevent overfitting and to have human-readable elements later\n data_format = 'channels_last'))\n\n # Add a pooling layer here\n branch.add(MaxPooling2D(pool_size = (1,2))) # Do NOT pool on the y (vertical) axis (number of lines)\n\n # We need to flatten this to supply it to a Dense layer\n branch.add(Flatten())\n\n # Final layer : dense one to treat our filters\n branch.add(Dense(hidden_dims,activity_regularizer=regularizers.l2(reg_coef_dense)))\n branch.add(Activation('relu'))\n\n\n return branch", "def train_sentence_dm(model, sentence, lbls, alpha, work=None, neu1=None, train_words=True, train_lbls=True):\n lbl_indices = [lbl.index for lbl in lbls if lbl is not None]\n lbl_sum = np_sum(model.syn0[lbl_indices], axis=0)\n lbl_len = len(lbl_indices)\n neg_labels = []\n if model.negative:\n # precompute negative labels\n neg_labels = zeros(model.negative + 1)\n neg_labels[0] = 1.\n\n for pos, word in enumerate(sentence):\n if word is None:\n continue # OOV word in the input sentence => skip\n reduced_window = random.randint(model.window) # `b` in the original doc2vec code\n start = max(0, pos - model.window + reduced_window)\n window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)\n word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]\n l1 = np_sum(model.syn0[word2_indices], axis=0) + lbl_sum # 1 x layer1_size\n if word2_indices and model.cbow_mean:\n l1 /= (len(word2_indices) + lbl_len)\n neu1e = train_cbow_pair(model, word, word2_indices, l1, alpha, neg_labels, train_words, train_words)\n if train_lbls:\n model.syn0[lbl_indices] += neu1e\n\n return len([word for word in sentence if word is not None])", "def conv_model(conv_width, out_steps, num_features=18):\r\n model = Sequential(name='Convolutional_Neural_Network')\r\n model.add(Lambda(lambda x: x[:, -conv_width:, :]))\r\n model.add(Conv1D(256, activation='relu', kernel_size=conv_width))\r\n model.add(Dense(out_steps*num_features,\r\n kernel_initializer=tf.initializers.zeros()))\r\n model.add(Reshape([out_steps, num_features]))\r\n return model", "def build_character_cnn(model_hyperparameters=None, verbose=None):\r\n if model_hyperparameters is None:\r\n model_hyperparameters = _dutils.load_dictionary('model_hyperparameters.json')\r\n '''\r\n Load hyperparameter-specific values from JSON file.\r\n '''\r\n #The size of the characater vocabulary\r\n vocabulary_size = model_hyperparameters.get(\"vocabulary_size\")\r\n #The max length of the text. Set as 1014 in the original.\r\n text_length = model_hyperparameters.get(\"text_length\")\r\n #Number of filters for each convolutional layer\r\n num_filters = model_hyperparameters.get(\"num_filters\")\r\n #The threshold for the ReLU activation layers\r\n threshold = model_hyperparameters.get(\"relu_threshold\")\r\n #Dropout probability for Dropout layers\r\n dropout_p = model_hyperparameters.get(\"dropout_percent\")\r\n #Embedding output dimension. 
Implementation sets it equal to vocabulary_size\r\n embed_dim = model_hyperparameters.get(\"embedding_dimension\")\r\n '''\r\n Values below specify the architecture.\r\n These aren't stored in the JSON file due to\r\n architectutre constraints with layers and\r\n kernel sizes.\r\n '''\r\n #The number of units for each dense layer minus output layer\r\n fully_connected_layers = [128,64]\r\n '''\r\n conv_layers is a list of pairs.\r\n First component refers to kernel size.\r\n Second component refers to the size of\r\n the MaxPooling1D layer (-1 indicates said layer is not present).\r\n '''\r\n conv_layers = [[7, 3], [3,-1], [3,-1], [3,-1], [3, 3]]\r\n #Input layer\r\n inputs = Input(shape=(text_length,), name='sent_input', dtype='int32')\r\n #Embedding layers\r\n x = Embedding(vocabulary_size + 1, embed_dim, input_length=text_length, mask_zero=True)(inputs)\r\n #Convolution layers\r\n '''\r\n First Conv1D layer + MaxPooling is separate in case\r\n changes are made upstream. Also it was used to test out\r\n TimeDistributed functionality.\r\n '''\r\n x = (Convolution1D(num_filters, 7))(x)\r\n x = (MaxPooling1D(3))(x)\r\n for cl in conv_layers:\r\n x = (Convolution1D(num_filters, cl[0]))(x)\r\n x = ThresholdedReLU(threshold)(x)\r\n if cl[1] != -1:\r\n x = (MaxPooling1D(cl[1]))(x)\r\n\r\n x = Flatten()(x)\r\n # #Fully connected layers\r\n for fl in fully_connected_layers:\r\n '''\r\n Original architecture did not use L2 regularization.\r\n However, empirical results show that, for my dataset\r\n it works well in handling overfitting.\r\n '''\r\n x = Dense(fl, kernel_regularizer=regularizers.l2(0.0001))(x)\r\n x = ThresholdedReLU(threshold)(x)\r\n '''\r\n Original architecture had dropout at 50%.\r\n This seemed to be too high for my dataset, and\r\n it resulted in underfitting.\r\n '''\r\n x = Dropout(dropout_p)(x)\r\n # #Output layer\r\n predictions = Dense(vocabulary_size, activation='softmax')(x)\r\n # Build and compile model\r\n model = Model(inputs=inputs, outputs=predictions) \r\n if verbose:\r\n model.summary()\r\n return model", "def __init__(self, x, filter_shape, bias=True, stride=1, pad_size=0, pad_mode='CONSTANT',\n is_training=True, niter=1, stop_grad_sigma=False,\n name='sn_conv2d', filler=('msra', 0., 1.), update_collection=None):\n super(SpecNormConv2d, self).__init__(name, update_collection)\n # inputs\n self.inputs.append(x)\n in_shape = x.shape.as_list()\n if len(filter_shape) == 3:\n # get chn_in from input tensor\n kin = in_shape[-1]\n kout = filter_shape[-1]\n filter_shape[-1] = kin\n filter_shape.append(kout)\n kh, kw, kin, kout = filter_shape\n with tf.variable_scope(name) as scope:\n # padding\n padding = 'VALID'\n if pad_size == -1:\n # 'SAME' padding\n if pad_mode == 'CONSTANT':\n padding = 'SAME'\n else:\n w_in = in_shape[-2]\n if w_in % stride == 0:\n pad_size_both = max(kw - stride, 0)\n else:\n pad_size_both = max(kw - (w_in % stride), 0)\n if pad_size_both > 0:\n pad_size = pad_size_both / 2\n x = tf.pad(x, [[0,0], [pad_size, pad_size_both-pad_size],\n [pad_size, pad_size_both-pad_size], [0,0]], pad_mode)\n elif pad_size > 0:\n # pad_size padding on both sides of each dimension\n x = tf.pad(x, [[0,0], [pad_size, pad_size], [pad_size, pad_size], [0,0]], pad_mode)\n # initializer for convolutional kernel\n initializer = None\n if filler[0] == 'uniform':\n initializer = tf.random_uniform_initializer(filler[1], filler[2])\n elif filler[0] == 'msra':\n fan_in = kh * kw * kin\n stdev = np.sqrt(2. 
/ ((filler[1]**2 + filler[2]**2) * fan_in))\n initializer = tf.truncated_normal_initializer(0., stdev)\n elif filler[0] == 'gaussian':\n initializer = tf.truncated_normal_initializer(filler[1], filler[2])\n else:\n raise ValueError('Invalid filler type: %s' % (filler[0]))\n # params\n weight = tf.get_variable('weight', shape=filter_shape, dtype=TF_DTYPE, initializer=initializer)\n self.params.append(weight)\n # update_params\n u = tf.get_variable('u', [1, kout], dtype=TF_DTYPE,\n initializer=tf.truncated_normal_initializer(), trainable=False)\n sigma = tf.get_variable('sigma', [], dtype=TF_DTYPE,\n initializer=tf.constant_initializer(1.), trainable=False)\n self.update_params.extend([u, sigma])\n # normalize weight\n if is_training:\n weight_normalized, u_new, sigma_new = spec_norm_weight(weight, u, niter, stop_grad_sigma)\n else:\n weight_normalized = weight / sigma\n u_new, sigma_new = None, None\n # udpate_ops\n def get_update_ops(update_collection=update_collection):\n if self._update_ops is None:\n self._update_ops = list()\n with tf.name_scope(scope.original_name_scope):\n with tf.name_scope(update_collection, default_name='default'):\n self._update_ops.extend([u.assign(u_new), sigma.assign(sigma_new)])\n return self._update_ops\n if is_training:\n self.update_ops_getter = get_update_ops\n # conv2d\n y = tf.nn.conv2d(x, weight_normalized, [1, stride, stride, 1], padding=padding)\n # add channel-wise bias\n if bias:\n b = tf.get_variable('bias', shape=kout, dtype=TF_DTYPE, initializer=tf.constant_initializer(0.))\n self.params.append(b)\n y = tf.nn.bias_add(y, b)\n # outputs\n self.outputs.append(y)\n self.print_info(LAYERS_VERBOSE)", "def __init__(self, char_embedding_size, output_embedding_size, max_word_len=21, k=5, bias=True):\n super(CNN, self).__init__()\n self.conv = nn.Conv1d(char_embedding_size,\n output_embedding_size, k, bias=bias)\n conv_output_size = max_word_len - k + 1\n self.maxpool = nn.MaxPool1d(conv_output_size)", "def create_cnn(num_half_rows, num_half_columns, num_channels):\n\n error_checking.assert_is_integer(num_half_rows)\n error_checking.assert_is_integer(num_half_columns)\n error_checking.assert_is_integer(num_channels)\n\n error_checking.assert_is_greater(num_half_rows, 0)\n error_checking.assert_is_greater(num_half_columns, 0)\n error_checking.assert_is_greater(num_channels, 0)\n\n regularizer_object = keras.regularizers.l1_l2(l1=L1_WEIGHT, l2=L2_WEIGHT)\n\n num_grid_rows = 2 * num_half_rows + 1\n num_grid_columns = 2 * num_half_columns + 1\n input_layer_object = keras.layers.Input(\n shape=(num_grid_rows, num_grid_columns, num_channels)\n )\n\n current_num_filters = None\n current_layer_object = None\n\n # Add convolutional layers.\n for _ in range(NUM_CONV_LAYER_SETS):\n for _ in range(NUM_CONV_LAYERS_PER_SET):\n\n if current_num_filters is None:\n current_num_filters = (\n num_channels * NUM_CHANNELS_TO_FIRST_NUM_FILTERS)\n this_input_layer_object = input_layer_object\n\n else:\n current_num_filters *= 2\n this_input_layer_object = current_layer_object\n\n current_layer_object = keras.layers.Conv2D(\n filters=current_num_filters,\n kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),\n strides=(1, 1), padding='valid', data_format='channels_last',\n dilation_rate=(1, 1), activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(this_input_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if 
CONV_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=CONV_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n current_layer_object = keras.layers.MaxPooling2D(\n pool_size=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n strides=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n padding='valid', data_format='channels_last'\n )(current_layer_object)\n\n these_dimensions = numpy.array(\n current_layer_object.get_shape().as_list()[1:], dtype=int)\n num_features = numpy.prod(these_dimensions)\n\n current_layer_object = keras.layers.Flatten()(current_layer_object)\n\n # Add intermediate dense layers.\n _, num_outputs_by_dense_layer = (\n architecture_utils.get_dense_layer_dimensions(\n num_input_units=num_features, num_classes=NUM_CLASSES,\n num_dense_layers=NUM_DENSE_LAYERS)\n )\n\n for k in range(NUM_DENSE_LAYERS - 1):\n current_layer_object = keras.layers.Dense(\n num_outputs_by_dense_layer[k], activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n # Add output layer (also dense).\n current_layer_object = keras.layers.Dense(\n NUM_CLASSES, activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.Activation(\n 'softmax'\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None and NUM_DENSE_LAYERS == 1:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n # Put the whole thing together and compile.\n cnn_model_object = keras.models.Model(\n inputs=input_layer_object, outputs=current_layer_object)\n cnn_model_object.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=LIST_OF_METRIC_FUNCTIONS)\n\n cnn_model_object.summary()\n return cnn_model_object", "def stem(input): 
\r\n\tl1=Conv2D(32,(3,3),strides=(2,2),activation='relu',padding='same')(input)\r\n\tl2=Conv2D(32,(3,3),strides=(2,2),activation='relu',padding='same')(l1)\r\n\tl3=Conv2D(64,(3,3),activation='relu',padding='same')(l2)\r\n\tl4_1=Conv2D(96,(3,3),strides=(2,2),activation='relu',padding='same')(l3)\r\n\tl4_2=Conv2D(96,(3,3),strides=(2,2),activation='relu',padding='same')(l3)\r\n\tl5=concatenate([l4_1,l4_2])\r\n\r\n\tl6_1=Conv2D(64,(1,1),activation='relu',padding='same')(l5)\r\n\tl6_2=Conv2D(96,(3,3),strides=(2,2),activation='relu',padding='same')(l6_1)\r\n\r\n\tl7_1=Conv2D(64,(1,1),activation='relu',padding='same')(l5)\r\n\tl7_2=Conv2D(64,(7,1),activation='relu',padding='same')(l7_1)\r\n\tl7_3=Conv2D(64,(1,7),activation='relu',padding='same')(l7_2)\r\n\tl7_4=Conv2D(96,(3,3),strides=(2,2),activation='relu',padding='same')(l7_3)\r\n\r\n\tl8=concatenate([l6_2,l7_4])\r\n\r\n\tl9_1=Conv2D(192,(3,3),strides=(2,2),activation='relu',padding='same')(l8)\r\n\tl9_2=MaxPooling2D((3,3),strides=(2,2),padding='same')(l8)\r\n\r\n\toutput=concatenate([l9_1,l9_2])\r\n\r\n\treturn output", "def init_wide_conv(self):\n self._layers = [\n tf.keras.layers.Conv2D(input_shape=(MAP_SIZE_x, MAP_SIZE_y, CHANNELS),\n filters=64, kernel_size=KERNEL_SIZE, padding='same', activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(pool_size=(3,3), strides=2),\n tf.keras.layers.Conv2D(filters=192, kernel_size=KERNEL_SIZE, padding='same', activation=tf.nn.relu),\n tf.keras.layers.MaxPooling2D(pool_size=(3,3), strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=(3,3), padding='same', activation=tf.nn.relu),\n tf.keras.layers.Conv2D(filters=384, kernel_size=(3,3), padding='same', activation=tf.nn.relu),\n tf.keras.layers.Conv2D(filters=192, kernel_size=(3,3), padding='same', activation=tf.nn.relu),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(units=10, activation=tf.nn.relu),\n tf.keras.layers.Dense(units=5)\n ]", "def train():\n\n # Set the random seeds for reproducibility. 
DO NOT CHANGE.\n tf.set_random_seed(42)\n np.random.seed(42)\n\n ########################\n # PUT YOUR CODE HERE #\n ########################\n #first lets test that out model works:\n \n #initialize:\n \n weight_init_scale = 0.001\n cifar10 = cifar10_utils.get_cifar10(validation_size=100)\n\n cnet = ConvNet(10)\n \n x_in = tf.placeholder(tf.float32, [None,32,32,3])\n y_true = tf.placeholder(tf.float32, [None,10])\n \n with tf.variable_scope(\"ConvNet\",reuse=None):\n filter1=tf.get_variable(\"filter1\",initializer=tf.random_normal([5,5,3,64], stddev=weight_init_scale, dtype=tf.float32))\n filter2=tf.get_variable(\"filter2\",initializer=tf.random_normal([5,5,64,64], stddev=weight_init_scale, dtype=tf.float32))\n\n \n W1=tf.get_variable(\"W1\",initializer=tf.random_normal([4096,384], stddev=weight_init_scale, dtype=tf.float32))\n W2=tf.get_variable(\"W2\", initializer= tf.random_normal([384, 192], stddev=weight_init_scale, dtype=tf.float32))\n W3=tf.get_variable(\"W3\", initializer = tf.random_normal([192,10], stddev=weight_init_scale, dtype=tf.float32))\n \n \n sess = tf.Session()\n saver = tf.train.Saver()\n #define things\n logits, flatten, fc1, fc2 = cnet.inference(x_in)\n \n \n loss= cnet.loss(logits,y_true)\n \n \n \n \n acc = cnet.accuracy(logits, y_true)\n opt_iter = train_step(loss)\n sess.run(tf.initialize_all_variables())\n \n swriter = tf.train.SummaryWriter(FLAGS.log_dir+ '/ConvNet', sess.graph)\n \n\n #xbat, ybat = cifar10.train.next_batch(100)\n \n #begin the training\n with sess:\n \n # loop\n for i in range(FLAGS.max_steps+1):\n xbat, ybat = cifar10.train.next_batch(FLAGS.batch_size)\n sess.run(opt_iter, feed_dict={x_in:xbat, y_true:ybat})\n if i % FLAGS.print_freq == 0:\n xbat, ybat = cifar10.validation.next_batch(100)\n val_acc, val_loss = sess.run([acc,loss], feed_dict={x_in:xbat, y_true:ybat})\n \n sys.stderr.write(\"iteration : \" + str(i)\n + \", validation loss : \" \n + str(val_loss)\n + \", validation_accuracy\"\n + str(val_acc) \n + \"\\n\")\n swriter.add_summary(\n sess.run(tf.scalar_summary(\"accuracy\", val_acc),\n feed_dict = {x_in : xbat, y_true: ybat}), i)\n\n \n if i% FLAGS.checkpoint_freq == 0:\n lo, flatsave, fc1save, fc2save = sess.run(cnet.inference(x_in), feed_dict={x_in:xbat, y_true:ybat})\n np.save(FLAGS.checkpoint_dir +\"/ConvNet/flatten\", flatsave)\n np.save(FLAGS.checkpoint_dir + \"/ConvNet/fc1\", fc1save)\n np.save(FLAGS.checkpoint_dir + \"/ConvNet/fc2\", fc2save)\n np.save(FLAGS.checkpoint_dir + \"/ConvNet/labels\", ybat)\n saver.save(sess, FLAGS.checkpoint_dir + \n \"/ConvNet/\" + \"checkpoint.ckpt\")\n \n \n if i%FLAGS.eval_freq ==0:\n xbat, ybat = cifar10.test.next_batch(100)\n \n sys.stderr.write(\"test accuracy:\" + str(sess.run(acc, feed_dict={x_in:xbat, y_true:ybat})) + \"\\n\")\n \n \n \n ########################\n # END OF YOUR CODE #\n ########################", "def __init__(self, voc_size=8000, embed_size=100, hid_size=100, trunc=4,\n model=None):\n\n self.log = logging.getLogger(\"TEST.Embed\")\n self.log.setLevel(logging.INFO)\n\n self.unknown_token = \"UNKNOWN_TOKEN\"\n self.sentence_start_token = \"SENTENCE_START\"\n self.sentence_end_token = \"SENTENCE_END\"\n\n if model is None:\n self.log.info(\"Initializing RNN parameters and functions...\")\n\n self.vocabulary_size = voc_size\n self.embed_size = embed_size\n self.hidden_size = hid_size\n self.bptt_truncate = trunc\n\n # Instantiate the network weights\n # I feel like the first and third are switched for some reason...\n # but it's pretty consistent in the example code. 
Perhaps it's\n # backwards for a purpose\n # The weights going from the input layer to the word embedding\n # layer (E, in tutorial)\n weights_ie = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (embed_size, voc_size))\n\n # The weights going from input layer to hidden layer\n # (U, in tutorial)\n weights_eh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, embed_size))\n\n # The weights going from hidden layer to hidden layer\n # (W, in tutorial)\n weights_hh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, hid_size))\n\n # The weights going from hidden layer to output layer\n # (V, in tutorial)\n weights_ho = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (voc_size, hid_size))\n\n # The bias for the hidden units (no bias applied to embedding layer)\n bias = np.zeros((3, hid_size))\n\n # The bias for the output units\n out_bias = np.zeros(voc_size)\n\n self.weights_ie = theano.shared(\n name='weights_ie',\n value=weights_ie.astype(theano.config.floatX))\n\n self.weights_eh = theano.shared(\n name='weights_eh',\n value=weights_eh.astype(theano.config.floatX))\n\n self.weights_hh = theano.shared(\n name='weights_hh',\n value=weights_hh.astype(theano.config.floatX))\n\n self.weights_ho = theano.shared(\n name='weights_ho',\n value=weights_ho.astype(theano.config.floatX))\n\n self.bias = theano.shared(\n name='bias',\n value=bias.astype(theano.config.floatX))\n\n self.out_bias = theano.shared(\n name='out_bias',\n value=out_bias.astype(theano.config.floatX))\n\n self.cache_ie = theano.shared(\n name='cache_ie',\n value=np.zeros(weights_ie.shape).astype(theano.config.floatX))\n\n self.cache_eh = theano.shared(\n name='cache_eh',\n value=np.zeros(weights_eh.shape).astype(theano.config.floatX))\n\n self.cache_hh = theano.shared(\n name='cache_hh',\n value=np.zeros(weights_hh.shape).astype(theano.config.floatX))\n\n self.cache_ho = theano.shared(\n name='cache_ho',\n value=np.zeros(weights_ho.shape).astype(theano.config.floatX))\n\n self.cache_bias = theano.shared(\n name='cache_bias',\n value=np.zeros(bias.shape).astype(theano.config.floatX))\n\n self.cache_out_bias = theano.shared(\n name='cache_out_bias',\n value=np.zeros(out_bias.shape).astype(theano.config.floatX))\n\n self.vocabulary = []\n self.word_to_index = {}\n self.index_to_word = []\n else:\n self.log.info(\"Loading model parameters from saved model...\")\n\n with open(model, \"rb\") as modelFile:\n params = cPickle.load(modelFile)\n\n self.vocabulary_size = params[0]\n self.embed_size = params[1]\n self.hidden_size = params[2]\n self.bptt_truncate = params[3]\n\n self.weights_ie = params[4]\n self.weights_eh = params[5]\n self.weights_hh = params[6]\n self.weights_ho = params[7]\n\n self.vocabulary = params[8]\n if not self.vocabulary[-1] == self.unknown_token:\n self.log.info(\"Appending unknown token\")\n self.vocabulary[-1] = self.unknown_token\n self.index_to_word = params[9]\n self.word_to_index = params[10]\n\n self.bias = params[11]\n self.out_bias = params[12]\n\n self.cache_ie = params[13]\n self.cache_eh = params[14]\n self.cache_hh = params[15]\n self.cache_ho = params[16]\n self.cache_bias = params[17]\n self.cache_out_bias = params[18]\n # End of if statement\n\n # Symbolic representation of one input sentence\n input = T.ivector('sentence')\n\n # Symbolic representation of the one output sentence\n output = T.ivector('sentence')\n\n # Symbolic representation of the cache decay for RMSprop\n decay = 
T.scalar('decay')\n\n # Stochastic Gradient Descent step\n learning_rate = T.scalar('learning_rate')\n\n def forward_propagate(word, previous_state):\n \"\"\"\n Vertically propagates one of the words.\n\n :type word: int\n :param word: the index of the current input word\n\n :type previous_state: T.dvector()\n :param word: the output of the hidden layer from the previous\n horizontal layer\n \"\"\"\n # Embedding layer\n word_vector = self.weights_ie[:, word]\n\n # GRU layer\n update_gate = T.nnet.hard_sigmoid(\n self.weights_eh[0].dot(word_vector) +\n self.weights_hh[0].dot(previous_state) +\n self.bias[0]\n )\n\n reset_gate = T.nnet.hard_sigmoid(\n self.weights_eh[1].dot(word_vector) +\n self.weights_hh[1].dot(previous_state) +\n self.bias[1]\n )\n\n hypothesis = T.tanh(\n self.weights_eh[2].dot(word_vector) +\n self.weights_hh[2].dot(previous_state * reset_gate) +\n self.bias[2]\n )\n\n current_state = (T.ones_like(update_gate) - update_gate) * hypothesis + update_gate * previous_state\n\n # Output layer\n current_output = T.nnet.softmax(\n self.weights_ho.dot(current_state) + self.out_bias\n )[0]\n\n # Not sure why current_output[0] and not just current_output...\n return [current_output, current_state]\n\n #######################################################################\n # Symbolically represents going through each input sentence word and\n # then calculating the state of the hidden layer and output word for\n # each word. The forward_propagate function is the one used to\n # generate the output word and hidden layer state.\n #######################################################################\n self.theano = {}\n\n [out, state], updates = theano.scan(\n forward_propagate,\n sequences=input,\n truncate_gradient=self.bptt_truncate,\n outputs_info=[None, dict(initial=T.zeros(self.hidden_size))],\n name=\"forward_propagate\"\n )\n\n # Predicts the output words for each word in the sentence\n prediction = T.argmax(out, axis=1)\n\n # Calculates the output error between the predicted output and the\n # actual output\n out_error = T.sum(T.nnet.categorical_crossentropy(out, output))\n\n # Symbolically represents gradient calculations for gradient descent\n d_weights_ie = T.grad(out_error, self.weights_ie)\n d_weights_eh = T.grad(out_error, self.weights_eh)\n d_weights_hh = T.grad(out_error, self.weights_hh)\n d_weights_ho = T.grad(out_error, self.weights_ho)\n d_bias = T.grad(out_error, self.bias)\n d_out_bias = T.grad(out_error, self.out_bias)\n\n # Symbolic theano functions\n self.forward_propagate = theano.function([input], out,\n name=\"forward_propagate\")\n self.predict = theano.function([input], prediction, name=\"predict\")\n self.calculate_error = theano.function([input, output], out_error,\n name=\"calculate_error\")\n self.bptt = theano.function([input, output],\n [d_weights_ie, d_weights_eh, d_weights_hh, d_weights_ho, d_bias,\n d_out_bias],\n name=\"bptt\")\n\n # RMSprop parameters\n cache_ie = (decay * self.cache_ie) + ((1 - decay) * d_weights_ie ** 2)\n cache_eh = (decay * self.cache_eh) + ((1 - decay) * d_weights_eh ** 2)\n cache_hh = (decay * self.cache_hh) + ((1 - decay) * d_weights_hh ** 2)\n cache_ho = (decay * self.cache_ho) + ((1 - decay) * d_weights_ho ** 2)\n cache_bias = (decay * self.cache_bias) + ((1 - decay) * d_bias ** 2)\n cache_out_bias = (decay * self.cache_out_bias) + ((1 - decay) * d_out_bias ** 2)\n eps = 1e-6 # Prevents division by 0\n\n self.sgd_step = theano.function(\n [input, output, learning_rate, theano.In(decay, value=0.9)],\n [],\n 
updates=[\n (self.weights_ie, self.weights_ie - learning_rate *\n d_weights_ie / (T.sqrt(self.cache_ie + eps))),\n (self.weights_eh, self.weights_eh - learning_rate *\n d_weights_eh / (T.sqrt(self.cache_eh + eps))),\n (self.weights_hh, self.weights_hh - learning_rate *\n d_weights_hh / (T.sqrt(self.cache_hh + eps))),\n (self.weights_ho, self.weights_ho - learning_rate *\n d_weights_ho / (T.sqrt(self.cache_ho + eps))),\n (self.bias, self.bias - learning_rate * d_bias /\n (T.sqrt(self.cache_bias + eps))),\n (self.out_bias, self.out_bias - learning_rate *\n d_out_bias / (T.sqrt(self.cache_out_bias + eps))),\n (self.cache_ie, cache_ie),\n (self.cache_eh, cache_eh),\n (self.cache_hh, cache_hh),\n (self.cache_ho, cache_ho),\n (self.cache_bias, cache_bias),\n (self.cache_out_bias, cache_out_bias)]\n )\n\n self.x_train = None\n self.y_train = None", "def trainNet():", "def dpcnn_two_layers_conv(self, inputs,double_num_filters=True):\n # conv1:\n # filter1's first three dimension apply to [total_sequence_length, embed_size, 1] of embedding_documents\n print(\"dpcnn_two_layers_conv.inputs:\", inputs) # (128, 400, 64, 250)\n channel = inputs.get_shape().as_list()[-1]\n if double_num_filters:\n hpcnn_number_filters =channel * 2\n else:\n hpcnn_number_filters=self.hpcnn_number_filters\n filter1 = tf.get_variable(\"filter1-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, channel, hpcnn_number_filters],initializer=self.initializer)\n conv1 = tf.nn.conv2d(inputs, filter1, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv\") # shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n conv1 = tf.contrib.layers.batch_norm(conv1, is_training=self.is_training_flag, scope='cnn1')\n\n print(\"dpcnn_two_layers_conv.conv1:\", conv1) # (128, 400, 64, 250)\n b1 = tf.get_variable(\"b-cnn-%s\" % hpcnn_number_filters, [hpcnn_number_filters])\n conv1 = tf.nn.relu(tf.nn.bias_add(conv1, b1),\"relu1\") # shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n\n # conv2\n # filter2's first three dimension apply to:[total_sequence_length,embed_size,hpcnn_number_filters] of conv1\n filter2 = tf.get_variable(\"filter2-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, hpcnn_number_filters, hpcnn_number_filters],initializer=self.initializer)\n conv2 = tf.nn.conv2d(conv1, filter2, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv2\") # shape:[batch_size,stotal_sequence_length,embed_size,hpcnn_number_filters]\n conv2 = tf.contrib.layers.batch_norm(conv2, is_training=self.is_training_flag, scope='cnn2')\n\n print(\"dpcnn_two_layers_conv.conv2:\", conv2) # (128, 400, 64, 250)\n return conv2 # shape:[batch_size,total_sequence_length,embed_size,num_filters]", "def captionme(args, modelfn):\n\n imgs, imgs_fn = load_images(args.image_dir)\n\n #For trained model released with the code\n batchsize = 1\n max_tokens = 15\n num_layers = 3 \n is_attention = True \n worddict_tmp = pickle.load(open('data/wordlist.p', 'rb'))\n wordlist = [l for l in iter(worddict_tmp.keys()) if l != '</S>']\n wordlist = ['EOS'] + sorted(wordlist)\n numwords = len(wordlist)\n\n model_imgcnn = Vgg16Feats()\n model_imgcnn.cuda() \n\n model_convcap = convcap(numwords, num_layers, is_attention = is_attention)\n model_convcap.cuda()\n\n print('[DEBUG] Loading checkpoint %s' % modelfn)\n checkpoint = torch.load(modelfn)\n model_convcap.load_state_dict(checkpoint['state_dict'])\n model_imgcnn.load_state_dict(checkpoint['img_state_dict'])\n\n model_imgcnn.train(False) \n 
model_convcap.train(False)\n\n pred_captions = []\n for batch_idx, (img_fn) in \\\n tqdm(enumerate(imgs_fn), total=len(imgs_fn)):\n \n img = imgs[batch_idx, ...].view(batchsize, 3, 224, 224)\n\n img_v = Variable(img.cuda())\n imgfeats, imgfc7 = model_imgcnn(img_v)\n\n b, f_dim, f_h, f_w = imgfeats.size()\n imgfeats = imgfeats.unsqueeze(1).expand(\\\n b, args.beam_size, f_dim, f_h, f_w)\n imgfeats = imgfeats.contiguous().view(\\\n b*args.beam_size, f_dim, f_h, f_w)\n\n b, f_dim = imgfc7.size()\n imgfc7 = imgfc7.unsqueeze(1).expand(\\\n b, args.beam_size, f_dim)\n imgfc7 = imgfc7.contiguous().view(\\\n b*args.beam_size, f_dim)\n\n beam_searcher = beamsearch(args.beam_size, batchsize, max_tokens)\n \n wordclass_feed = np.zeros((args.beam_size*batchsize, max_tokens), dtype='int64')\n wordclass_feed[:,0] = wordlist.index('<S>') \n outcaps = np.empty((batchsize, 0)).tolist()\n\n for j in range(max_tokens-1):\n wordclass = Variable(torch.from_numpy(wordclass_feed)).cuda()\n\n wordact, attn = model_convcap(imgfeats, imgfc7, wordclass)\n wordact = wordact[:,:,:-1]\n wordact_j = wordact[..., j]\n\n beam_indices, wordclass_indices = beam_searcher.expand_beam(wordact_j) \n\n if len(beam_indices) == 0 or j == (max_tokens-2): # Beam search is over.\n generated_captions = beam_searcher.get_results()\n for k in range(batchsize):\n g = generated_captions[:, k]\n outcaps[k] = [wordlist[x] for x in g]\n else:\n wordclass_feed = wordclass_feed[beam_indices]\n imgfc7 = imgfc7.index_select(0, Variable(torch.cuda.LongTensor(beam_indices)))\n imgfeats = imgfeats.index_select(0, Variable(torch.cuda.LongTensor(beam_indices)))\n for i, wordclass_idx in enumerate(wordclass_indices):\n wordclass_feed[i, j+1] = wordclass_idx\n\n for j in range(batchsize):\n num_words = len(outcaps[j]) \n if 'EOS' in outcaps[j]:\n num_words = outcaps[j].index('EOS')\n outcap = ' '.join(outcaps[j][:num_words])\n pred_captions.append({'img_fn': img_fn, 'caption': outcap})\n\n return pred_captions", "def all_views_conv_layer(input_layer, layer_name, number_of_filters=32, filter_size=(3, 3), stride=(1, 1),\r\n padding='VALID', biases_initializer=tf.zeros_initializer()):\r\n\r\n\r\n h = tf.contrib.layers.convolution2d(inputs=input_layer, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=biases_initializer)\r\n\r\n return h", "def dpcnn_two_layers_conv(self, inputs,double_num_filters=False):\n # conv1:\n # filter1's first three dimension apply to [total_sequence_length, embed_size, 1] of embedding_documents\n print(\"dpcnn_two_layers_conv.inputs:\", inputs) # (128, 400, 64, 250)\n channel = inputs.get_shape().as_list()[-1]\n if double_num_filters:\n hpcnn_number_filters =channel * 2\n else:\n hpcnn_number_filters=self.hpcnn_number_filters\n filter1 = tf.get_variable(\"filter1-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, channel, hpcnn_number_filters],initializer=self.initializer)\n conv1 = tf.nn.conv2d(inputs, filter1, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv\") # shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n conv1 = tf.contrib.layers.batch_norm(conv1, is_training=self.is_training_flag, scope='cnn1')\n\n print(\"dpcnn_two_layers_conv.conv1:\", conv1) # (128, 400, 64, 250)\n b1 = tf.get_variable(\"b-cnn-%s\" % hpcnn_number_filters, [hpcnn_number_filters])\n conv1 = tf.nn.relu(tf.nn.bias_add(conv1, b1),\"relu1\") # 
shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n\n # conv2\n # filter2's first three dimension apply to:[total_sequence_length,embed_size,hpcnn_number_filters] of conv1\n filter2 = tf.get_variable(\"filter2-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, hpcnn_number_filters, hpcnn_number_filters],initializer=self.initializer)\n conv2 = tf.nn.conv2d(conv1, filter2, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv2\") # shape:[batch_size,stotal_sequence_length,embed_size,hpcnn_number_filters]\n conv2 = tf.contrib.layers.batch_norm(conv2, is_training=self.is_training_flag, scope='cnn2')\n\n print(\"dpcnn_two_layers_conv.conv2:\", conv2) # (128, 400, 64, 250)\n return conv2 # shape:[batch_size,total_sequence_length,embed_size,num_filters]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the absolute path to a valid plugins.cfg file. Copied from sf_OIS.py
def getPluginPath():
    import sys
    import os
    import os.path
    paths = [os.path.join(os.getcwd(), 'plugins.cfg'),
             '/etc/OGRE/plugins.cfg',
             os.path.join(os.path.dirname(os.path.abspath(__file__)), 'plugins.cfg')]
    for path in paths:
        if os.path.exists(path):
            return path

    sys.stderr.write("\n"
                     "** Warning: Unable to locate a suitable plugins.cfg file.\n"
                     "** Warning: Please check your ogre installation and copy a\n"
                     "** Warning: working plugins.cfg file to the current directory.\n\n")
    raise ogre.Exception(0, "can't locate the 'plugins.cfg' file", "")
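As a rough illustration of how such a helper is usually consumed, a sketch assuming the python-ogre import style; the import and Root call are assumptions, not part of the record:

# Hypothetical usage: feed the located plugins.cfg into an OGRE Root object.
import ogre.renderer.OGRE as ogre

plugins_path = getPluginPath()
ogre_root = ogre.Root(plugins_path)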
[ "def get_plugin_dir(self, f): # f should be __file__\n d = f.split(os.sep)[-2]\n return os.path.abspath( os.path.join(\"plugins\", d) )", "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def get_config_file():\n home_path = path_join(expanduser('~'), CONFIG_FILENAME)\n cwd_path = path_join(getcwd(), CONFIG_FILENAME)\n if isfile(home_path):\n return home_path\n elif isfile(cwd_path):\n return cwd_path\n return None", "def get_plugins_dir(env):\n plugins_dir = os.path.realpath(\".\")\n path = root_path()\n return os.path.normcase(path)", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def config_path(self):\n\n return os.path.join(self.git.toplevel_path, CONFIG_FILENAME)", "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def _find_config_path(self):\n for _dir in (os.environ['WinDir'], self.install_dir):\n path = os.path.join(_dir, 'Sandboxie.ini')\n if os.path.exists(path):\n return path\n return None", "def get_plugins_base_dir():\n if \"MFMODULE_PLUGINS_BASE_DIR\" in os.environ:\n return os.environ.get(\"MFMODULE_PLUGINS_BASE_DIR\")\n return os.path.join(RUNTIME_HOME, \"var\", \"plugins\")", "def _get_egg_path(self):\n try:\n _dist = get_distribution('janitoo_nut')\n return _dist.__file__\n except AttributeError:\n return 'src-nut/config'", "def __get_project_config_path(self):\r\n return osp.join(self.root_path, self.CONFIG_NAME)", "def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()", "def get_plugin_directory_path(self):\n return os.path.join(MODULE_RUNTIME_HOME,\n 'var', 'plugins', self.plugin_name)", "def get_config_filepath():\n scs_installation_dirs = _path_utils.get_addon_installation_paths()\n\n # SEARCH FOR CONFIG...\n scs_config_file = ''\n for i, location in enumerate(scs_installation_dirs):\n test_path = os.path.join(location, 'config.txt')\n if os.path.isfile(test_path):\n scs_config_file = test_path\n break\n\n # IF NO CONFIG FILE, CREATE ONE...\n if scs_config_file == '':\n lprint(\"S Creating new 'config.txt' file:\\n\\t %r\", (os.path.join(scs_installation_dirs[0], 'config.txt'),))\n scs_config_file = new_config_file(os.path.join(scs_installation_dirs[0], 'config.txt'))\n\n # print('SCS Blender Tools Config File:\\n \"%s\"\\n' % os.path.join(scs_installation_dirs[0], 
'config.txt'))\n return scs_config_file" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This shows the config dialog and returns the renderWindow.
def configure(ogre_root):
    user_confirmation = ogre_root.showConfigDialog()
    if user_confirmation:
        return ogre_root.initialise(True, "OGRE Render Window")
    else:
        return None
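A brief sketch of how this ties together with the plugin lookup above; the wiring and exit handling are assumptions, not taken from the record:

# Hypothetical bootstrap: build a Root, show the config dialog, and bail out
# if the user cancels instead of creating a render window.
import ogre.renderer.OGRE as ogre

root = ogre.Root(getPluginPath())
render_window = configure(root)
if render_window is None:
    raise SystemExit("User cancelled the OGRE configuration dialog")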
[ "def get_config_dialog(self):", "def on_config(self, e):\n self.config_window = configwindow.ConfigWindow(self)\n self.config_window.Show()", "def config_show():\n Config().show()", "def openRocConfig(self):\n self.rocConfig_Window = QtWidgets.QDialog()\n self.rocConfig_ui = Ui_rocConfigure()\n self.rocConfig_ui.setupUi(self.rocConfig_Window)\n self.rocConfig_Window.show()", "def display_window(self):\n frame = tk.Frame(master=self.param_window)\n frame.grid(padx=10, pady=20, columnspan=2)\n tk.Label(master=frame, text=\"Enter simulation parameters\").pack()\n\n self.status_text = tk.StringVar()\n self.status_text.set(\"Status message\")\n \n self.rows = 1\n for input_key in self.inputs.keys():\n input_dict = self.inputs[input_key]\n \n frame = tk.Frame(master=self.param_window)\n frame.grid(row=self.rows, column=0, padx=10, pady=1)\n input_dict['label'] = tk.Label(master=frame, text=input_dict['label'])\n input_dict['label'].pack()\n\n frame = tk.Frame(master=self.param_window)\n frame.grid(row=self.rows, column=1, padx=10, pady=1)\n input_dict['entry'] = tk.Entry(master=frame, width=10)\n input_dict['entry'].insert(0, input_dict['default'])\n input_dict['entry'].pack()\n \n self.rows += 1\n\n frame = tk.Frame(master=self.param_window)\n frame.grid(padx=10, pady=20, columnspan = 2)\n self.submit_btn = tk.Button(master=frame, text=\"Submit\", width=10)\n self.submit_btn.pack()\n self.submit_btn.bind(\"<Button-1>\", self.submit_values)\n\n self.param_window.mainloop()\n return self.parameters", "def open_configuration(self,event):\n configDevFrame = Single_deviceconf(parent=self, ID=996)\n configDevFrame.Centre()\n configDevFrame.Show()\n configDevFrame.ShowModal()\n configDevFrame.Destroy()", "def _on_build_programmatic_display_config_gui(self):\n if self.ui.active_figure_format_config_widget is None:\n # Create a new one:\n # curr_selected_context = self.ui.contextSelectorWidget.current_selected_context\n active_config_name = self.ui.contextSelectorWidget.current_selected_context_key\n curr_active_config = self.owning_pipeline.active_configs[active_config_name] # Get default config for this config name\n # print(f'active_config_name: {active_config_name}, curr_active_config: {curr_active_config}')\n self.ui.active_figure_format_config_widget = FigureFormatConfigControls(config=curr_active_config)\n self.ui.active_figure_format_config_widget.figure_format_config_finalized.connect(self.on_finalize_figure_format_config)\n self.ui.active_figure_format_config_widget.show() # even without .show() being called, the figure still appears\n\n ## Get the figure_format_config from the figure_format_config widget:\n figure_format_config = self.ui.active_figure_format_config_widget.figure_format_config\n else:\n print(f'figure GUI already exists. 
Just showing again.')\n self.ui.active_figure_format_config_widget.show()", "def __showConfigDialog(self, pageName=None):\n activeFieldNamesList = []\n for idx in range(len(self.fieldTypes)):\n fieldName = self.fieldTypes.keys()[idx]\n if fieldName != 'Cell_Field': # rwh: dangerous to hard code this field name\n # self.dlg.fieldComboBox.addItem(fieldName) # this is where we set the combobox of field names in Prefs\n activeFieldNamesList.append(str(fieldName))\n\n Configuration.setUsedFieldNames(activeFieldNamesList)\n\n dlg = ConfigurationDialog(self, 'Configuration', True)\n self.dlg = dlg # rwh: to allow enable/disable widgets in Preferences\n\n if len(self.fieldTypes) < 2:\n self.dlg.tab_field.setEnabled(False)\n else:\n self.dlg.tab_field.setEnabled(True)\n\n self.dlg.fieldComboBox.clear()\n\n for fieldName in activeFieldNamesList:\n self.dlg.fieldComboBox.addItem(fieldName) # this is where we set the combobox of field names in Prefs\n\n # TODO - fix this - figure out if config dialog has configsChanged signal\n # self.connect(dlg, SIGNAL('configsChanged'), self.__configsChanged)\n # dlg.configsChanged.connect(self.__configsChanged)\n\n\n dlg.show()\n\n dlg.exec_()\n QApplication.processEvents()\n\n if dlg.result() == QDialog.Accepted:\n # Saves changes from all configuration pages!\n # dlg.setPreferences()\n Configuration.syncPreferences()\n self.__configsChanged() # Explicitly calling signal 'configsChanged'", "def showParallelSettings(self):\n self.psWindow.show()\n return self.psWindow", "def show(self, window):\r\n\r\n return", "def show_window(self):\n self.show()", "def showconfig(self):\n # using tcp communication\n if self.use_tcp_flag:\n # show tcp configuration unless serial is wanted\n if self.sender().text() == self.langstr[5]:\n self.configdlg.show()\n else:\n self.tcpdialog.show()\n else:\n self.configdlg.show()", "def display_settings(self):\n dialog = SettingsDialog(self)\n dialog.run()\n self.refresh()", "def createRenderWindow():\n ogre_root.initialise(True, app_title)", "def configure(self):\n title = _(\"Configure %(cat)s - %(view)s\") % \\\n {'cat': self.get_translated_category(), \n 'view': self.get_title()}\n\n if self.can_configure():\n config_funcs = self._get_configure_page_funcs()\n else:\n config_funcs = []\n if self.sidebar:\n config_funcs += self.sidebar.get_config_funcs()\n if self.bottombar:\n config_funcs += self.bottombar.get_config_funcs()\n\n try:\n ViewConfigureDialog(self.uistate, self.dbstate, \n config_funcs,\n self, self._config, dialogtitle=title,\n ident=_(\"%(cat)s - %(view)s\") % \n {'cat': self.get_translated_category(),\n 'view': self.get_title()})\n except WindowActiveError:\n return", "def showSettingsWindow(self) -> None:\n if not self._settings_dialog:\n self._settings_dialog = self._createDialog(\"ThingiSettings.qml\")\n self._settings_dialog.show()", "async def _show_config_form(\n self,\n ):\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(CONF_DEVICEID): str,\n }\n ),\n errors=self._errors,\n )", "def ask_for_config(frameworks, config, parent=None):\n dialog = ConfigDialog(frameworks, config, parent)\n result = dialog.exec_()\n if result == QDialog.Accepted:\n return dialog.get_config()", "def show_window(self):\n self._window.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a DICOM file, raising an exception if the 'DICM' marker is not present at byte 128. dicom.read_file() does this as of pydicom 0.9.5.
def read_dicom_file(fname):
    fo = open(fname)
    try:
        preamble = fo.read(128)
        magic = fo.read(4)
        if len(preamble) != 128 or magic != 'DICM':
            raise InvalidDicomError
        fo.seek(0)
        do = dicom.read_file(fo)
    finally:
        fo.close()
    return do
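For illustration, a minimal way this validator might be used when sweeping a directory, assuming InvalidDicomError is in scope as in the snippet above:

# Hypothetical scan: keep only files that carry the DICM marker.
import os

def find_dicom_files(directory):
    found = []
    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        try:
            found.append((path, read_dicom_file(path)))
        except InvalidDicomError:
            pass  # not a DICOM file, skip it
    return found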
[ "def dicom_read(dicom_path):", "def dcmread(dcm_file, force = False) :\n try:\n ds = pydicom.read_file(dcm_file)\n except pydicom.filereader.InvalidDicomError as e:\n if self.options.force:\n ds = pydicom.read_file(dcm_file, force = force)\n else:\n raise pydicom.filereader.InvalidDicomError(\"%s use force = 'true' if you are sure this is a dicom file\" % e)\n #endif\n #end try\n return ds", "def is_dicom(filename):\n if os.path.isfile(filename):\n s = open(filename, 'rb').read(132)\n if isinstance(s,(bytes, bytearray)):\n try:\n s = s.decode('utf-8')\n except:\n # bugfix - was trying to read PNG and found a char utf8 did not like\n try:\n s = s.decode('utf-16')\n except:\n return False\n return s.endswith('DICM')\n else:\n return False", "def is_dicom(self, filename):\n if os.path.isfile(filename):\n f = open(filename, \"rb\")\n s = f.read(132)\n if isinstance(s, (bytes, bytearray)):\n try:\n s = s.decode('utf-8')\n except:\n # bugfix - was trying to read PNG and found a char utf8 did not like\n try:\n s = s.decode('utf-16')\n except:\n return False\n f.close()\n return s.endswith(\"DICM\")\n else:\n return False", "def is_dicom(file):\n fp = open(file, 'rb')\n preamble = fp.read(0x80)\n prefix = fp.read(4)\n return prefix == b\"DICM\"", "def read_dicom(path, only_header=False):\n if (not os.path.exists(path)):\n raise FileNotFoundError('file at {!s} does not exist'.format(path))\n try:\n ds = pydicom.dcmread(path, stop_before_pixels=only_header)\n except pydicom.errors.InvalidDicomError as e:\n warnings.warn('pydicom.read_dicom() failed with error: \"{!s}\". Trying again with force=True'.format(e))\n ds = pydicom.dcmread(path, stop_before_pixels=only_header, force=True)\n return ds", "def is_dicom(filename):\n # Per the DICOM specs, a DICOM file starts with 128 reserved bytes\n # followed by \"DICM\".\n # ref: DICOM spec, Part 10: Media Storage and File Format for Media \n # Interchange, 7.1 DICOM FILE META INFORMATION \n if os.path.isfile(filename):\n f = open(filename, \"rb\")\n s = f.read(132)\n f.close()\n return s.endswith(\"DICM\")\n else:\n return False", "def parse_dicom_file(filename):\n\n try:\n dcm = dicom.read_file(filename)\n dcm_image = dcm.pixel_array\n\n try:\n intercept = dcm.RescaleIntercept\n except AttributeError:\n intercept = 0.0\n try:\n slope = dcm.RescaleSlope\n except AttributeError:\n slope = 0.0\n\n if intercept != 0.0 and slope != 0.0:\n dcm_image = dcm_image*slope + intercept\n dcm_dict = {'pixel_data' : dcm_image}\n return dcm_dict\n except InvalidDicomError:\n return None", "def read_file_meta_info(filename):\n fp = DicomFile(filename, 'rb')\n read_preamble(fp, False) # if no header, raise exception\n return _read_file_meta_info(fp)", "def read_dicom(path):\n pd = pydicom.read_file(path)\n img_arr = pd.pixel_array\n img_arr = img_arr/img_arr.max()\n img_arr = (255*img_arr).clip(0,255).astype(np.uint8)\n img = Image.fromarray(img_arr).convert('RGB')\n return img", "def read_dicom(folder):\n files = [join(folder, f) for f in listdir(folder) if is_dcm(f)]\n assert len(files) > 0, 'No dicom files in {0}'.format(folder)\n info = dcmread(files[0])\n maker = info.Manufacturer\n print('Reading', len(files), maker, 'dicom files...')\n \n # Find min and max slice location, number of echoes\n min_slice = np.float(info.SliceLocation)\n max_slice = np.float(info.SliceLocation)\n min_pos = np.array(info.ImagePositionPatient)\n max_pos = np.array(info.ImagePositionPatient)\n max_echo = int(info.EchoNumbers)\n for f in files[1:]:\n file = dcmread(f)\n slice_loc = 
np.float(file.SliceLocation)\n echo = int(file.EchoNumbers)\n if slice_loc < min_slice:\n min_slice = slice_loc\n min_pos = np.array(file.ImagePositionPatient)\n if slice_loc > max_slice:\n max_slice = slice_loc\n max_pos = np.array(file.ImagePositionPatient)\n if echo > max_echo:\n max_echo = echo\n \n voxel_size = np.array([info.PixelSpacing[0], info.PixelSpacing[1],\n info.SliceThickness])\n slices = np.round(norm(max_pos - min_pos) / voxel_size[2]) + 1\n \n # Fill mag, phase, and TE arrays\n shape = (int(info.Rows), int(info.Columns), int(slices), int(max_echo))\n mag = np.zeros(shape)\n phase = np.zeros(shape)\n TE = np.zeros(max_echo)\n for f in files:\n file = dcmread(f)\n slice_num = int(np.round((norm(np.array(file.ImagePositionPatient) - \n min_pos) / voxel_size[2])))\n echo = int(file.EchoNumbers) - 1\n TE[echo] = float(file.EchoTime)\n if maker.startswith('GE'):\n if int(file.InstanceNumber) % 2 == 1:\n mag[:,:,slice_num,echo] = file.pixel_array\n else:\n phase[:,:,slice_num,echo] = file.pixel_array\n elif maker.startswith('Ph'):\n if 'm' in file.ImageType or 'M' in file.ImageType:\n mag[:,:,slice_num,echo] = file.pixel_array\n elif 'p' in file.ImageType or 'P' in file.ImageType:\n phase[:,:,slice_num,echo] = file.pixel_array\n elif maker.startswith('SIE'): # does not work with multiple coils\n if 'm' in file.ImageType or 'M' in file.ImageType:\n mag[:,:,slice_num,echo] = file.pixel_array\n elif 'p' in file.ImageType or 'P' in file.ImageType:\n phase[:,:,slice_num,echo] = ((file.pixel_array * \n np.float(file.RescaleSlope) + \n np.float(file.RescaleIntercept)) / \n (np.float(file.LargestImagePixelValue) * np.pi))\n if maker.startswith('GE') or maker.startswith('Ph'):\n phase = 2 * np.pi * phase / (np.max(phase) - np.min(phase))\n if maker.startswith('GE'):\n phase[:,:,::2,:] = phase[:,:,::2,:] + np.pi\n data = mag * np.exp(-1j * phase)\n \n # Acq params\n CF = info.ImagingFrequency * 1e6\n if len(TE) == 1:\n delta_TE = TE\n else:\n delta_TE = TE[1] - TE[0]\n affine_2d = np.array(info.ImageOrientationPatient).reshape(3,2)\n z = (max_pos - min_pos) / ((slices - 1) * voxel_size[2] - 1)\n z = np.array([z]).T\n affine_3d = np.concatenate((affine_2d, z), axis = 1)\n B0_dir = np.linalg.lstsq(affine_3d, [0, 0, 1])[0]\n B0 = int(info.MagneticFieldStrength)\n params = {'voxel_size': voxel_size, 'CF': CF, 'delta_TE': delta_TE, \n 'TE': TE, 'B0_dir': B0_dir, 'B0': B0}\n return data, params", "def _get_dcm(path):\n if not isdicom(path):\n return None\n dcm = DicomParser.to_attributedict(dicom.read_file(path))\n dcm.filename = path\n log.debug('loaded dicom from disk %s' % path)\n return dcm", "def read_dicomdir(filename=\"DICOMDIR\"):\n # Read the file as usual.\n # read_file will return a DicomDir instance if file is one.\n # Here, check that it is in fact DicomDir\n ds = read_file(filename)\n if not isinstance(ds, DicomDir):\n msg = u\"File '{0}' is not a Media Storage Directory file\".format(filename)\n raise InvalidDicomError(msg)\n return ds", "def read_cfe_barcode(filename):\n decoded = decode(Image.open(filename))\n\n for match in decoded:\n # Check for right type of barcode and right length of data\n if match.type == 'CODE128' and len(match.data) == 30:\n return match.data\n\n raise Exception(\"No valid CFE barcode found\")", "def is_dicom_file(path, verbose=False):\n try:\n pydicom.read_file(str(Path(path).resolve()), stop_before_pixels=True)\n result = True\n except Exception as ex:\n if verbose:\n print(\"'{}' appears not to be a DICOM file\\n({})\".format(path, ex))\n result = 
False\n return result", "def is_dicom_image(file):\n result = False\n try:\n img = dicom.read_file(file, force=True)\n img.pixel_array\n result = True\n except:\n pass\n return result", "def open_image(path, verbose=True):\n # Create a reader for the image\n reader = gdcm.ImageReader()\n reader.SetFileName(path)\n # Try to actually read it\n read_success = reader.Read()\n if not read_success:\n raise IOError(\"Error opening dicom-file.\")\n image = reader.GetImage()\n # Convert to Numpy array\n try:\n data = __to_array(image)\n except ValueError:\n # Re-raise as IOError with complete traceback\n import sys\n (value, traceback) = sys.exc_info()[1:]\n raise (IOError, value, traceback)\n if verbose:\n print \"Image loaded:\", path\n print \"Meta data:\"\n print image\n return (reader, data)", "def read_preamble(fp, force):\n logger.debug(\"Reading preamble...\")\n preamble = fp.read(0x80)\n if dicom.debugging:\n sample = bytes2hex(preamble[:8]) + \"...\" + bytes2hex(preamble[-8:])\n logger.debug(\"{0:08x}: {1}\".format(fp.tell() - 0x80, sample))\n magic = fp.read(4)\n if magic != b\"DICM\":\n if force:\n logger.info(\"File is not a standard DICOM file; 'DICM' header is \"\n \"missing. Assuming no header and continuing\")\n preamble = None\n fp.seek(0)\n else:\n raise InvalidDicomError(\"File is missing 'DICM' marker. \"\n \"Use force=True to force reading\")\n else:\n logger.debug(\"{0:08x}: 'DICM' marker found\".format(fp.tell() - 4))\n return preamble", "def read_ifd(filehandle, byte_order, meta_dict, subifd_queue):\n decoder = ExifDecoder(byte_order)\n\n directory_entries = decoder.decode_bytes(filehandle.read(2))\n for _ in range(directory_entries):\n\n exif_tag_no = decoder.decode_bytes(filehandle.read(2))\n data_type = decoder.decode_bytes(filehandle.read(2))\n number_of_values = decoder.decode_bytes(filehandle.read(4))\n tagdata = filehandle.read(4)\n if exif_tag_no in (34665, 34853, 40965):\n # these tags are offsets to Exif-specific IFDs, so add to the\n # sub-IFD queue for processing later\n subifd_queue.append(decoder.decode_bytes(tagdata))\n\n else:\n # this is metadata, so add to the metadata dictionary\n tagname = exiftag(exif_tag_no)\n dict_key = 'Exif|' + tagname\n tagvalue = exifdata_tostring(tagdata, data_type, decoder)\n meta_dict[dict_key] = (tagvalue, str(exif_tag_no), data_type, number_of_values)\n\n # return the offset to the next IFD\n return decoder.decode_bytes(filehandle.read(4))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given our dicom_files and studies records and a patient ID, return a list of (datetime, study instance UID) ordered by date+time
def patient_studies(dicom_files, studies, patient_id):
    ps = []
    for uid in dicom_files[patient_id]:
        datetime = '%s%s' % studies[uid]
        ps.append([datetime, uid])
    ps.sort(lambda a, b: cmp(a[0], b[0]))
    for el in ps:
        date_time_parts = (el[0][0:4], el[0][4:6], el[0][6:8], el[0][8:10], el[0][10:12], el[0][12:14])
        el[0] = '%s-%s-%s %s:%s:%s' % date_time_parts
    return ps
[ "def get_samples_from_patient_id(patient_id):\n all_files = FileRepository.all()\n q_pid = Q(metadata__cmoPatientId=patient_id)\n q_fg = build_argos_file_groups_query()\n q = q_pid & q_fg\n files = FileRepository.filter(queryset=all_files, q=q, filter_redact=True)\n data = list()\n for current_file in files:\n sample = dict()\n sample[\"id\"] = current_file.file.id\n sample[\"path\"] = current_file.file.path\n sample[\"file_name\"] = current_file.file.file_name\n sample[\"metadata\"] = current_file.metadata\n data.append(sample)\n\n samples = list()\n # group by igoId\n igo_id_group = dict()\n for sample in data:\n igo_id = sample[\"metadata\"][settings.SAMPLE_ID_METADATA_KEY]\n if igo_id not in igo_id_group:\n igo_id_group[igo_id] = list()\n igo_id_group[igo_id].append(sample)\n\n for igo_id in igo_id_group:\n samples.append(build_sample(igo_id_group[igo_id]))\n samples, bad_samples = remove_with_caveats(samples)\n number_of_bad_samples = len(bad_samples)\n if number_of_bad_samples > 0:\n LOGGER.warning(\"Some samples for patient query %s have invalid %i values\", patient_id, number_of_bad_samples)\n return samples", "def process_dicom_file_list(dicom_file_list, parent_sorting_field=\"PatientName\", verbose=False):\n dicom_series_dict_parent = {}\n\n for i, dicom_file in enumerate(sorted(dicom_file_list)):\n if verbose is True:\n logger.debug(\" Sorting file %d\", i)\n\n dicom_file = dicom_file.as_posix()\n\n if \"dicomdir\" in dicom_file.lower():\n logger.warning(\n \"DICOMDIR is not supported in this tool, images are read directly. Skipping.\"\n )\n continue\n\n dicom_object = pydicom.read_file(dicom_file, force=True)\n\n parent_sorting_field_data = dicom_object[parent_sorting_field].value\n\n if parent_sorting_field_data not in dicom_series_dict_parent.keys():\n dicom_series_dict_parent[parent_sorting_field_data] = {}\n\n series_uid = dicom_object.SeriesInstanceUID\n\n if series_uid not in dicom_series_dict_parent[parent_sorting_field_data].keys():\n dicom_series_dict_parent[parent_sorting_field_data][series_uid] = [dicom_file]\n\n else:\n dicom_series_dict_parent[parent_sorting_field_data][series_uid].append(dicom_file)\n\n return dicom_series_dict_parent", "def get_paths_to_animal_recordings_on_single_day(fpath, animal_id):\n return sorted(find_recording_sessions(find_recording_session_groups(os.path.join(fpath, animal_id))[0]))", "def get_dicoms(data_dir, patient_id):\n dicom_paths = []\n path = os.path.join(data_dir, 'dicoms/' + patient_id)\n for (dirpath, _, filenames) in os.walk(path):\n for filename in filenames:\n dicom_paths.append(os.path.join(dirpath, filename))\n break\n return dicom_paths", "def ExtractDetVOffInfo(directory=os.getcwd()):\n ls=os.listdir(directory)\n TimesLogs=[]\n O2OData={}\n for log in ls:\n if \"DetVOffReaderDebug__FROM\" in log:\n (start,end)=log[:-4].split(\"FROM_\")[1].split(\"_TO_\")\n TimeStamp=datetime.datetime.strptime(start.replace(\"__\",\"_0\"),\"%a_%b_%d_%H_%M_%S_%Y\")\n #print start,TimeStamp\n file=open(log,'r')\n filelines=file.readlines()\n file.close()\n LVOff=[]\n HVOff=[]\n for line in filelines:\n #print line\n if \"OFF\" in line:\n detid,hv,lv=line.split()\n #print line,detid,hv,lv\n if hv==\"OFF\":\n HVOff.append(int(detid))\n if lv==\"OFF\":\n LVOff.append(int(detid))\n \n O2OData.update({TimeStamp:(HVOff,LVOff)})\n return O2OData", "def fetch_all_subj_dicom_info(self):\n all_sample_dicom = [os.listdir(path)[0]\n for path in self.all_subj_folder_path]\n all_sample_dicom = [\n os.path.join(\n path, dicom) for path, dicom in zip(\n 
self.all_subj_folder_path, all_sample_dicom)]\n\n # read dicom info\n print(\"### Reading dicom info... ###\\n\")\n self.dicom_info = [\n pydicom.read_file(\n sample_dicom,\n force=True) for sample_dicom in all_sample_dicom]\n self.patient_ID = [info.PatientID for info in self.dicom_info]\n self.patient_name = [\n info.PatientName.components for info in self.dicom_info]\n print(\"### dicom info read completed! ###\\n\")\n return self", "def sort_seqinfo_by_series_date_time(seqinfo):\n # sort by concatenated date and time (strings):\n # (use set to only keep unique ones):\n dateTimes = set([s.date + s.time for s in seqinfo if s.date and s.time])\n sortedSeqinfo = []\n for dt in sorted(dateTimes):\n dTseries = [\n ss for ss in seqinfo if (\n ss.date\n and ss.time\n and ss.date + ss.time == dt\n )\n ]\n # sort series with identical date and time by series_uid:\n for sid in sorted([s.series_uid for s in dTseries if s.series_uid]):\n for ss in dTseries:\n if ss.series_uid == sid:\n sortedSeqinfo.append(ss)\n\n # Now, add the series which do not have series_uid:\n for ss in dTseries:\n if ss not in sortedSeqinfo:\n sortedSeqinfo.append(ss)\n\n # Now, add the series which do not have date or time:\n for ss in seqinfo:\n if ss not in sortedSeqinfo:\n sortedSeqinfo.append(ss)\n\n return sortedSeqinfo", "def loadPatientList(df):\r\n patient_list = df.Patient_ID.unique()\r\n return patient_list", "def get_metadata(hf_patients_file, metadata_file, output_file):\n\n # Use 'dicom_id' as names for row indices\n hf_patients = pd.read_csv(hf_patients_file, sep=',', index_col=\"dicom_id\")\n\n # Use 'dicom' as name\n metadata = pd.read_csv(metadata_file, index_col=\"dicom\", dtype={\"StudyDate\": str, \"StudyTime\": str})\n\n # Disregard all columns except 'subject_id' and 'study_id'\n hf_patients = pd.concat([hf_patients['study_id'], hf_patients['subject_id']], axis=1)\n\n # Find study date/time for heart failure patients\n study_date = metadata[\"StudyDate\"][hf_patients.index]\n study_time = metadata[\"StudyTime\"][hf_patients.index]\n\n result = pd.concat([hf_patients, study_date, study_time], axis=1)\n result = result.rename(columns={\"StudyDate\": \"study_date\", \"StudyTime\": \"study_time\"})\n\n result.to_csv(output_file)", "def extract_notes(infile):\n\n # get patient ID\n subj_id = patient_id_from_file(infile)\n \n #get lab_events for this patient\n con = open_db()\n \n query = \\\n \"\"\"\n SELECT i.chartdate, i.charttime, i.description, i.category, i.text\n FROM noteevents i\n WHERE subject_id = {};\n \"\"\".format(subj_id)\n\n notes = pd.read_sql_query(query,con)\n \"\"\" change time stamp to seconds from origin \"\"\"\n \n origin = pd.to_datetime(wfdb.rdheader(infile).base_datetime)\n notes.insert(0, 'time', '')\n for idx, row in notes.iterrows():\n notes['time'].iloc[idx]=int((pd.to_datetime(row['charttime'])-origin).total_seconds())\n del notes['charttime']\n del notes['chartdate']\n\n return (notes)", "def get_studydate(file):\n return dicom.read_file(file).StudyDate", "def extract_data(self, dicom_raw, tag_list, our_patient_id):\n patient_data = {}\n try:\n patient_data[\"filename\"] = self.get_filename_with_extension(dicom_raw, '')\n patient_data[str(Tag(0x0010, 0x0020))] = our_patient_id\n for tag in tag_list.tag_to_extract:\n tag = Tag(tag)\n exclude = False\n if tag_list.tag_to_extract.get(tag) is not None:\n vr = tag_list.tag_to_extract[tag].VR\n value = tag_list.tag_to_extract[tag].value\n if value:\n #DICOM Body Part Examined se debe mapear a terminologia SNOMED-CT ref: 
http://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_L.html#chapter_L\n #si no pertenece a ninguno entonces se omite\n if tag == Tag(0x0018, 0x0015):\n value_snomed = self.dicom2snomedct_dict.get(value.upper())\n if value_snomed: value = value_snomed[\"code\"]\n elif self.online_lookup:\n concept = get_snomed_eng(value.lower())\n if isinstance(concept, list) and concept != []:\n value = concept[1]\n else: exclude = True\n else: exclude = True\n #DICOM StudyInstanceUID ref: https://www.ietf.org/rfc/rfc3001.txt\n elif tag == Tag(0x0020, 0x000D):\n value = 'urn:oid:' + str(value)\n #DICOM SOP Class UID ref: http://dicomlookup.com/lookup.asp?sw=Tnumber&q=(0008,0016)\n elif tag == Tag(0x0008, 0x0016):\n value = 'urn:oid:' + str(value)\n #DICOM Laterality ref: http://dicomlookup.com/lookup.asp?sw=Tnumber&q=(0020,0060)\n elif tag == Tag(0x0020, 0x0060):\n value = self.laterality.get(value)\n if not value: exclude = True\n #DICOM Date (DA) tiene el formato YYYYMMDD y Date Time (DT) YYYY-MM-DDThh:mm:ss.sss+zz:zz por lo que se deben parsear\n elif vr in ['DA', 'DT', 'TM']:\n try:\n if value != '': value = parser.parse(value)\n else: value = parser.parse(\"19000101\") # Valor por defecto, en caso de la hora se agrega 00:00:00 de forma automática\n except:\n exclude = True\n #DICOM Gender usa codificación ['F', 'M', 'O'] por lo que se debe mapear a la codificación HL7 FHIR ['female','male','other','unknown']\n elif tag == Tag(0x0010, 0x0040):\n value = self.gender.get(value)\n if not value: exclude = True\n else: exclude = True\n #Se extrae el valor del DICOM Tag\n if not exclude:\n if isinstance(value, str):\n patient_data[str(tag)] = value\n else:\n patient_data[str(tag)] = str(value)\n # logger.debug(\"Valores de información personal del paciente: {0}\".format(patient_data))\n except Exception as e:\n logger.error(\"Unexpected exception at extract_data(): {0}\".format(e))\n raise\n \n return patient_data", "def loadDicomsFromDatabase(self, dicomFiles):\n\n #--------------------\n # Create dictionary of downloaded DICOMS\n # for quick retrieval when comparing with files\n # in the slicer.dicomDatabase. Speed preferred over\n # memory consumption here.\n #-------------------- \n dlDicomObj = {}\n for dlFile in dicomFiles:\n dlDicomObj[os.path.basename(dlFile)] = dlFile\n\n\n \n #--------------------\n # Parse through the slicer.dicomDatabase\n # to get all of the files, as determined by series.\n #--------------------\n matchedDatabaseFiles = []\n for patient in slicer.dicomDatabase.patients():\n for study in slicer.dicomDatabase.studiesForPatient(patient):\n for series in slicer.dicomDatabase.seriesForStudy(study):\n seriesFiles = slicer.dicomDatabase.filesForSeries(series)\n #\n # Compare files in series with what was just downloaded.\n # If there's a match, append to 'matchedDatabaseFiles'.\n #\n for sFile in seriesFiles:\n if os.path.basename(sFile) in dlDicomObj: \n matchedDatabaseFiles.append(sFile)\n\n\n \n #--------------------\n # Acquire loadabes as determined by\n # the 'DICOMScalarVolumePlugin' class, by feeding in \n # 'matchedDatabaseFiles' as a nested array.\n #--------------------\n dicomScalarVolumePlugin = \\\n slicer.modules.dicomPlugins['DICOMScalarVolumePlugin']()\n loadables = dicomScalarVolumePlugin.examine([matchedDatabaseFiles])\n\n\n \n #--------------------\n # Determine loadable with the highest file count. 
\n # This is usually all DICOM files collated as one volume.\n #--------------------\n highestFileCount = 0\n highestFileCountIndex = 0\n for i in range(0, len(loadables)):\n if len(loadables[i].files) > highestFileCount:\n highestFileCount = len(loadables[i].files)\n highestFileCountIndex = i\n\n\n \n #--------------------\n # Load loadable with the highest file count.\n # This is assumed to be the volume file that contains\n # the majority of the downloaded DICOMS.\n #--------------------\n dicomScalarVolumePlugin.load(loadables[highestFileCountIndex])\n \n\n\n \n #--------------------\n # Return true if login successful.\n #-------------------- \n return True", "def _GetRefdat(self):\n for rfile in self.refdats.keys():\n# Get times for ref.dat files with a time-stamp.\n words = rfile.replace('.','_').split('_')\n if len(words) == 6 and words[-2].count(':') == 20:\n# This file was time-stamped by the sequence. Get the\n# date and time. file name format:\n# ref_Sep_9_2007_11:28:32.dat\n rtime[rfile] = hms_to_secs(words[-2])\n for pfile in self.pfiles:\n min_difftime = 1.e20\n self.info[pfile]['refdat'] = None\n for rfile in self.refdats.keys():\n if rfile[:3] == 'ref' and 'dat' in rfile:\n# This is a reference data file. First see if the orientation is\n# appended. If the file has neither a time-stamp nor a plane and\n# there is more than one ref.dat, the epi reconstruction will\n# be aborted.\n rinfo = {}\n ref_file = None\n if 'sag' in rfile and self.info[pfile]['plane'] == 'sagittal':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif 'cor' in rfile and self.info[pfile]['plane'] == 'coronal':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif 'axial' in rfile and self.info[pfile]['plane'] == 'axial':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif len(self.refdats.keys()) == 1:\n# Use the only one if that is all there is.\n ref_file = rfile\n epi_time = hms_to_secs(self.info[pfile]['acqtime'].split()[-2])\n if epi_time - rtime[rfile] < min_difftime and \\\n rftime[rfile] > epi_time:\n# Use the reference file that acquired nearest to the EPI\n# but before it.\n min_difftime = epi_time - rtime[rfile]\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n if ref_file:\n# Found a candidate.\n if not self.info[pfile]['refdat']:\n# Haven't found one yet, use it.\n self.info[pfile]['refdat'] = ref_file\n else:\n# Found two. 
Choose one in the same directory.\n oldpath = os.path.dirname(self.info[pfile]['refdat'])\n newpath = os.path.dirname(ref_file)\n pfile_path = os.path.dirname(pfile)\n if oldpath == newpath:\n# Same path, use the old one.\n self.info[pfile]['refdat'] = ref_file\n elif newpath == pfile_path:\n self.info[pfile]['refdat'] = ref_file\n# else Do nothing, use existing choice.\n elif not os.path.exists(rfile):\n self.info[pfile]['refdat'] = None\n elif os.stat(rfile).st_size > 0:\n# This path is taken if no info is encoded in the file name.\n# Don't use empty ref.dat files.\n self.info[pfile]['refdat'] = rfile", "def getFileDates(self, file_id):\n sq = self.getEntry('File', file_id)\n start_time = sq.utc_start_time.date()\n stop_time = sq.utc_stop_time.date()\n return [start_time, stop_time]", "def get_patient_cases(patient):\n # ----- Get database connection\n db = connect_to_db()\n try:\n c1 = db.cursor()\n try:\n c1.execute(\n \"\"\"SELECT tc.SLABEL \"\"\"\n \"\"\"FROM BOM.PATIENT pt \"\"\"\n \"\"\" INNER JOIN BOM.TCASE tc ON pt.SUID = tc.SPATIENTUID \"\"\"\n \"\"\"WHERE \"\"\"\n \"\"\" pt.SID = '%s' \"\"\" %\n patient)\n res = c1.fetchall()\n cases = []\n for re in res:\n cases.append(re[0])\n finally:\n c1.close()\n finally:\n db.close()\n return cases", "def get_dates(path, files):\n photos = []\n\n for file in files:\n # Open file and get date\n with open(path + \"/\" + file, 'rb') as f:\n tags = exifread.process_file(f)\n date = tags[\"EXIF DateTimeOriginal\"]\n\n photos.append({\n \"name\": file,\n \"date\": str(date)\n })\n\n os.remove(path + \"/\" + file)\n\n return photos", "def __read_patients(self, filename, patient_subset):\r\n\r\n patient_tuples = []\r\n i = 0\r\n\r\n with open(filename) as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for row in reader:\r\n if patient_subset is None or i in patient_subset:\r\n patient_tuples.append((row['patient_id'], row['original_id']))\r\n i = i + 1\r\n\r\n return patient_tuples", "def get_subjects_info(data_folder, dataset_id, format=\"dict\"):\r\n subjects_info = {} # build of dictionnary of all session for each subject\r\n\r\n if dataset_id == \"raw_clean_32\":\r\n \"\"\" High Versus Low inhibitory Stimuli of Tinnitus and control patients\r\n \"\"\"\r\n patient = 2 # patient group (static for a given dataset)\r\n session = 9 # 6 = 1 old remplacer apres (session 'high')\r\n ses2 = 8 # (session 'low')\r\n names = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(session)))\r\n names2 = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(ses2)))\r\n\r\n pat = []\r\n pat2 = []\r\n for name in names:\r\n # print name.split('_')[0]\r\n pat.append(name.split('_')[0]) # all subjects ID from names\r\n for name in names2:\r\n # print name.split('_')[0]\r\n pat2.append(name.split('_')[0]) # all subjects ID from names2\r\n\r\n for name in names2:\r\n if pat.__contains__(name.split('_')[0]):\r\n if subjects_info.keys().__contains__(name.split('_')[0]):\r\n subjects_info[name.split('_')[0]].append(name) # add file to the list\r\n else:\r\n subjects_info[name.split('_')[0]] = [name] # add first file to the list\r\n for name in names:\r\n if pat2.__contains__(name.split('_')[0]):\r\n subjects_info[name.split('_')[0]].append(name)\r\n\r\n elif dataset_id == \"Distress2010\":\r\n \"\"\" High Versus Low Distress patients (1, 2, 3, 4 Distress)\r\n \"\"\"\r\n sub_high = 'high distress'\r\n sub_low = 'low distress'\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, sub_high)) + \\\r\n 
os.listdir(os.path.join(data_folder, dataset_id, sub_low))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if filename[0] in valid_id:\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append({\"distress\": int(filename[0])})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n elif dataset_id == \"Tinnitus_EEG\":\r\n \"\"\" extended Distress2010 dataset with more than 310 patients\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id))\r\n subjects_csv = pd.read_csv(os.path.join(data_folder, dataset_id,\"labels_name_cat_TQ_vas.csv\"),\r\n names=[\"session\", \"distress\", \"TQ\", \"VAS\"], index_col=\"session\")\r\n\r\n for filename in filenames:\r\n if filename.split(\".\")[1] == \"txt\":\r\n if np.any(subjects_csv.index.str.match(filename)):\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n distress_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"distress\"].values[0])\r\n TQ_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"TQ\"].values[0])\r\n VAS_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"VAS\"].values[0])\r\n\r\n symptoms.append({\"distress\": distress_val})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"TQ\": TQ_val, \"VAS\": VAS_val}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n else:\r\n print(\"file \" + filename + \" not listed in labels_name_cat_TQ_vas.csv, subject rejected\")\r\n\r\n elif dataset_id == \"NormativeDB\":\r\n \"\"\" Control subjects in resting state\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"M\")) + \\\r\n os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"F\"))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if not (filename.split(\".\")[0][-2:] == \"EC\"): # remove eyes closed\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append(\"Control\")\r\n symptoms.append({\"distress\": int(0)})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"gender\": filename[2]}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session\r\n\r\n else:\r\n print(\"get_subjects_info: unknown dataset\")\r\n if format == \"DataFrame\":\r\n subjects_info = _subjects_dict_to_pandas(subjects_info)\r\n\r\n return subjects_info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a project/subject/session identifier is valid. Identifiers can only contain alphanumeric characters and underscores.
def _validate_identifier(self, identifier):
    for c in identifier:
        if c not in string.letters + string.digits + '_':
            return False
    return True
[ "def IsProjectIDValid(project):\n if len(project) < 6 or len(project) > 30:\n return False\n return bool(re.match('^[a-z][a-z0-9\\\\-]*[a-z0-9]$', project))", "def check_valid_identifier(identifier):\n return bool(re.match(\"[_A-Za-z][_a-zA-Z0-9]*$\", identifier))", "def is_valid_id(id_):\n if not re.match(r'^[%A-Za-z0-9_.\\\\\\-~]*$', id_):\n return False\n return True", "def is_valid_project_name(project_name):\n return project_name.replace(\",\",\"\").replace(\".\",\"\").replace(\"-\",\"\").replace(\"_\",\"\").isalnum()", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def is_valid_project_id(project_id):\n return re.match(r'^(google.com:)?[a-z0-9\\-]+$', project_id)", "def isValidAseqId(self, aseqId = None):\n if aseqId == None:\n return False\n if re.match('[A-Za-z0-9_-]{22}$',aseqId):\n return True\n else:\n return False", "def check_identifiers():\n if not PROJECT_SLUG.isidentifier():\n sys.exit(f\"project_slug='{PROJECT_SLUG}' is not a valid Python identifier.\")\n if not PROJECT_DIRNAME.isidentifier():\n sys.exit(\n f\"project_dirname='{PROJECT_DIRNAME}' is not a valid Python identifier.\"\n )", "def is_identifier_char(c):\n return re.match('[_\\w]', c) is not None", "def valid_id(id):\n reserved = (\n [ \"_annalist_site\"\n , \"_annalist_collection\"\n ])\n # cf. urls.py:\n if id and re.match(r\"\\w{1,32}$\", id):\n return id not in reserved\n return False", "def is_valid_identifier(s):\n try:\n assert s[0] in INITIAL\n assert False not in [x in INNER for x in s]\n return True\n except AssertionError:\n return False", "def is_valid_youtube_id(potential_id):\r\n if type(potential_id) is not str:\r\n return False\r\n if len(potential_id) is not 11:\r\n return False\r\n for char in potential_id:\r\n if not char.isalnum() and char is not \"-\" and char is not \"_\":\r\n return False\r\n return True", "def is_valid_string_id(k):\n print(\"k=\" + k)\n ret = k is not None and k != \"\"\n if not ret and k:\n global IGNORED_ID\n IGNORED_ID.add(k)\n return ret", "def valid_uid(uid):\n return re.compile(\"[A-Za-z][A-Za-z0-9]{10}\").match(uid)", "def valid_identifier_name(name):\n remove_characters_regex = '[^a-zA-Z0-9_]'\n name = re.sub(remove_characters_regex, '', name)\n # Remove beginning characters that are numbers\n name = re.sub('^[0-9]*', '', name)\n return name", "def id_check(employee_id):\r\n# badge_pattern = re.compile('[A-Za-z]{2}-\\d{4}')\r\n# re.search(badge_pattern, employee_id)\r\n\r\n # if statement\r\n if not re.match('[A-Z]{2}-\\d{4}', employee_id):\r\n print(employee_id, 'is not a valid ID.')", "def is_valid_name(name: str) -> bool:\n return bool(re.fullmatch(pattern=r\"\\w{4,16}\", string=name))", "def validate_empid(empid):\r\n validate = re.compile('[A-Z][0-9]{3}')\r\n\r\n if re.fullmatch(validate, empid):\r\n return True\r\n else:\r\n return False", "def _database_username_validate(s):\n if len(s) < 1 or len(s) > 63:\n raise ValueError('Database user name must be 1 to 63 characters long')\n if s[0] not in string.ascii_letters:\n raise ValueError('Database user name must start with a letter')\n allowed_characters = frozenset(string.ascii_letters + string.digits + '_')\n if frozenset(s).issuperset(allowed_characters):\n raise 
ValueError('Invalid character in database user name. Only '\n 'numbers, letters, and _ are acceptable.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalizes USD price with thousand separator into float value
def normalize_price(price: str) -> float:
    return float(price.strip().replace(',', ''))
[ "def get_price(str_val):\n return float(str_val.replace('.', '').replace(',', '.'))", "def price_str_to_float(tkt_item):\n try:\n tkt_item[10] = float(tkt_item[10].replace(',', '.'))\n except:\n tkt_item[10] = 0\n\n return tkt_item", "def convert(price_usd) :\n\tprice_eur = price_usd / get_change_conversion()\n\treturn float(price_eur)", "def clean_value(self, value):\n return float(value.replace('.', '').replace(',', '.'))", "def normalize_loan_amount(value):\n normalized_value = value.lower()\n if 'k' in normalized_value:\n normalized_value = normalized_value.replace('k', '000')\n normalized_value = normalized_value.replace('.', '')\n\n normalized_value = normalized_value.replace('$', '')\n normalized_value = normalized_value.replace(',', '')\n\n try: \n return Decimal(normalized_value)\n except: InvalidOperation\n \n return None", "def to_usd(my_price): \n\n return f\"${my_price:,.2f}\" \n ## Taken from shopping-cart project", "def parse_float(self, value):\n return float(value.replace(',','.'))", "def euro(value):\n try:\n val = u\"%.2f\" % (float(value))\n except:\n return u''\n return val.replace('.', ',')", "def check_price(URL, headers):\n page = requests.get(URL, headers=headers)\n soup = BeautifulSoup(page.content, 'html.parser')\n price = soup.find(id=\"priceblock_ourprice\").get_text()\n converted_price = price[:-3]# -3 removes the .99 pence value from product\n float_price = ''\n for c in converted_price:\n if c.isdigit():\n float_price = float_price + c\n #loop that removes the £$,. from product so the string can convert to float correctly\n return float(float_price)", "def eur(value):\n float(value)\n return f\"€{value:,.2f}\"", "def format_usd(my_price):\n return f\"${my_price:,.2f}\"", "def clean_currency(x):\n \n if isinstance(x, str):\n x=x.replace(\"*\",\"\")\n x=x.replace(\",\",\"\")\n if x=='':\n return(0)\n elif x[0]!='$':\n return(0)\n else:\n x=x.split(' ')[0]\n x=x.replace('$',\"\")\n return float(x)\n return(x)", "def clean_currency(x: str):\n # cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n # x = str(x)\n if isinstance(x, str):\n if x.startswith(\"$\"):\n return x.replace('$', '').replace(',', '')\n # return float(x)\n return x\n except Exception as ex:\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())", "def _fix_balance(self, balance):\n\n return float(balance.replace(',', '.').replace(' ', ''))", "def usd(value):\r\n return f\"${Decimal(value):,.2f}\"", "def format_as_usd(value):\n return f\"${value:,.2f}\"", "def convert_amount(amount):\n amount = re.sub(r\"\\$\", r\"\", amount)\n amount = re.sub(r\"\\,\", r\"\", amount)\n return int(float(amount))", "def thousands(value):\n try:\n value = float(value)\n except ValueError:\n return value\n return f\"{value:,}\".replace(',',' ')", "def unit_conversion(val, change):\n return round(val / 1000 ** change, 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the csv file. CSV file should contain ['Question', 'Answer'] columns. Remove NaN values. Throw error if format is bad or file does not exist.
def parse_csv_file(self, csv_file: str):
    try:
        df = pd.read_csv(csv_file)
        if not set(['Question', 'Answer']).issubset(df.columns):
            raise BadCSVFile(
                "CSV file does not contain ['Question', 'Answer'] columns.")
        df.dropna(inplace=True)
    except Exception as e:
        raise BadCSVFile(
            "Error while reading the csv file. Please check the path of the file or the file might be curropted.")
    return df
[ "def custom_read_csv(self):\n # read csv file into 'data' data frame\n # strip data of leading or tailing whitespaces\n df = pd.read_csv(self.path, skipinitialspace=True)\n # drop \"fnlwgt\" column\n df.drop(columns=\"fnlwgt\", inplace=True)\n # replace all \"?\" with NaN\n df.replace('?', np.nan, inplace=True)\n # filter rows with NaNs\n df.dropna(axis=0, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df", "def parse_csv(self):\n # delim = sys.argv[2]\n delim = ';'\n data_tmp = pandas.read_csv(self.filepath, delimiter=delim)\n rows_read = self.parse_data(data_tmp)\n print('created: ' + str(rows_read) + ' Questions with answers')", "def read_csv_file(self):\n pass", "def _read_csv(csv_file, discrete=None):\n if csv_file == 'demo':\n return load_demo()\n discrete_cols = discrete if discrete is not None else discrete.split(',')\n return pd.read_csv(csv_file), discrete_cols", "def load_clean_data_to_df(csv_path):\n df = pd.read_csv(csv_path)\n df = df.fillna(\"NaN\")\n for column in df:\n df[column] = df[column].apply(clean_txt)\n\n return df", "def loadCSV(input_file):", "def test_read_data_from_csv(self):\n self.assertEqual(CsvOperations.read_data(self.file_name)[0], [\"1\", \"Gold\", \"10\"])\n self.assertEqual(CsvOperations.read_data(self.file_name)[1], [\"2\", \"Platinum\", \"5\"])\n self.assertEqual(CsvOperations.read_data(self.file_name)[2], [\"3\", \"Bronze\", \"0\"])", "def load_data(csv):\n df = pd.read_csv(csv, sep=',', error_bad_lines=False)\n return df", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def parse_csv(self, path, answer_col, query_col='', context_col='context_string', kb_name=''):\n kb_name = kb_name if kb_name is not None else path.split('/')[-1].split('.')[0]\n df = pd.read_csv(path)\n kb = self.parse_df(kb_name, df, answer_col, query_col, context_col)\n return kb", "def load_and_clean(self,in_path):\n in_path = Path(in_path)\n try:\n df = pd.read_csv(in_path, index_col = 0, parse_dates = True, infer_datetime_format = True)\n except:\n print(\"Could not read csv file. Please check the path\")\n finally:\n #attempt to clean df\n df.dropna(inplace = True)\n df.drop_duplicates(inplace = True)\n df.sort_index()\n return df", "def read_data():\n df = pd.read_csv('faculty.csv')\n df.columns = df.columns.str.strip()\n df.degree = df.degree.str.strip()\n return df", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. 
Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def test_import_csv_file_filled(self):\n\n complete_data = parse_csv_file(self.test_file_path)\n self.assertNotEqual([], complete_data)", "def __read_csv_file(self):\n if '.csv' not in self.file_path:\n raise RuntimeError(\"Trying to load a non-CSV file...\")\n\n self.df = pd.read_csv(self.file_path, encoding='utf_16', dtype={'Level': object},\n float_precision='round_trip', error_bad_lines=False)", "def import_csv(self, file_path):\n file_extension = os.path.splitext(file_path)[1]\n if not os.path.isfile(file_path) or file_extension != \".csv\": #Do nothing if file doesn't exist or is not csv\n return\n\n temp_df = pd.read_csv(file_path)\n col_list = temp_df.columns.tolist()\n\n if col_list != self.Music_cols: #do nothing if columns don't match\n return\n else:\n self.Music = temp_df\n\n has_error = False\n\n #get all tags\n song_paths = self.Music['path'].tolist()\n for music_path in song_paths:\n file_extension = os.path.splitext(music_path)[1]\n if os.path.isfile(music_path) and file_extension in self.supported_format:\n tag = TinyTag.get(music_path)\n self.tags[music_path] = tag\n else: #file doesn't exist or not supported format\n has_error = True\n self.Music = self.Music[self.Music['path'] != music_path]\n\n self.clear_all_youtube_links()\n\n if has_error:\n print(\"Warning: Some music files found in .csv are missing/modified\")", "def open_csv_file():\n df = pd.read_csv('../data/hiv_data/hiv-protease-data-expanded.csv',\n index_col=0)\n df = df.dropna(subset=['FPV'])\n return df", "def test_import_csv_file_empty(self):\n\n complete_data = parse_csv_file(self.test_empty_file_path)\n self.assertEqual([], complete_data)", "def ReadData( fileName ):\n \n # define column names\n colNames = ['agency_cd', 'site_no', 'Date', 'Discharge', 'Quality']\n\n # open and read the file\n DataDF = pd.read_csv(fileName, header=1, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[2], comment='#',\n na_values=['Eqp'])\n DataDF = DataDF.set_index('Date')\n \n # quantify the number of missing values\n MissingValues = DataDF[\"Discharge\"].isna().sum()\n \n return( DataDF, MissingValues )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a vector for a given query
def get_vector(self, query: list):
    if len(query) == 0:
        raise BadQueryParameter("Query (list) can not be empty.")
    return self.vectorizer.transform(query)
[ "def get_vector_for_query(self, query: Query) -> np.ndarray:\n return self._vectorizer.transform([query.text])", "def _to_full_vector(self, query_vector: List[Tuple[str, float]]) -> np.array:\n terms = list(self.index.get_terms())\n terms.sort()\n vector = np.zeros(len(terms))\n\n for (term, weight) in query_vector:\n index = terms.index(term)\n vector[index] = weight\n\n return vector", "def querytovector(self,topology,query,evidence,negate=False):\n vector=[]\n for item in topology:\n if item in query:\n if not negate:\n vector.append('T')\n else:\n vector.append('F')\n elif item in [a for (a,b) in evidence]:\n vector.append([b for (a,b) in evidence if a==item][0])\n else:\n vector.append('-')\n return vector", "def transform_query(vectorizer_model, query):\n x_request = vectorizer_model.transform(query)\n x0 = x_request.toarray()\n return x0", "def calcQueryVector(self):\n query = input(\"Query: \");\n ana = StemmingAnalyzer() ### lowercases, stems, ignores stopwords\n tokens = [token.text for token in ana(query)]\n\n queryVector = {}\n for token in tokens:\n if token in self.invertedIndex.keys():\n if token in queryVector.keys():\n queryVector[token]+=1;\n else:\n queryVector[token] = 1;\n\n return self.normalizeQueryVector(queryVector);", "def get_vector(self, vec_id):\n pass", "def _query(self, query):\n return self.sf.query(query)", "def query_to_vector(self, query_terms):\n n=len(self.documents)\n vector=defaultdict(lambda:0)\n tf=defaultdict(lambda:0)\n for term in query_terms:\n tf[term]+=1\n for term in tf:\n if term not in self.doc_freqs.keys():\n vector[term]=0\n else:\n vector[term]=(1+math.log10(tf[term]))*(math.log10(1. * n/self.doc_freqs[term]))\n return vector", "def query_to_word_vector(query_string, corpus):\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n word_vec = np.zeros(len(inv_index))\n query_word_list = vsm_retrieval.convert_query(query_string)\n for count_vec, word in enumerate(inv_index):\n if word in query_word_list:\n word_vec[count_vec] = 1\n return word_vec", "def create_vector_query(self, bag_of_words):\n queries = {}\n mod= 0.0\n for word in bag_of_words.values:\n mul_tf_idf = 0\n if word in self.words_index:\n idf = self.getIDF(word)\n tf = self.getTF(bag_of_words.values[word], bag_of_words.document_len())\n tf_num = float(tf[0]) / tf[1]\n mul_tf_idf = idf * tf_num\n\n queries[word] = mul_tf_idf\n mod += math.pow(mul_tf_idf, 2)\n\n\n queries['##mod##'] = math.sqrt(mod)\n return queries", "def query_to_vector(query_terms):\r\n \r\n query_doc_freq = {}\r\n for word in query_terms:\r\n try:\r\n query_doc_freq[word]=DF[word]\r\n except:\r\n continue\r\n \r\n result = {}\r\n for key in query_doc_freq.keys():\r\n result[key] = math.log((1000/query_doc_freq[key]),10) \r\n return result\r\n\r\n pass", "def bv(query):\n return biovelo(query)", "def search_vector(self, key):\n index = self._inv_payload.get(key, None)\n if index is not None:\n return self._index.reconstruct(index)\n return np.zeros(self._vector_dim, dtype=np.float32)", "def vector_q(q_1: Q) -> Q:\n\n end_q_type = f\"vector_q({q_1.q_type})\"\n\n v = Q(\n [0, q_1.x, q_1.y, q_1.z],\n q_type=end_q_type,\n representation=q_1.representation,\n )\n return v", "def create_vec(self, in_line):\r\n result= self.d2v.infer_vector(in_line.split())\r\n return result", "def get_vector_by_id(self, idx):\n raise NotImplementedError(\"should be implemented by subclass\")", "def query(_from, _select, _geomselect=None, _where=None, _groupby=None, _limit=None):\n # INSTEAD MAKE INTO CLASS\n # WITH .fields 
attr\n # AND .__iter__()\n # AND .get_vectordata()\n # AND MAKE EACH YIELDED ROW A VECTOR FEATURE CLASS\n # THIS WAY ALLOWING CHAINED QUERIES\n\n # parse args\n iterables = _from\n columnfuncs = _select\n geomfunc = _geomselect\n condition = _where\n key = _groupby\n n = _limit\n \n # first yield header as list of column names\n colnames = [each[0] for each in columnfuncs]\n yield colnames\n\n # make an iterable that yields every combinaion of all input iterables' items\n if len(iterables) == 1:\n iterable = iterables[0]\n else:\n iterable = itertools.product(*iterables)\n\n # iterate and add\n if key:\n groups = groupby(iterable, key)\n\n # limit\n if n:\n groups = limit(groups, n)\n \n for items in groups:\n # filter\n if condition:\n items = where(items, condition)\n \n # aggregate\n # NOTE: columnfuncs and geomfunc must expect an iterable as input and return a single row,geom pair\n item = aggreg(items, columnfuncs, geomfunc)\n yield item\n \n else:\n # filter\n if condition:\n iterable = where(iterable, condition)\n\n # limit\n if n:\n iterable = limit(iterable, n)\n\n # select\n for item in select(iterable, columnfuncs, geomfunc):\n yield item", "def get_vector(self, source, destination):", "def generate_vector(self,dim=0,v=None):\n vec = dl.Vector()\n self.init_vector(vec,dim)\n if v is not None:\n vec[:]=v\n return vec" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Groups data in SimulationReports by the value of alpha or gamma2
def group_data(simulation_reports: List[SimulationReport]) -> Dict[float, SimulationTable]:
    heat_maps: OrderedDict[float, SimulationTable] = OrderedDict()
    for report in simulation_reports:
        if report.param not in heat_maps:
            param_name = "alpha" if report.growth_type == GrowthType.Polynomial else "gamma2"
            simulation_table = heat_maps.setdefault(
                report.param,
                SimulationTable(report.growth_type, param_name, report.param, OrderedDict()),
            )
        else:
            simulation_table = heat_maps[report.param]
        errors_by_prefix = simulation_table.errors.setdefault(report.prefix_length, [])
        errors_by_prefix.append((report.b0, report.error))
    return heat_maps
[ "def alpha_stats (alphas, groups, significant = 0.05):\n\t\n\tK = alphas.shape[1]\n\tgroup1 = np.array(groups)\n\tgroup2 = ~ group1\n\n\t#T-test\n\ta = alphas[group1 ] \n\tb = alphas[group2]\n\n\t#T-test\n\ttt,p_tt = ttest_ind (a,b)\n\t# Mann Whitney Test\n\tmw = []\n\tp_mw =[]\n\tfor i in range (K):\n\t\t m,p =mannwhitneyu(a[:,i],b[:,i])\n\t\t mw.append(2*m)\n\t\t p_mw.append(p)\n\t\t\n\t# create a data frame with FC, T , p-value, adjusted p-value for each topic\n\trows = []\n\tK = alphas.shape[1]\n\tfc = np.log2(np.mean(a,axis=0)) - np.log2(np.mean(b, axis=0))\n\tp_adjust_tt = p_adjust_bh(p_tt)\n\tp_adjust_mw = p_adjust_bh(p_mw)\n\t\n\tfor i in range(K):\n\t\tsig_tt = p_adjust_tt < significant\n\t\tsig_mw = p_adjust_mw < significant\n\t\trows.append ((i,fc[i], p_tt[i],p_adjust_tt[i],sig_tt[i],p_mw[i],p_adjust_mw[i],sig_mw[i]))\n\n\tdf1 = pd.DataFrame(rows, columns=[ 'topic', 'log2FC', 'TT p-value','TT p_adjust', 'TT significant', 'MW p-value','MW p_adjust', 'MW significant'])\n\treturn df1", "def _do_group_data(self):\n self.height_levels = []\n self.all_expt_data = []\n\n self.expts = ExptList(self.suite)\n self.expts.find(self.task.expts)\n for expt in self.task.expts:\n expt_obj = self.expts.get(expt)\n cubes = self.expt_cubes[expt]\n sorted_cubes = []\n\n for cube in cubes:\n if cube.name().startswith('mass_flux'):\n (height_level_index, thresh_index) = cube.attributes['mass_flux_key']\n mf_key = (height_level_index, thresh_index)\n sorted_cubes.append((mf_key, cube))\n\n # Each element is a tuple like: ((1, 3), cube)\n # Sorting will put in correct order, sorting on initial tuple.\n sorted_cubes.sort()\n\n # Group on first element of tuple, i.e. on 1 for ((1, 3), cube)\n # i.e. group on height_level_index.\n for height_level_index, cubes in groupby(sorted_cubes, lambda x: x[0][0]):\n if height_level_index not in self.height_levels:\n self.height_levels.append(height_level_index)\n expt_data = {\n 'cubes': list(cubes),\n 'expt_obj': expt_obj,\n 'height_level_index': height_level_index\n }\n self.all_expt_data.append(expt_data)", "def compute_data_gamma_(idx2_daid, wx2_rvecs, wx2_aids, wx2_idf,\n alpha=3, thresh=0):\n if utool.DEBUG2:\n from ibeis.model.hots.smk import smk_debug\n smk_debug.rrr()\n smk_debug.check_wx2(wx2_rvecs=wx2_rvecs, wx2_aids=wx2_aids)\n wx_sublist = pdh.ensure_values(pdh.ensure_index(wx2_rvecs))\n if utool.VERBOSE:\n print('[smk_index] Compute Gamma alpha=%r, thresh=%r: ' % (alpha, thresh))\n mark1, end1_ = utool.log_progress(\n '[smk_index] Gamma group (by word): ', len(wx_sublist),\n flushfreq=100, writefreq=50, with_totaltime=True)\n # Get list of aids and rvecs w.r.t. 
words\n aids_list = pdh.ensure_values_subset(wx2_aids, wx_sublist)\n rvecs_list1 = pdh.ensure_values_subset(wx2_rvecs, wx_sublist)\n # Group by daids first and then by word index\n daid2_wx2_drvecs = utool.ddict(lambda: utool.ddict(list))\n for wx, aids, rvecs in zip(wx_sublist, aids_list, rvecs_list1):\n group_aids, groupxs = clustertool.group_indicies(aids)\n rvecs_group = clustertool.apply_grouping(rvecs, groupxs) # 2.9 ms\n for aid, rvecs_ in zip(group_aids, rvecs_group):\n daid2_wx2_drvecs[aid][wx] = rvecs_\n\n if utool.VERBOSE:\n end1_()\n\n # For every daid, compute its gamma using pregrouped rvecs\n # Summation over words for each aid\n if utool.VERBOSE:\n mark2, end2_ = utool.log_progress(\n '[smk_index] Gamma Sum (over daid): ', len(daid2_wx2_drvecs),\n flushfreq=100, writefreq=25, with_totaltime=True)\n # Get lists w.r.t daids\n aid_list = list(daid2_wx2_drvecs.keys())\n # list of mappings from words to rvecs foreach daid\n # [wx2_aidrvecs_1, ..., wx2_aidrvecs_nDaids,]\n _wx2_aidrvecs_list = list(daid2_wx2_drvecs.values())\n _aidwxs_iter = (list(wx2_aidrvecs.keys()) for wx2_aidrvecs in _wx2_aidrvecs_list)\n aidrvecs_list = [list(wx2_aidrvecs.values()) for wx2_aidrvecs in _wx2_aidrvecs_list]\n aididf_list = [[wx2_idf[wx] for wx in aidwxs] for aidwxs in _aidwxs_iter]\n\n #gamma_list = []\n if utool.DEBUG2:\n try:\n for count, (idf_list, rvecs_list) in enumerate(zip(aididf_list, aidrvecs_list)):\n assert len(idf_list) == len(rvecs_list), 'one list for each word'\n #gamma = smk_core.gamma_summation2(rvecs_list, idf_list, alpha, thresh)\n except Exception as ex:\n utool.printex(ex)\n utool.embed()\n raise\n gamma_list = [smk_core.gamma_summation2(rvecs_list, idf_list, alpha, thresh)\n for idf_list, rvecs_list in zip(aididf_list, aidrvecs_list)]\n\n if WITH_PANDAS:\n daid2_gamma = pdh.IntSeries(gamma_list, index=aid_list, name='gamma')\n else:\n daid2_gamma = dict(zip(aid_list, gamma_list))\n if utool.VERBOSE:\n end2_()\n\n return daid2_gamma", "def gamma_h_subgroups(self):\n from .all import GammaH\n N = self.level()\n R = IntegerModRing(N)\n return [GammaH(N, H) for H in R.multiplicative_subgroups()]", "def test_0003_group_statistics_dict(self):\n np.random.seed(987654321)\n x_input_array = st.norm.rvs(2, 1, size=100)\n y_input_array = st.norm.rvs(2, 3, size=45)\n z_input_array = st.norm.rvs(8, 1, size=18)\n data = {\"one\": x_input_array, \"two\": y_input_array, \"three\": z_input_array}\n output = \"\"\"\n\nOverall Statistics\n------------------\n\nNumber of Groups = 3\nTotal = 163\nGrand Mean = 4.1568\nPooled Std Dev = 2.0798\nGrand Median = 2.2217\n\n\nGroup Statistics\n----------------\n\nn Mean Std Dev Min Median Max Group \n--------------------------------------------------------------------------------------------------\n100 2.0083 1.0641 -0.4718 2.0761 4.2466 one \n18 8.0944 1.1855 6.0553 7.9712 10.5272 three \n45 2.3678 3.5551 -4.8034 2.2217 11.4199 two \"\"\"\n res = GroupStatistics(data, display=False)\n self.assertTrue(res)\n self.assertEqual(str(res), output)\n self.assertEqual(res.total, 163)\n self.assertEqual(res.k, 3)\n self.assertAlmostEqual(res.pooled, 2.0798, 4)\n self.assertAlmostEqual(res.pooled_std, 2.0798, 4)\n self.assertAlmostEqual(res.gmean, 4.1568, 4)\n self.assertAlmostEqual(res.grand_mean, 4.1568, 4)\n self.assertAlmostEqual(res.gmedian, 2.2217, 4)\n self.assertAlmostEqual(res.grand_median, 2.2217, 4)", "def get_group_report(self, phenotype):", "def test_group_by_hardware_info(self):\n self._test_group_by('Hardware Info', [1, 1, 2, 1, 1])", "def 
test_0017_group_statistics_dict_groups_is_none(self):\n np.random.seed(987654321)\n x_input_array = st.norm.rvs(2, 1, size=100)\n y_input_array = st.norm.rvs(2, 3, size=45)\n z_input_array = st.norm.rvs(8, 1, size=18)\n data = {\"one\": x_input_array, \"two\": y_input_array, \"three\": z_input_array}\n output = \"\"\"\n\nOverall Statistics\n------------------\n\nNumber of Groups = 3\nTotal = 163\nGrand Mean = 4.1568\nPooled Std Dev = 2.0798\nGrand Median = 2.2217\n\n\nGroup Statistics\n----------------\n\nn Mean Std Dev Min Median Max Group \n--------------------------------------------------------------------------------------------------\n100 2.0083 1.0641 -0.4718 2.0761 4.2466 one \n18 8.0944 1.1855 6.0553 7.9712 10.5272 three \n45 2.3678 3.5551 -4.8034 2.2217 11.4199 two \"\"\"\n res = GroupStatistics(data, groups=None, display=False)\n self.assertTrue(res)\n self.assertEqual(str(res), output)\n self.assertEqual(res.total, 163)\n self.assertEqual(res.k, 3)\n self.assertAlmostEqual(res.pooled, 2.0798, 4)\n self.assertAlmostEqual(res.pooled_std, 2.0798, 4)\n self.assertAlmostEqual(res.gmean, 4.1568, 4)\n self.assertAlmostEqual(res.grand_mean, 4.1568, 4)\n self.assertAlmostEqual(res.gmedian, 2.2217, 4)\n self.assertAlmostEqual(res.grand_median, 2.2217, 4)", "def get_group_results(upper_letter):\n feed_group = get_feed_group(upper_letter)\n\n if not feed_group:\n return GroupResult(group_name='Groupe {0}'.format(upper_letter), results=[])\n\n matches = get_matches_from_feed(feed_group, 'Phase de groupe')\n result = get_results_from_matches(matches)\n result_group = GroupResult(group_name='Groupe {0}'.format(upper_letter),\n results=[result for result in result.values()])\n return result_group", "def group_data_by_gs(data_table):\n gene_data = collections.defaultdict(lambda: collections.defaultdict(list))\n for _idx, row in data_table.iterrows():\n samp = row['sample']\n gene = row['gene']\n gene_data[gene][samp].append({\n 'muttype': row['type'].strip(),\n 'normalized': row['Normalized'], # NMAF in the manuscript\n 'consequence': row['MissenseConsequence'].strip(),\n })\n return gene_data", "def test_AB(dataframe, group, target, alpha):\r\n\r\n #Step-1\r\n\r\n temp_list = []\r\n for x in df[group].unique():\r\n for y in df[group].unique():\r\n temp_list.append([x, y])\r\n\r\n fset = set(frozenset(x) for x in temp_list)\r\n uniq_list = [list(x) for x in fset if len(x) > 1]\r\n\r\n final_df = pd.DataFrame()\r\n\r\n #Step-2\r\n\r\n #H0: There is no difference between group averages\r\n #H1: There is a difference between the group averages\r\n\r\n for i in range(0, len(uniq_list)):\r\n group_A = dataframe[dataframe[group] == uniq_list[i][0]][target]\r\n group_B = dataframe[dataframe[group] == uniq_list[i][1]][target]\r\n\r\n #Step-3\r\n\r\n normal_A = shapiro(group_A)[1] < alpha\r\n normal_B = shapiro(group_B)[1] < alpha\r\n\r\n #Step-4\r\n\r\n if (normal_A == False) & (normal_B == False):\r\n levene_ = levene(group_A, group_B)[1] < alpha\r\n #Step-5\r\n\r\n if levene_ == False:\r\n p_value = ttest_ind(group_A, group_B, equal_var=True)[1]\r\n else:\r\n p_value = ttest_ind(group_A, group_B, equal_var=False)[1]\r\n else:\r\n p_value = mannwhitneyu(group_A, group_B)[1]\r\n\r\n #Step-6\r\n\r\n temp_df = pd.DataFrame({\"Hypothesis\": [p_value < alpha],\"p_value\": p_value,\r\n \"Group_A_Mean\": [group_A.mean()], \"Group_B_Mean\": [group_B.mean()],\r\n \"Mean_Difference\":[abs(group_A.mean()-group_B.mean())]}, index = [set(uniq_list[i])])\r\n\r\n temp_df[\"Hypothesis\"] = 
np.where(temp_df[\"Hypothesis\"] == False, \"Fail to Reject H0\", \"Reject H0\")\r\n temp_df[\"Test\"] = np.where((normal_A == False) & (normal_B == False), \"Parametric\", \"Non-Parametric\")\r\n final_df = pd.concat([final_df, temp_df[[\"Test\", \"Hypothesis\", \"p_value\",\"Group_A_Mean\",\"Group_B_Mean\",\"Mean_Difference\"]]])\r\n return final_df", "def divisor_subgroups(self):\n return [Gamma0_constructor(M) for M in self.level().divisors()]", "def get_filter_stats(data: AnnData) -> Tuple[pd.DataFrame, pd.DataFrame]:\n\n # cell stats\n gb1 = data.obs.groupby(\"Channel\")\n df_before = gb1.median()\n df_before = df_before.assign(total=gb1.size())\n df_before.rename(\n columns={\n \"n_genes\": \"median_n_genes_before\",\n \"n_counts\": \"median_n_umis_before\",\n \"percent_mito\": \"median_percent_mito_before\",\n },\n inplace=True,\n )\n\n data = data[data.obs[\"passed_qc\"]] # focusing only on filtered cells\n\n gb2 = data.obs.groupby(\"Channel\")\n df_after = gb2.median()\n df_after = df_after.assign(kept=gb2.size())\n df_after.rename(\n columns={\n \"n_genes\": \"median_n_genes\",\n \"n_counts\": \"median_n_umis\",\n \"percent_mito\": \"median_percent_mito\",\n },\n inplace=True,\n )\n df_cells = pd.concat((df_before, df_after), axis=1, sort=False)\n df_cells.fillna(0, inplace=True)\n df_cells[\"kept\"] = df_cells[\"kept\"].astype(int)\n df_cells[\"filt\"] = df_cells[\"total\"] - df_cells[\"kept\"]\n df_cells = df_cells[\n [\n \"kept\",\n \"median_n_genes\",\n \"median_n_umis\",\n \"median_percent_mito\",\n \"filt\",\n \"total\",\n \"median_n_genes_before\",\n \"median_n_umis_before\",\n \"median_percent_mito_before\",\n ]\n ]\n df_cells.sort_values(\"kept\", inplace=True)\n\n # gene stats\n idx = data.var[\"robust\"] == False\n df_genes = pd.DataFrame(\n {\n \"n_cells\": data.var.loc[idx, \"n_cells\"],\n \"percent_cells\": data.var.loc[idx, \"percent_cells\"],\n }\n )\n df_genes.index.name = \"gene\"\n df_genes.sort_values(\"n_cells\", ascending=False, inplace=True)\n\n return df_cells, df_genes", "def group(self):\n group_name = dict(rate_value=\"alpha\", meas_value=\"beta\", meas_std=\"gamma\")\n return group_name[self.grid_spec.mulcov_type]", "def test_0006_group_statistics_dict_single_empty_vector(self):\n np.random.seed(987654321)\n x_input_array = st.norm.rvs(2, 1, size=10)\n y_input_array = [\"this\", \"is\", \"a\", \"string\"]\n z_input_array = st.norm.rvs(8, 1, size=10)\n data = {\"one\": x_input_array, \"two\": y_input_array, \"three\": z_input_array}\n output = \"\"\"\n\nOverall Statistics\n------------------\n\nNumber of Groups = 2\nTotal = 20\nGrand Mean = 5.1489\nPooled Std Dev = 1.2409\nGrand Median = 5.1744\n\n\nGroup Statistics\n----------------\n\nn Mean Std Dev Min Median Max Group \n--------------------------------------------------------------------------------------------------\n10 2.3511 1.3732 0.6591 2.3882 4.2466 one \n10 7.9466 1.0927 6.3630 7.9607 9.7260 three \"\"\"\n res = GroupStatistics(data, display=False)\n self.assertTrue(res)\n self.assertEqual(str(res), output)\n self.assertEqual(res.total, 20)\n self.assertEqual(res.k, 2)\n self.assertAlmostEqual(res.pooled, 1.2409, 4)\n self.assertAlmostEqual(res.pooled_std, 1.2409, 4)\n self.assertAlmostEqual(res.gmean, 5.1489, 4)\n self.assertAlmostEqual(res.grand_mean, 5.1489, 4)\n self.assertAlmostEqual(res.gmedian, 5.1744, 4)\n self.assertAlmostEqual(res.grand_median, 5.1744, 4)", "def group_mae(outputs: torch.Tensor, targets: torch.Tensor) -> List[Tuple[int, int, int, float, str]]:\n # groups = [\n # 
(-1, 1800, \"0-0.5h\"),\n # (1800, 3600, \"0.5-1h\"),\n # (3600, 7200, \"1-2h\"),\n # (7200, 10800, \"2-3h\"),\n # (10800, 14400, \"3-4h\"),\n # (14400, 18000, \"4-5h\"),\n # (18000, 21600, \"5-6h\"),\n # (21600, 25200, \"6-7h\"),\n # (25200, 28800, \"7-8h\"),\n # (28800, 32400, \"8-9h\"),\n # (32400, 36000, \"9-10h\"),\n # (36000, 39600, \"10-11h\"),\n # (39600, 43200, \"11-12\"),\n # (43200, 86400, \"12h - 1 day\"),\n # (86400, 172800, \"1 day - 2 days\"),\n # (172800, 259200, \"2 days - 3 days\"),\n # (259200, 345600, \"3 days - 4 days\"),\n # (345600, 432000, \"4 days - 5 days\"),\n # (432000, 518400, \"5 days - 6 days\"),\n # (518400, 604800, \"6 days - 1 week\"),\n # (604800, 155520000, \"1 week - 1 month\"),\n # (155520000, int(data_ranges[\"label\"][\"max\"]), \"> 1 month\")\n # ]\n groups = [\n (-1, 1800, \"0-0.5h\"),\n (1800, 3600, \"0.5-1h\"),\n (3600, 7200, \"1-2h\"),\n (7200, 10800, \"2-3h\"),\n (10800, 14400, \"3-4h\"),\n (14400, 21600, \"4-6h\"),\n (21600, 28800, \"6-8h\"),\n (28800, 36000, \"8-10h\"),\n (36000, 43200, \"10-12h\"),\n (43200, 50400, \"12-16h\"),\n (50400, 64800, \"16-20h\"),\n (64800, 86400, \"20-24h\"),\n (86400, 172800, \"1-2d\"),\n (172800, 259200, \"2-3d\"),\n (259200, 345600, \"3-4d\"),\n (345600, 432000, \"4-5d\"),\n (432000, 518400, \"5-6d\"),\n (518400, 604800, \"6-7d\"),\n (604800, 1209600, \"1-2w\"),\n (1209600, 2419200, \"2-4w\"),\n (2419200, int(data_ranges[\"label\"][\"max\"]), \"> 4w\")\n ]\n\n def scale(seconds: int) -> float:\n # half_range = (data_ranges[\"label\"][\"max\"] - data_ranges[\"label\"][\"min\"]) / 2\n # result = seconds / half_range\n # return -1 + result if seconds < half_range else result\n label_range = data_ranges[\"label\"][\"max\"]\n return seconds / label_range\n\n def process_group(x: torch.Tensor, y: torch.Tensor, group: Tuple[int, int, str]) -> Tuple[int, int, int, float,\n str]:\n criterion = nn.L1Loss(reduction=\"mean\")\n mask = (y > scale(group[0])) & (y <= scale(group[1]))\n # mask = (y > group[0]) & (y <= group[1])\n x = x[mask]\n y = y[mask]\n mae = 0.\n num_data = x.shape[0]\n if num_data > 0:\n loss = criterion(x, y)\n mae = loss.item()\n return group[0], group[1], num_data, mae, group[2]\n\n mae_groups = [process_group(outputs, targets, group) for group in groups]\n return mae_groups", "def create_arrays(self, j, gamma):\n # Шаг\n lamb = (self.__b - self.__a) / j\n\n result = dict()\n\n self.__x_array.append(self.__a)\n # Содержит j подмассивов со значениями от X[j] до X[j+1]\n uzli = [[] for _ in range(j)]\n\n for i in range(j):\n self.__x_array.append(self.__x_array[i] + lamb)\n\n for value in self.selection:\n index = math.floor((value - self.__a) / lamb)\n uzli[index].append(value)\n\n # Сортировка подмасивов (можно убрать)\n for i in range(len(uzli)):\n uzli[i] = sorted(uzli[i])\n\n # Массив содержащий частоты\n counts = []\n # Подсчет частот\n for i, vals in enumerate(uzli):\n counts.append(len(vals))\n\n m = [0]\n k = []\n for i in range(j):\n m.append(sum([counts[j] for j in range(i + 1)]))\n m[i] /= self.__n\n k.append(1 - m[i])\n m[-1] /= self.__n\n result.update({\"uzli\": uzli, \"counts\": counts, \"m\": m, \"k\": k})\n result.update(self.define_xr_indicators(self.__x_array, k, gamma, j))\n\n return result", "def test_grouped(self):\n gfile = grades.writers.GradesFile(self.fname)\n gfile.table.compute_grouped_mean('Group')\n gfile.table_format = 'org'\n self.check_output(self.output_str2, gfile)", "def print_groups():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Abstract method invoked when a trial is completed or terminated. Do nothing by default.
def trial_end(self, parameter_id, success, **kwargs):
[ "def on_trial_complete(self, trial_runner, trial, result):\n\n raise NotImplementedError", "def on_trial_complete(self, trial: Trial, result: Dict[str, Any]):\n pass", "def trial(self):\n pass", "def trial_clean_up(self):\n pass", "def on_trial_result(self, trial_runner, trial, result):\n\n raise NotImplementedError", "def _on_test_end(self):\n pass", "def on_trial_error(self, trial: Trial):\n pass", "def run_trial(self, trial_info):\n raise NotImplementedError", "def on_trial_add(self, trial: Trial):\n pass", "def on_trial_error(self, trial_runner, trial):\n\n raise NotImplementedError", "def on_trial_add(self, trial_runner, trial):\n\n raise NotImplementedError", "def end(self) -> None:\n self.status = ExperimentStatus.SUCCEED\n self.ended_at = int(datetime.utcnow().timestamp())", "def on_trial_remove(self, trial: Trial):\n pass", "def finalize_integration(self, **kwargs):", "def trial_completed(self, behavior_data):\r\n # Update elapsed_time\r\n self.elapsed_time = datetime.datetime.now() - self.init_datetime\r\n self.behavior_data = behavior_data\r\n correct = ~np.isnan(\r\n self.behavior_data['States timestamps']['correct'][0][0])\r\n error = ~np.isnan(\r\n self.behavior_data['States timestamps']['error'][0][0])\r\n no_go = ~np.isnan(\r\n self.behavior_data['States timestamps']['no_go'][0][0])\r\n assert correct or error or no_go\r\n # Add trial's response time to the buffer\r\n self.response_time = misc.get_trial_rt(self.behavior_data)\r\n self.response_time_buffer.append(self.response_time)\r\n # Update response buffer -1 for left, 0 for nogo, and 1 for rightward\r\n if (correct and self.position < 0) or (error and self.position > 0):\r\n self.response_side_buffer.append(1)\r\n elif (correct and self.position > 0) or (error and self.position < 0):\r\n self.response_side_buffer.append(-1)\r\n elif no_go:\r\n self.response_side_buffer.append(0)\r\n # Update the trial_correct variable + buffer\r\n self.trial_correct = bool(correct)\r\n self.trial_correct_buffer.append(self.trial_correct)\r\n # Increment the trial correct counter\r\n self.ntrials_correct += self.trial_correct\r\n # Update the water delivered\r\n if self.trial_correct:\r\n self.water_delivered += self.reward_amount\r\n\r\n # SAVE TRIAL DATA\r\n params = self.__dict__.copy()\r\n params.update({'behavior_data': behavior_data})\r\n # Convert to str all non serializable params\r\n params['data_file'] = str(params['data_file'])\r\n params['osc_client'] = 'osc_client_pointer'\r\n params['init_datetime'] = params['init_datetime'].isoformat()\r\n params['elapsed_time'] = str(params['elapsed_time'])\r\n params['position'] = int(params['position'])\r\n # Delete buffered data\r\n params['stim_probability_left_buffer'] = ''\r\n params['position_buffer'] = ''\r\n params['contrast_buffer'] = ''\r\n params['signed_contrast_buffer'] = ''\r\n params['response_time_buffer'] = ''\r\n params['response_side_buffer'] = ''\r\n params['trial_correct_buffer'] = ''\r\n # Dump and save\r\n out = json.dumps(params, cls=ComplexEncoder)\r\n self.data_file.write(out)\r\n self.data_file.write('\\n')\r\n self.data_file.close()\r\n # If more than 42 trials save transfer_me.flag\r\n if self.trial_num == 42:\r\n misc.create_flags(self.data_file_path, self.poop_count)\r\n\r\n return self", "def __on_trial_start__(self) -> None:\n if self.__experiment__.is_experiment_complete:\n self.change_page(self.experiment_end_page_id)\n return\n\n if not self.__practice_complete_handled__ and \\\n self.__experiment__.is_practice_complete:\n
self.__practice_complete_handled__ = True\n self.change_page(self.practice_end_page_id)\n return\n\n if self.__experiment__.is_block_complete:\n self.__experiment__.is_block_complete = False\n self.change_page(self.block_end_rest_page_id)\n QTimer.singleShot(configuration.BLOCK_END_REST_DURATION, self.__on_trial_start__)\n return\n\n self.__current_trial__ = self.__experiment__.get_current_trial()\n self.set_trial_bars_to_display(self.__current_trial__.bars_to_display)\n self.change_page(self.fixation_page_id)\n QTimer.singleShot(configuration.FIXATION_DURATION, self.__on_fixation_end__)", "def on_completed(self):\n\n pass", "def on_experiment_end(self):\n G.debug(\"BaseCallback.on_experiment_end()\")", "def trial_prep(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }