query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Merges metric data from a snapshot. This is used both when merging data from a single transaction into the main stats engine, and for performing a rollback merge. In either case, the merge is done the exact same way.
def merge_metric_stats(self, snapshot): if not self.__settings: return for key, other in six.iteritems(snapshot.__stats_table): stats = self.__stats_table.get(key) if not stats: self.__stats_table[key] = other else: stats.merge_stats(other)
[ "def merge(self, snapshot):\n\n if not self.__settings:\n return\n\n self.merge_metric_stats(snapshot)\n self._merge_transaction_events(snapshot)\n self._merge_synthetics_events(snapshot)\n self._merge_error_events(snapshot)\n self._merge_error_traces(snapshot)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges in a set of custom metrics. The metrics should be provided as an iterable where each item is a tuple of the metric name and the accumulated stats for the metric.
def merge_custom_metrics(self, metrics): if not self.__settings: return for name, other in metrics: key = (name, '') stats = self.__stats_table.get(key) if not stats: self.__stats_table[key] = other else: stats.merge_stats(other)
[ "def add_metrics(self, metrics: List[Tuple]):\n for metric in metrics:\n metric_name, metric_function = metric\n self[metric_name] = metric_function\n return self", "def add_metrics(self, metrics):\n for i,metric in enumerate(self.config.metrics):\n tf.summary...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the player is ready to be rendered on the character sheet
def is_player_ready(self): player = self.base.game_instance['player_ref'] if (player and base.player_states["is_alive"] and base.player_states["is_idle"] and not base.player_states["is_moving"] and not base.player_states["is_running"] and not base.player_states["is_crouch_moving"] and not base.player_states["is_crouching"] and not base.player_states["is_standing"] and not base.player_states["is_jumping"] and not base.player_states["is_h_kicking"] and not base.player_states["is_f_kicking"] and not base.player_states["is_using"] and not base.player_states["is_attacked"] and not base.player_states["is_busy"] and not base.player_states["is_turning"] and not base.player_states["is_mounted"] and not base.player_states["horse_riding"] and not self.base.game_instance["is_player_sitting"] and not player.get_python_tag("is_on_horse") ): return True else: return False
[ "def is_ready(self):\n if self.game.has_started():\n return True\n return self.status == self.PLAYER_READY", "def ready(self):\n return self.shader is not None and self.texturesReady()", "def ready(self):\n return self.shader is not None and self.textureReady()", "def ch...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the script at given path catching exceptions. This function should only be used internally by Pyto.
def runScriptAtPath(path): sys.argv = [path] for arg in PytoClasses.Python.shared.args: sys.argv.append(str(arg)) def run() -> None: os.system = PytoClasses.Python.shared.system directory = os.path.expanduser(os.path.dirname(path)) sys.path.insert(0, directory) try: global __script__ spec = importlib.util.spec_from_file_location("__main__", path) __script__ = importlib.util.module_from_spec(spec) spec.loader.exec_module(__script__) PytoClasses.Python.shared.values = [item for item in dir(__script__) if not item.startswith("__")] except SystemExit: print("SystemExit") except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() extracts = traceback.extract_tb(sys.exc_info()[2]) count = len(extracts) lineNumber = -1 fileName = path for i, extract in enumerate(extracts): if extract[0] == fileName: lineNumber = extract[1] break count -= 1 if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1] PytoClasses.Python.shared.errorType = exc_type.__name__ PytoClasses.Python.shared.errorReason = str(e) PytoClasses.EditorViewController.visible.showErrorAtLine(lineNumber) print(traceback.format_exc(limit=-count)) sys.path.remove(directory) PytoClasses.ReviewHelper.shared.launches = PytoClasses.ReviewHelper.shared.launches+1 PytoClasses.ReviewHelper.shared.requestReview() PytoClasses.Python.shared.isScriptRunning = False thread = threading.Thread(target=run, args=()) def loop(): while PytoClasses.Python.shared.isScriptRunning: time.sleep(1) ignoredThreads.append(thread) raise Exception("Stopped script!") def runLoop(): try: loop() except: pass thread.start() runLoop() return __script__
[ "def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n execfile(script_path, globals_)", "def script(self, path):\n e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Expected defaults when no project exists
def test_no_project_defaults(self): ep = exposed.ExposedProject() self.assertIsNone(ep.display) self.assertIsNone(ep.shared) self.assertIsNone(ep.settings) self.assertIsNone(ep.title) self.assertIsNone(ep.id) self.assertIsNone(ep.path()) with self.assertRaises(RuntimeError): ep.title = 'Some Title'
[ "def test_get_project(self):\n pass", "def _determine_default_project(project=None):\n if project is None:\n project = _get_gcd_project()\n\n if project is None:\n project = _helpers._determine_default_project(project=project)\n\n return project", "def test_project(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should abort stopping and not raise an error when no internal step is available to stop.
def test_step_stop_aborted(self, _step: PropertyMock): _step.return_value = None es = exposed.ExposedStep() es.stop()
[ "def abort(self):\n self.__stop()", "def _force_stop(self):\n if self.force_stop_func(instance=self.instance):\n self.force_stop_func(instance=self.instance, _set=True)\n raise StoppedException", "def _gracefully_stop(self):\n pass", "def test_v1_stop(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should abort stopping and not raise an error when no internal project is available to stop.
def test_project_stop_aborted(self, get_internal_project: MagicMock): get_internal_project.return_value = None ep = exposed.ExposedProject() ep.stop()
[ "def stopBuild(reason=\"<no reason given>\"):", "def _gracefully_stop(self):\n pass", "def abort(self):\n self.__stop()", "def test_provider_project_development_stop(self):\n pass", "def stopSolution(self):\n raise NotImplementedError", "def test_v1_stop(self):\n pass", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should fail to get internal project and return None after eventually timing out.
def test_get_internal_project_fail( self, sleep: MagicMock, time_time: MagicMock, internal_project: PropertyMock ): project = exposed.ExposedProject() time_time.side_effect = range(20) internal_project.return_value = None result = project.get_internal_project() self.assertIsNone(result) self.assertEqual(10, sleep.call_count)
[ "def test_get_project(self):\n pass", "def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None", "def Project(self):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should write to the console using a write_source function call on the internal step report's stdout_interceptor.
def test_write_to_console(self, _step: PropertyMock): trials = [2, True, None, 'This is a test', b'hello'] for message in trials: _step_mock = MagicMock() write_source = MagicMock() _step_mock.report.stdout_interceptor.write_source = write_source _step.return_value = _step_mock step = exposed.ExposedStep() step.write_to_console(message) args, kwargs = write_source.call_args self.assertEqual('{}'.format(message), args[0])
[ "def test_render_to_console(self, _step: PropertyMock):\n message = ' {{ a }} is not {{ b }}.'\n\n _step_mock = MagicMock()\n write_source = MagicMock()\n _step_mock.report.stdout_interceptor.write_source = write_source\n _step.return_value = _step_mock\n step = exposed.E...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should render to the console using a write_source function call on the internal step report's stdout_interceptor.
def test_render_to_console(self, _step: PropertyMock): message = ' {{ a }} is not {{ b }}.' _step_mock = MagicMock() write_source = MagicMock() _step_mock.report.stdout_interceptor.write_source = write_source _step.return_value = _step_mock step = exposed.ExposedStep() step.render_to_console(message, a=7, b='happy') args, kwargs = write_source.call_args self.assertEqual('7 is not happy.', args[0])
[ "def test_write_to_console(self, _step: PropertyMock):\n trials = [2, True, None, 'This is a test', b'hello']\n\n for message in trials:\n _step_mock = MagicMock()\n write_source = MagicMock()\n _step_mock.report.stdout_interceptor.write_source = write_source\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should raise a ValueError when there is no current step to operate upon by the write function call.
def test_write_to_console_fail(self, _step: PropertyMock): _step.return_value = None step = exposed.ExposedStep() with self.assertRaises(ValueError): step.write_to_console('hello')
[ "def save_step(self, step: 'BaseTransformer', context: 'CX') -> 'BaseTransformer':\n raise NotImplementedError()", "def bad_step(self):\n assert False, \"This step is meant to fail.\"", "def save_to_file(self, iter_num, iter_step):\n if iter_num%iter_step==0:\n current_date = datetim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should render an empty stack frame when the stack data is invalid.
def test_render_stop_display_error( self, get_formatted_stack_frame: MagicMock, render_template: MagicMock ): get_formatted_stack_frame.return_value = None step = MagicMock() exposed.render_stop_display(step, 'FAKE') self.assertEqual({}, render_template.call_args[1]['frame'])
[ "def stack_bad(self):\n self.dyn_stack_current_state = STACK_STATE_BAD", "def is_stack_empty(self):\n if self.stack.__len__() == 0:\n return True\n else:\n return False", "def test_empty_stack() -> None:\n with raises(GrammarParseError):\n grammar_parser.pars...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates % of alphanumeric characters in string.
def _alnum_percent(line): total = len(line) test_set = set() for letter in string.ascii_letters: test_set.add(letter) test_set.add(' ') # Return a failure (no good characters) if there are no characters if total < 1: return 0 alnum_count = 0 star_count = 0 bar_count = 0 for letter in line: # if letter.isalnum(): if letter in test_set: alnum_count += 1 if letter == '*': star_count += 1 if letter == 'I' or letter == 'i' or letter == 'l' or letter == '|': bar_count += 1 # TODO(searow): properly implement this, but sticking this here for now. if star_count / total > 0.1: return 0 if bar_count / total > 0.5: return 0 return alnum_count / total
[ "def letter_percent(s):\r\n\r\n alpha = 'abcdefghijklmnopqrstuvwxyz'\r\n s_lower = s.lower()\r\n s_length = 0\r\n letter_count = {} # empty dictionary\r\n keys = letter_count.keys()\r\n\r\n for char in s_lower:\r\n if char in alpha:\r\n s_length = s_length + 1\r\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Analyzes text lines, in order read from OCR processing. Populates the MailFields object with information gathered from OCR. Uses information from each of the lines to best figure out who is the main addressee and which box it is trying to reach.
def parse_text_lines(self, text_lines): self.__fields = mail_fields.MailFields() alphanum_threshold = 0.5 # Only evaluate lines that are predominantly alphanumeric for line in text_lines: if _alnum_percent(line) > alphanum_threshold: try: parsed = usaddress.tag(line)[0] except usaddress.RepeatedLabelError as e: # If usaddress gets confused, just throw away the answer as if # we got nothing for now. # TODO(searow): fix this to handle multiple tags and labels. parsed = {} for tag in parsed: self._add_to_fields(tag, parsed[tag]) return self.__fields
[ "def readFile(self):\n \n doc = self.__openFile() #Get opened file from __openFile()\n readDoc = doc.read() #Read document contents\n message = email.message_from_string(readDoc) #Get message object from string\n\n #Get the body of the message\n mBody = message.get_payload(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the clear method works for POSIX-based systems
def test_clear_posix(self): with mock.patch("hangman.cli.screen.os.system") as mock_system: hangman.cli.screen.Screen.clear() mock_system.assert_called_with("clear")
[ "def clearscreen():\n if os.name == 'nt':\n os.system('cls')\n elif os.name == 'posix':\n os.system('clear')\n else:\n print \"Untested OS. Please tell the developer you're on: %s\" % os.name \n sys.exit(0)", "def clear_screen():\n if sys.platform == \"linux\" or sys.platf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the goodbye method
def test_goodbye(self): with mock.patch("builtins.print") as mock_print: hangman.cli.screen.Screen.goodbye() output = ",".join([str(x) for x in mock_print.call_args_list]) self.assertTrue("Goodbye" in output)
[ "def test_program_quit(self):\n d = CommandLineInterface(donors_test)\n with self.assertRaises(SystemExit):\n d.quit_the_program()\n del d", "def test_stop(self):\n pass", "def test_quit_game(run):\n out, _ = run(dork.cli.quit_game)\n assert \"Thank you\" in out", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the total number of hives in this apiary.
def hives_count(self) -> int: return self.hives.count()
[ "def total_number_of_animals(self):\n animals = self.animal()\n print 'Total number of animals on island: {:4}'.format(\n animals[\"Herbivores\"] + animals[\"Carnivores\"])", "def count_hp(self):\n print(self.character_class.hp, count_att_bonus(self.constitution))\n return s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience method which overrides the call method to call the getExpansion function
def __call__(self, data): return self.getExpansion(data)
[ "def applyExpansion(self, coem):\n pass", "def expand(self) -> Callable[\n [gs_echo.ExpandRequest],\n gs_echo.EchoResponse]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports a database from the tmp directory. Use very carefully! (or just to remind yourself how to import mysql data) Modify this code directly if needed, as it hardwires the username, db name and filename.
def mysql_import(): # first make another copy of the db run("mysqldump -u database_user database_name -p > ~/tmp/exported_db_temp.sql") # then import from the backup run("mysql -u database_user -p -D database_name < ~/tmp/exported_db.sql")
[ "def import_remote_db():\n run_export_db()\n run_download_db()\n drop_db()\n create_db()\n import_db()\n reset_passwords()", "def import_database(filename):\n local('pg_restore -O -c -U pyconsg -d pyconsg {0}'.format(filename))", "def import_db(self, mysql_dump):\n print('Importing m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up an ssh shortcut. Called by setup_ssh_keys. You can call it separately if desired.
def update_ssh_shortcut(output_keyfile, quickname=None): if quickname: with settings(warn_only=True): local("touch $HOME/.ssh/config") local(r"echo '' >> $HOME/.ssh/config") local(r"echo 'Host %s' >> $HOME/.ssh/config" % quickname) local(r"echo '' >> $HOME/.ssh/config") local(r"echo 'Hostname %s' >> $HOME/.ssh/config" % host_name) local(r"echo 'User %s' >> $HOME/.ssh/config" % user) local(r"echo 'IdentityFile ~/.ssh/%s' >> $HOME/.ssh/config" % output_keyfile) local(r"echo 'ServerAliveCountMax 3' >> $HOME/.ssh/config") local(r"echo 'ServerAliveInterval 10' >> $HOME/.ssh/config")
[ "def setup_ssh(public_key: Union[str, List[str]], mattermost_webhook_address: Optional[str] = None):\n public_key = parse_public_key(public_key)\n\n if not check_gpu_available():\n return # pragma: no cover\n\n # Config password for root user\n msg = \"\"\n msg = config_root_password(msg)\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a new SSH key and deliver it to the server. If quickname is provided, also set up an ssh shortcut. Use this to enable passwordless access to webfaction.
def setup_ssh_keys(output_keyfile="id_rsa", ssh_type="rsa", quickname=None): with settings(warn_only=True): local("mkdir -p $HOME/.ssh") with cd("$HOME/.ssh"): local("ssh-keygen -t %s -f %s" % (ssh_type, output_keyfile)) for host in env.hosts: local("scp %s.pub %s:temp_id_key.pub" % (output_keyfile, host)) with settings(warn_only=True): run("mkdir -p $HOME/.ssh") run("cat $HOME/temp_id_key.pub >> ~/.ssh/authorized_keys") run("rm $HOME/temp_id_key.pub") run("chmod 600 $HOME/.ssh/authorized_keys") run("chmod 700 $HOME/.ssh") run("chmod go-w $HOME") if quickname: update_ssh_shortcut(output_keyfile, quickname)
[ "def update_ssh_shortcut(output_keyfile, quickname=None):\n if quickname:\n with settings(warn_only=True):\n local(\"touch $HOME/.ssh/config\")\n local(r\"echo '' >> $HOME/.ssh/config\")\n local(r\"echo 'Host %s' >> $HOME/.ssh/config\" % quickname)\n local(r\"echo '' >> $HO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Installs pip itself if needed.
def install_pip(): with settings(warn_only=True): run('mkdir $HOME/lib/python2.7') run('easy_install-2.7 pip')
[ "def install_pip():\n if sys.version_info[0] < 3: # We are running python 2.x\n cmd = ['python -m easy_install pip']\n process = subprocess.Popen(cmd, shell=True)\n process.wait()\n process.poll()\n if process.returncode is 0:\n print('Successfully installed pip')\n return 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new git repo on the server (do not include the .git ending in git_repo_name)
def create_prod_git_repo(git_repo_name): with cd(git_dir): run("git init --bare %s.git && cd %s.git && git config http.receivepack true" % (git_repo_name,git_repo_name))
[ "def cmd_create(self):\n self.repo.create()\n\n # Add .gitignore.\n self.repo.add_files({'.gitignore': '.swp\\n'}, FIRST_COMMIT_MSG)\n\n # Create the etc and timestamps branches.\n self.repo.checkout('etc', create=True)\n self.repo.checkout('timestamps', create=True)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the git repo on the server as the local .git repo's origin, and pushes master to it. (do not include the .git ending in git_repo_name)
def add_prod_repo_as_origin_and_push(git_repo_name): local("""echo '[remote "origin"]' >> .git/config""") local(r"echo ' fetch = +refs/heads/*:refs/remotes/origin/*' >> .git/config") local(r"echo ' url = %s:webapps/git/repos/%s.git' >> .git/config" % (env.hosts[0], git_repo_name)) local(r"git push origin master")
[ "def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()", "def push(self):\n pass # push will happen at workspace level\n # actions.call_subprocess([GIT_EXE_PATH, 'push', 'origin', 'master'],\n # cwd=self.local_path, verbose=self.verbose...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the apache httpd.conf file to point to the new project instead of the default 'myproject'. This is called as part of clone_into_project, or you can call it separately if needed.
def update_conf_file(): filepath = remote_dir + "/apache2/conf/httpd.conf" fabric.contrib.files.sed(filepath, 'myproject', project_name)
[ "def set_apache_config():\n # Delete the existing default config file.\n if exists(\"/etc/apache2/sites-enabled/000-default.conf\"):\n run(\"rm /etc/apache2/sites-enabled/000-default.conf\")\n\n with cd(\"/etc/apache2/sites-enabled/\"):\n run(\"ln -sf /app/workshop-php-bootstrap/support/apach...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clones the git repo into the new webapp, deleting the default myproject project and updating the config file to point to the new project. Also adds a site_settings.py file to the project/project folder.
def clone_into_project(git_repo_name): repo_dir = git_dir + "/%s.git" % git_repo_name with cd(remote_dir): run('rm -rf myproject') run("git clone %s %s" % (repo_dir, project_name)) run("echo 'MY_ENV=\"prod\"' > %s/%s/site_settings.py" % (project_name,project_name)) update_conf_file()
[ "def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the "/static" and "/media" directories to the static webapp if needed, and deletes the default index.html. Also adds a project/project/static directory if there isn't one.
def add_dirs_to_static(static_webapp_name): static_dir = '$HOME/webapps/%s' % static_webapp_name with settings(warn_only=True): with cd(static_dir): run("mkdir static && mkdir media") run("rm index.html") run("touch index.html") with cd(code_dir): run("mkdir %s/static" % project_name)
[ "def ensure_static_exists():\n for entry in html_static_path:\n static_path = os.path.join(__repo_docs__, entry)\n if not os.path.isdir(static_path):\n os.makedirs(static_path)", "def setup_statics(project, frontend):\n # Create JS Static structure\n static_path = '{}/static/js'....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialises the database to contain the tables required for DjangoCMS with South. Runs syncdb all and migrate fake.
def initialise_database(): with cd(code_dir): run(python_add_str + "python manage.py syncdb --all") run(python_add_str + "python manage.py migrate --fake")
[ "def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)", "def initialize_test_db(self):\n # C...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Binary mask from cv2 styled contour (gets filled)
def make_mask(shape, contour): mask = np.zeros(shape, np.int32) cv2.drawContours(mask, [contour], 0, (255), -1) return mask
[ "def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts either bytes or unicode to `bytes`, using utf8 encoding for text.
def as_bytes(bytes_or_text, encoding='utf-8'): if isinstance(bytes_or_text, _six.text_type): return bytes_or_text.encode(encoding) elif isinstance(bytes_or_text, bytes): return bytes_or_text else: raise TypeError('Expected binary or unicode string, got %r' % (bytes_or_text,))
[ "def utf8_bytes(text):\n if not isinstance(text, bytes):\n return text.encode('utf-8')\n return text", "def _to_bytes(value: Union[str, bytes]) -> bytes:\n return value if isinstance(value, bytes) else value.encode(\"utf-8\")", "def ensure_utf8_bytes(v: Union[str, bytes]) -> bytes:\n if isins...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the file system path representation of a `PathLike` object.
def path_to_str(path): if hasattr(path, '__fspath__'): path = as_str_any(path.__fspath__()) return path
[ "def stringify_pathlike(pathlike):\n maybe_pathlike_str = (\n pathlike.__fspath__() if hasattr(pathlike, \"__fspath__\") else pathlike\n )\n\n return maybe_pathlike_str", "def as_path(path: PathLike) -> Path:\n msg = py_utils.dedent(\"\"\"\n `tfds.core.as_path` is deprecated. Pathlib API has...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a node to the front of the list with value 'val'
def push_front(self, val): new_node = Node(val, self.head) if self.is_empty(): self.tail = new_node self.head = new_node self.size += 1
[ "def insert_before(self, val, new_value):\n\n current = self.head\n\n if current.val is val:\n self.insert(new_value)\n else:\n while current.next.val is not val:\n current = current.next\n\n new_node = Node(new_value, current.next)\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a node to the back of the list with value 'val'
def push_back(self, val): new_node = Node(val) # Update current head and tail, if necessary if self.is_empty(): self.head = new_node else: self.tail.next_node = new_node # new_node is now the tail self.tail = new_node self.size += 1
[ "def push_back(self, val: Generic[T]) -> None:\n return insert(self,self.node.next,val)", "def addAtTail(self, val):\n if self.head is None:\n self.addAtHead(val)\n else:\n new_node = Node(val)\n curr = self.head\n while (curr.next is not None):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get a post given its title
def get(self, title): post = get_a_post(title) if not post: api.abort(404) else: return post
[ "def getPost(title):\n try:\n path = os.path.join(\"data\",title)\n if not os.path.isfile(path):\n raise PostDoesNotExist(title)\n \n except PostDoesNotExist:\n print \"PostDoesNotExist\"\n pass\n \n else:\n datafile = open(path, mode=\"rb\")\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reinvite an already invited user.
def reinvite_user(self, user, email): if self.is_moderator and self.has_perm('accounts.invite_user'): # Reset email, set a new token and update decision datetime user.email = email user.auth_token = generate_unique_id() user.decision_datetime = timezone.now() user.save() return user else: raise PermissionDenied
[ "def invited_user(self, invited_user):\n self._invited_user = invited_user", "def resend_invite(id):\n invite = s.query(Invites). \\\n filter(Invites.id==id). \\\n first()\n\n send_mail_unknown(invite.email, \"Register for CompetenceDB\",\n 'You are invited to regis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Approve a user's application
def approve_user_application(self, user): if self.is_moderator and \ self.has_perm('accounts.approve_user_application'): user.moderator = self user.moderator_decision = user.APPROVED user.decision_datetime = timezone.now() user.auth_token = generate_unique_id() user.save() return user else: raise PermissionDenied
[ "def approve_application(request, application_pk):\n application = get_object_or_404(\n teambuilder_models.Application, pk=application_pk)\n developer = application.applicant\n application.approve()\n email.send_email(\n 'Your application was approved!',\n '''Hello {}!\n Than...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reject a user's application
def reject_user_application(self, user): if self.is_moderator \ and self.has_perm('accounts.reject_user_application'): user.moderator = self user.moderator_decision = user.REJECTED user.decision_datetime = timezone.now() user.save() return user else: raise PermissionDenied
[ "def reject_application(request, application_pk):\n application = get_object_or_404(\n teambuilder_models.Application, pk=application_pk)\n developer = application.applicant\n application.reject()\n email.send_email(\n 'Your application was rejected!',\n '''Hello {}!\n Thank ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a user's proficiency in a particular skill as a percentage, based on the position of the proficiency in PROFICIENCY_CHOICES.
def get_proficiency_percentage(self): choice_values = [choice[0] for choice in self.PROFICIENCY_CHOICES] if '' in choice_values: choice_values.remove('') # Remove the empty proficiency choice choice_values.sort() # Ensure values are in the correct order value = choice_values.index(self.proficiency) + 1 factor = 100 / len(choice_values) percentage = round(value * factor) return percentage
[ "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def female_pct(self) -> float:\n return sum([p.sex_female for p in self.pop]) / self.starting_population", "def setup_proficiencies(self):\n Proficiency.objects.create(name='starter', needed_percentage=0)\n Profic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the preparation files for the projects in a run
def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline, verbose): sample_sheet = KLSampleSheet(sample_sheet) df_sheet = sample_sheet_to_dataframe(sample_sheet) if pipeline == 'atropos-and-bowtie2': click.echo('Stats collection is not supported for pipeline ' 'atropos-and-bowtie2') else: stats = run_counts(run_dir, sample_sheet) stats['sample_name'] = \ df_sheet.set_index('lane', append=True)['sample_name'] # returns a map of (run, project_name, lane) -> preparation frame preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline) os.makedirs(output_dir, exist_ok=True) for (run, project, lane), df in preps.items(): fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv') if pipeline == 'fastp-and-minimap2': # stats are indexed by sample name and lane, lane is the first # level index. When merging, make sure to select the lane subset # that we care about, otherwise we'll end up with repeated rows df = df.merge(stats.xs(lane, level=1), how='left', on='sample_name') # strip qiita_id from project names in sample_project column df['sample_project'] = df['sample_project'].map( lambda x: re.sub(r'_\d+$', r'', x)) # center_project_name is a legacy column that should mirror # the values for sample_project. df['center_project_name'] = df['sample_project'] df.to_csv(fp, sep='\t', index=False) if verbose: project_name = remove_qiita_id(project) # assume qiita_id is extractable and is an integer, given that # we have already passed error-checking. qiita_id = project.replace(project_name + '_', '') print("%s\t%s" % (qiita_id, abspath(fp)))
[ "def newproject(self):\n \n self.path = os.path.join(self.base, self.name)\n subpath = os.path.join(self.path, self.lowname)\n check_build_path(subpath)\n \n for filename, content in self.files.items():\n self.buildfile(filename, content, self.path)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return tokenized list of strings from raw text input using keras functionality
def tokenize_keras(raw_data): from keras.preprocessing.text import text_to_word_sequence return [text_to_word_sequence(d) for d in raw_data]
[ "def _text_tokenize(input_data: Mapping[str, tf.Tensor]) -> tf.Tensor:\n input_str = tf.reshape(input_data['translation'], shape=[1])\n standard_text = _text_standardization(input_str)\n return tf.strings.split(standard_text)", "def _batch_tokenize(self, text: List[str]) -> List[List[str]]:\n return sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if word passes filter
def filter1(word): if not word: return False w = word.lower() if w in STOPWORDS: return False return True
[ "async def wordfilter(self, ctx):\n pass", "async def wordfilter_test(self, ctx, *, message):\n found = self.test_sentence(message)\n if found:\n await ctx.send(f\"Message contains `{found}`\")\n else:\n await ctx.send(\"Couldn't detect any filtered words\")", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return dict of wordtoid from raw text data If max_size is specified, vocab is truncated to set of highest frequency words within size.
def build_vocab(raw_data, max_size=None): data = [w for doc in tokenize_keras(raw_data) for w in doc] counter = collections.Counter(data) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) if max_size: count_pairs = count_pairs[:max_size] words, _ = list(zip(*count_pairs)) word_to_id = dict(zip(words, range(len(words)))) word_to_id[UNKNOWN_WORD] = len(word_to_id) word_to_id[PAD_WORD] = len(word_to_id) return word_to_id
[ "def embeddings_to_dict(path, max_words=None):\n w2v = {} \n with codecs.open(path,\"r\",\"utf-8\") as fid:\n #ignore first line\n fid.readline() \n #avoid extra comparisons if we want load all the words\n if max_words is None:\n for line in fid:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert raw text data into integer ids
def raw_to_ids(raw_data, word_to_id): docs = tokenize_keras(raw_data) uid = word_to_id[UNKNOWN_WORD] return [[word_to_id.get(w, uid) for w in doc] for doc in docs]
[ "def map_text_to_id(self, text: str) -> List[int]:\n return self.map_token_to_id(self.map_text_to_token(text))", "def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):\n source_id_text = []\n target_id_text = []\n for text in source_text.split('\\n'):\n source...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
callback for when the detector has found a stop sign. Note that a distance of 0 can mean that the lidar did not pickup the stop sign at all
def stop_sign_detected_callback(self, msg): # distance of the stop sign corners = msg.corners dx = corners[3] - corners[1] dy = corners[2] - corners[0] r = dx/dy # aspect ratio rdist = np.array([.15, .20, .25, .30,.35, .40, .45, .50]) pixelheight = np.array([139, 102, 82, 64, 56, 50, 44, 40]) if dy > pixelheight[-1] and dy < pixelheight[0]: dist = np.interp(dy, pixelheight[::-1], rdist[::-1]) else: return # Get location of camera with respect to the map try: (translation,rotation) = self.tf_listener.lookupTransform('/map', '/camera', rospy.Time(0)) xcam = translation[0] ycam = translation[1] zcam = translation[2] euler = tf.transformations.euler_from_quaternion(rotation) thetacam = euler[2] except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): return # Get angle of robot with respect to the map try: (translation,rotation) = self.tf_listener.lookupTransform('/map', '/base_footprint', rospy.Time(0)) euler = tf.transformations.euler_from_quaternion(rotation) thetarobot = euler[2] except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): return # Now we have pose of robot, we want to determine stop sign angle relative # to camera frame thstopsign = (wrapToPi(msg.thetaright) + wrapToPi(msg.thetaleft))/2. 
zstopsign = dist*np.cos(-thstopsign) xstopsign = dist*np.sin(-thstopsign) x = xcam + xstopsign*np.cos(thetacam) - zstopsign*np.sin(thetacam) y = ycam + xstopsign*np.sin(thetacam) + zstopsign*np.cos(thetacam) # Now that we have x and y coord of stop sign in world frame, append coord found = False for i in range(len(self.stopSigns[0])): xcur = self.stopSigns[0][i] ycur = self.stopSigns[1][i] thetarobotcur = self.stopSigns[2][i] distance = np.sqrt((x - xcur)**2 + (y - ycur)**2) n = self.stopSignCounts[i] if distance < .2: if n < 100: # We have found the same stop sign as before xnew = (n/(n+1.))*xcur + (1./(n+1))*x ynew = (n/(n+1.))*ycur + (1./(n+1))*y thetarobotnew = (n/(n+1.))*thetarobotcur + (1./(n+1))*thetarobot self.stopSigns[0][i] = xnew self.stopSigns[1][i] = ynew self.stopSigns[2][i] = thetarobotnew self.stopSignCounts[i] += 1 found = True if not found: # Found a new one, append it self.stopSigns[0].append(x) self.stopSigns[1].append(y) self.stopSigns[2].append(thetarobot) self.stopSignCounts.append(1)
[ "def stop_sign_detected_callback(self, msg):\n\n # distance of the stop sign\n # print \"Stop Sign Destected\"\n dist = msg.distance\n # if self.mode==Mode.TRACK:\n # if close enough and in nav mode, stop\n if dist > 0 and dist < self.stop_min_dist and self.mode == Mode.TRA...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Custom collate_fn that is called with list of multivariate samples to yield a minibatch It preserves the data structure, e.g., if each sample is a dictionary, it outputs a dictionary with the same set of keys but batched Tensors as values (or lists if the values can not be converted into Tensors).
def collate_fn(sample_list): x_ref_batch = [] x_pos_batch = [] x_negs_batch = [] label_batch = [] for sample in sample_list: x_ref_batch.append(sample["x_ref"]) x_pos_batch.append(sample["x_pos"]) x_negs_batch.append(sample["x_negs"]) label_batch.append(sample["label"]) # Use torch API for RNNs to pad samples to fixed length, L, and stack them in batch-tensor of dim (B,n_dim,L). x_ref_batch = pad_sequence( x_ref_batch, batch_first=True, padding_value=0) # (B,L,n_dim) x_ref_batch = x_ref_batch.transpose(1, 2) # (B,n_dim,L) x_pos_batch = pad_sequence( x_pos_batch, batch_first=True, padding_value=0) # (B,L,n_dim) x_pos_batch = x_pos_batch.transpose(1, 2) # (B,n_dim,L) # Pad neg tensors with varying length of first dim L, and produce batch (B,K,n_dim,L') where L' is padded length x_negs_batch = pad_sequence(x_negs_batch, batch_first=True, padding_value=0) # (B, L', K, n_dim) x_negs_batch = x_negs_batch.transpose(1, 2) # (B, K, L', n_dim) x_negs_batch = x_negs_batch.transpose(2, 3) # (B, K, n_dim, L') return { 'x_ref': x_ref_batch, 'x_pos': x_pos_batch, 'x_negs': x_negs_batch, 'label': label_batch }
[ "def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch", "def collate_fn(batch):\n data = [item[0] for item in batch]\n target = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a function from a full module path
def import_from(full_name): module_name, function_name = full_name.rsplit('.', 1) mod = import_module(module_name) return getattr(mod, function_name)
[ "def import_function(func_ref):\n module_name, _, func_name = func_ref.rpartition('.')\n module = importlib.import_module(module_name)\n return getattr(module, func_name)", "def import_function(name):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new Bloom Filter ``key`` with desired probability of false positives ``errorRate`` expected entries to be inserted as ``capacity``. Default expansion value is 2. By default, filter is autoscaling.
def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None): params = [key, errorRate, capacity] self.appendExpansion(params, expansion) self.appendNoScale(params, noScale) return self.execute_command(self.BF_RESERVE, *params)
[ "def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None):\n params = [key, capacity]\n self.appendExpansion(params, expansion)\n self.appendBucketSize(params, bucket_size)\n self.appendMaxIterations(params, max_iterations)\n\n return self.execute_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds to a Bloom Filter ``key`` an ``item``.
def bfAdd(self, key, item): params = [key, item] return self.execute_command(self.BF_ADD, *params)
[ "def add(key, item):\n hash_key = hash_function(key)\n hash_table[hash_key - 1] = item", "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def add_item(self, key, item):\n self.dict[key] = item\n self.is_empty =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds to a Bloom Filter ``key`` multiple ``items``. If ``nocreate`` remain ``None`` and ``key does not exist, a new Bloom Filter ``key`` will be created with desired probability of false positives ``errorRate`` and expected entries to be inserted as ``size``.
def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None): params = [key] self.appendCapacity(params, capacity) self.appendError(params, error) self.appendExpansion(params, expansion) self.appendNoCreate(params, noCreate) self.appendNoScale(params, noScale) self.appendItems(params, items) return self.execute_command(self.BF_INSERT, *params)
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether an ``item`` exists in Bloom Filter ``key``.
def bfExists(self, key, item): params = [key, item] return self.execute_command(self.BF_EXISTS, *params)
[ "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def has_item(self, item):\n return item in self.set", "def cfExists(self, key, item):\n params = [key, item]\n \n return self.execute...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether ``items`` exist in Bloom Filter ``key``.
def bfMExists(self, key, *items): params = [key] params += items return self.execute_command(self.BF_MEXISTS, *params)
[ "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def has_item(self, item):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Begins an incremental save of the bloom filter ``key``. This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of ``iter`` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
def bfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.BF_SCANDUMP, *params)
[ "def cfScandump(self, key, iter):\n params = [key, iter]\n \n return self.execute_command(self.CF_SCANDUMP, *params)", "def flush(self, key=None):\n raise NotImplementedError", "def save(self) -> dict:\n for pair in self._buffer:\n yield pair.save()", "def _iter(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new Cuckoo Filter ``key`` an initial ``capacity`` items.
def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None): params = [key, capacity] self.appendExpansion(params, expansion) self.appendBucketSize(params, bucket_size) self.appendMaxIterations(params, max_iterations) return self.execute_command(self.CF_RESERVE, *params)
[ "def from_key(cls, key, quantity=1):\n\n return cls(key[0], key[1], quantity)", "def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None):\n params = [key, errorRate, capacity]\n self.appendExpansion(params, expansion)\n self.appendNoScale(params, noScale)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds an ``item`` to a Cuckoo Filter ``key``.
def cfAdd(self, key, item): params = [key, item] return self.execute_command(self.CF_ADD, *params)
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add_item(self, key, item):\n self.dict[key] = item\n self.is_empty = False", "def _m_add_items_filter(filter):", "def add_item(self, key, item):\n self[ke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds multiple ``items`` to a Cuckoo Filter ``key``, allowing the filter to be created with a custom ``capacity` if it does not yet exist. ``items`` must be provided as a list.
def cfInsert(self, key, items, capacity=None, nocreate=None): params = [key] self.appendCapacity(params, capacity) self.appendNoCreate(params, nocreate) self.appendItems(params, items) return self.execute_command(self.CF_INSERT, *params)
[ "def _m_add_items_filter(filter):", "def add_items(self, *items):\n self.items.extend(items)", "def append_items(params, items):\n params.extend([\"ITEMS\"])\n params += items", "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether an ``item`` exists in Cuckoo Filter ``key``.
def cfExists(self, key, item): params = [key, item] return self.execute_command(self.CF_EXISTS, *params)
[ "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def has_item(self, item):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes ``item`` from ``key``.
def cfDel(self, key, item): params = [key, item] return self.execute_command(self.CF_DEL, *params)
[ "def delete_item(self, item_id):\n pass", "def delete_item(self, item):\r\n item.delete_item_from_room(self)", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def delete_dynamo_item(dynamo_client, *, table_name, key):\n dynamo_client.delete_i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Begins an incremental save of the Cuckoo filter ``key``. This is useful for large Cuckoo filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of ``iter`` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
def cfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.CF_SCANDUMP, *params)
[ "def bfScandump(self, key, iter):\n params = [key, iter]\n \n return self.execute_command(self.BF_SCANDUMP, *params)", "def flush(self, key=None):\n raise NotImplementedError", "def save(self) -> dict:\n for pair in self._buffer:\n yield pair.save()", "def _iter(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes a CountMin Sketch ``key`` to dimensions (``width``, ``depth``) specified by user.
def cmsInitByDim(self, key, width, depth): params = [key, width, depth] return self.execute_command(self.CMS_INITBYDIM, *params)
[ "def __init__(self, ks=None):\n self.ks = ks\n self.N = ks.N if ks is not None else None\n self.L = ks.d if ks is not None else None\n # pass", "def __init__(self, size, fill = 0):\n dict.__init__(self)\n\n if fill != -1:\n for x in xrange(size):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes a CountMin Sketch ``key`` to characteristics (``error``, ``probability``) specified by user.
def cmsInitByProb(self, key, error, probability): params = [key, error, probability] return self.execute_command(self.CMS_INITBYPROB, *params)
[ "def __init__(self, probability, nodeKeys):\n self.probability = float(probability)\n self.nodeKeys = nodeKeys", "def set_min_prob(self, disease, probability):\n self.min_probs[disease] = probability", "def __init__(self, ks=None):\n self.ks = ks\n self.N = ks.N if ks is not N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds/increases ``items`` to a CountMin Sketch ``key`` by ''increments''. Both ``items`` and ``increments`` are lists. Example cmsIncrBy('A', ['foo'], [1])
def cmsIncrBy(self, key, items, increments): params = [key] self.appendItemsAndIncrements(params, items, increments) return self.execute_command(self.CMS_INCRBY, *params)
[ "def append_items_and_increments(params, items, increments):\n for i in range(len(items)):\n params.append(items[i])\n params.append(increments[i])", "def inc(self, key, value=1):\n return self.increment({key:value})[key]", "def incr(x_c, x, inc=1):\n x_c[x] = x_c.get(x, 0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges ``numKeys`` of sketches into ``destKey``. Sketches specified in ``srcKeys``. All sketches must have identical width and depth. ``Weights`` can be used to multiply certain sketches. Default weight is 1. Both ``srcKeys`` and ``weights`` are lists.
def cmsMerge(self, destKey, numKeys, srcKeys, weights=[]): params = [destKey, numKeys] params += srcKeys self.appendWeights(params, weights) return self.execute_command(self.CMS_MERGE, *params)
[ "def __merge_keys(\n self, kv_src_bucket, kv_dest_bucket, kvs_num=1, filter_exp=None):\n valid_keys_src, deleted_keys_src = kv_src_bucket[\n kvs_num].key_set()\n valid_keys_dest, deleted_keys_dest = kv_dest_bucket[\n kvs_num].key_set()\n\n self.log.info(\"src_kv...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return full list of items in TopK list of ``key```.
def topkList(self, key): return self.execute_command(self.TOPK_LIST, key)
[ "def sub_key_list(self, keyword, filter = False):\n\n assert keyword in self.key_list(), 'keyword not present: ' + keyword\n sub_head_place = self.pointers[self.keywords.index(keyword)]\n sub_kp = KP(self.fp, sub_head_place)\n return sub_kp.key_list(filter = filter)", "def get_list(key):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a new pipeline object that can queue multiple commands for later execution. ``transaction`` indicates whether all commands should be executed atomically. Apart from making a group of operations atomic, pipelines are useful for reducing the backandforth overhead between the client and server. Overridden in order to provide the right client through the pipeline.
def pipeline(self, transaction=True, shard_hint=None): p = Pipeline( connection_pool=self.connection_pool, response_callbacks=self.response_callbacks, transaction=transaction, shard_hint=shard_hint) return p
[ "def pipeline(self, transaction=True, shard_hint=None):\r\n return AsyncStrictPipeline(\r\n self.connection_pool,\r\n self.response_callbacks,\r\n transaction,\r\n shard_hint)", "def createPipe(self, transaction):\n pipe = detectPipeClass(transaction.dev, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will return the graph data for the outage module
def get_outage(self): try: assert self._db_connection, { STATUS_KEY: HTTP_500_INTERNAL_SERVER_ERROR, MESSAGE_KEY: DB_ERROR} if self.equipment == COKE_DRUM_VALUE and self.module == OUTAGE_VALUE: """ This will return the graph data for the selected outage module """ query_params = { TAG_NAME_REQUEST: self.query_params.GET[TAG_NAME_REQUEST], START_DATE_REQUEST: self.query_params.GET[START_DATE_REQUEST], END_DATE_REQUEST: self.query_params.GET[END_DATE_REQUEST] } MODULE_LEVEL_MULTILINE_TAG = tuple(LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH) if MULTILINE_REQUEST in self.query_params.GET: """ This will return the graph data for the actual and predicted tags for the selected outage module """ query_params[MULTILINE_REQUEST] = self.query_params.GET[MULTILINE_REQUEST] if query_params: if START_DATE_REQUEST not in query_params or not query_params[START_DATE_REQUEST] and \ MULTILINE_REQUEST not in query_params: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH_NULL_START_DATE.format( self.module, query_params[TAG_NAME_REQUEST], query_params[END_DATE_REQUEST])) elif query_params[START_DATE_REQUEST] and MULTILINE_REQUEST not in query_params: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH.format( self.module, query_params[TAG_NAME_REQUEST], query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) elif query_params[START_DATE_REQUEST] and query_params[MULTILINE_REQUEST]: if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH: graph_data = django_search_query_all( DETAILED_OUTAGE_MODULE_MULTILINE_GRAPH.format( self.module, MODULE_LEVEL_MULTILINE_TAG, query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) else: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH.format( self.module, query_params[TAG_NAME_REQUEST], query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) df_data = pd.DataFrame(graph_data) min_max = django_search_query_all( MIN_MAX_DATA.format( self.module, 
query_params[TAG_NAME_REQUEST] )) df_min_max_data = pd.DataFrame(min_max) graph = [] if not df_data.empty: df_data = df_data.where(pd.notnull(df_data) == True, None) df_data.sort_values(TIMESTAMP_KEY, ascending=True, inplace=True) df_unit = df_data[UNIT].iloc[0] df_description = df_data[DESCRIPTION].iloc[0] df_timestamp = list(dict.fromkeys(list(df_data[TIMESTAMP_KEY]))) if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH: df_result = df_data.groupby(TAG_NAME_REQUEST) actual_north_data = [] predicted_north_data = [] actual_south_data = [] predicted_south_data = [] if len(df_result) == 2: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == query_params[TAG_NAME_REQUEST]][ DESCRIPTION].iloc[0] df_north_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_north_data = list(df_north_actual['north_drum_tag_value']) df_north_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_north_data = list(df_north_predicted['north_drum_tag_value']) df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_south_data = list(df_south_actual['south_drum_tag_value']) df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_south_data = list(df_south_predicted['south_drum_tag_value']) elif len(df_result) == 1: if df_result[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG][ DESCRIPTION].iloc[0] df_north_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_north_data = list(df_north_actual['north_drum_tag_value']) df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_south_data = list(df_south_actual['south_drum_tag_value']) elif df_result[TAG_NAME_REQUEST] != OUTAGE_MODULE_LEVEL_ACTUAL_TAG: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_PREDICTED_TAG][ DESCRIPTION].iloc[0] df_north_predicted = 
df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_north_data = list(df_north_predicted['north_drum_tag_value']) df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_south_data = list(df_south_predicted['south_drum_tag_value']) temp = {"north_actual": actual_north_data, "north_predicted": predicted_north_data, "south_actual": actual_south_data, "south_predicted": predicted_south_data, "x_axis": df_timestamp, "unit": df_unit, "description": df_description} else: temp = {"y_axis": list(df_data[TAG_VALUE]), "x_axis": df_timestamp, "unit": df_unit, "description": df_description} if not df_min_max_data.empty: temp["min_data"] = df_min_max_data[MIN_VALUE].iloc[0] temp["max_data"] = df_min_max_data[MAX_VALUE].iloc[0] else: temp["min_data"] = None temp["max_data"] = None graph.append(temp) return graph except AssertionError as e: log_error("Exception due to : %s" + str(e)) return asert_res(e) except Exception as e: log_error("Exception due to : %s" + str(e)) return json_InternalServerError
[ "def get_graph(self) -> dict:\n response = requests.get(self.channel, params=\"get_graph\")\n return json_to_graph(response.content)", "def get_graph_summary(self):\n\n pass", "def export_graph(self, graph):\n pass", "def _graph(self):\n return self._anm.overlay_nx_graphs[se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a mock component of a general model.
def mock_component(): component = Mock() component.free_parameters = flex.double([1.0]) component.free_parameter_esds = None component.n_params = 1 component.var_cov_matrix = sparse.matrix(1, 1) return component
[ "def get_mock(self):\n return self.mock", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a mock data manager of a general model.
def mock_data_manager(components): dm = Mock() dm.components = components dm.fixed_components = [] return dm
[ "def setup_dummy_data_manager():\n import repoze.filesafe\n repoze.filesafe._local.manager = mgr = DummyDataManager()\n return mgr", "def _get_data_manager(self):\n\n ftype = self.conf['General']['save_as']\n if ftype == 'npz':\n return NPZDataManager(self.conf, self.log)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for the general multi_active_parameter_manage class.
def test_multi_apm(): components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } components_2 = {"scale": mock_component(), "decay": mock_component()} multi_apm = multi_active_parameter_manager( ScalingTarget(), [components_1, components_2], [["scale", "decay"], ["scale"]], active_parameter_manager, ) # Test correct setup of apm_list attribute. for apm in multi_apm.apm_list: assert isinstance(apm, active_parameter_manager) assert len(multi_apm.apm_list) == 2 assert multi_apm.components_list == ["scale", "decay", "scale"] assert multi_apm.n_active_params == 3 assert multi_apm.apm_data[0] == {"start_idx": 0, "end_idx": 2} assert multi_apm.apm_data[1] == {"start_idx": 2, "end_idx": 3} # Test parameter selection. multi_apm.set_param_vals(flex.double([3.0, 2.5, 2.0])) assert multi_apm.get_param_vals() == flex.double([3.0, 2.5, 2.0]) assert multi_apm.select_parameters(0) == flex.double([3.0, 2.5]) assert multi_apm.select_parameters(1) == flex.double([2.0]) # Test setting parameter esds. multi_apm.set_param_esds(flex.double([0.1, 0.2, 0.3])) assert components_1["scale"].free_parameter_esds == flex.double([0.1]) assert components_1["decay"].free_parameter_esds == flex.double([0.2]) assert components_2["scale"].free_parameter_esds == flex.double([0.3]) # Test setting var_cov matrices for each component. var_cov = flex.double([1.0, 0.5, 0.5, 0.5, 2.0, 0.5, 0.5, 0.5, 3.0]) var_cov.reshape(flex.grid(3, 3)) multi_apm.calculate_model_state_uncertainties(var_cov) assert components_1["scale"].var_cov_matrix[0, 0] == 1.0 assert components_1["decay"].var_cov_matrix[0, 0] == 2.0 assert components_2["scale"].var_cov_matrix[0, 0] == 3.0
[ "def test_scaling_active_parameter_manager():\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(2)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n assert list(scaling_apm.constant_g_values[0]) == list(\n components_2[\"2\"].calculate_scales(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the apm factory for concurrent refinement.
def test_ParameterManagerGenerator_concurrent():
    """Test the apm factory for concurrent refinement."""
    # Single-dataset case: all components end up in one multi-apm.
    components_1 = {
        "scale": mock_component(),
        "decay": mock_component(),
        "absorption": mock_component(),
    }
    data_manager = mock_data_manager(components_1)

    pmg = ParameterManagerGenerator(
        [data_manager],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="concurrent",
    )
    apms = pmg.parameter_managers()
    assert len(apms) == 1
    apm = apms[0]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" in apm.components_list
    assert "decay" in apm.components_list
    assert "absorption" in apm.components_list

    # Multi-dataset case: one sub-apm per dataset inside a single multi-apm.
    components_1 = {
        "scale": mock_component(),
        "decay": mock_component(),
        "absorption": mock_component(),
    }
    components_2 = {"1": mock_component(), "2": mock_component()}

    data_manager_1 = mock_data_manager(components_1)
    data_manager_2 = mock_data_manager(components_2)

    pmg = ParameterManagerGenerator(
        [data_manager_1, data_manager_2],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="concurrent",
    )
    multi_apms = pmg.parameter_managers()
    assert len(multi_apms) == 1
    multi_apm = multi_apms[0]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    for apm in multi_apm.apm_list:
        assert isinstance(apm, active_parameter_manager)
    assert "scale" in multi_apm.apm_list[0].components_list
    assert "decay" in multi_apm.apm_list[0].components_list
    assert "absorption" in multi_apm.apm_list[0].components_list
    assert "1" in multi_apm.apm_list[1].components_list
    assert "2" in multi_apm.apm_list[1].components_list

    # now try fixing a component
    data_manager.fixed_components = ["absorption"]
    pmg = ParameterManagerGenerator(
        [data_manager],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="concurrent",
    )
    apms = pmg.parameter_managers()
    assert len(apms) == 1
    apm = apms[0]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" in apm.components_list
    assert "decay" in apm.components_list
    # Fixed components must be excluded from refinement.
    assert "absorption" not in apm.components_list
[ "def test_multiple_factories(self, mocker):\n sdk_ready_flag = threading.Event()\n\n def _init(self, ready_flag, some, auth_api, streaming_enabled, telemetry_runtime_producer, telemetry_init_consumer, sse_url=None):\n self._ready_flag = ready_flag\n self._synchronizer = mocker.Mo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the apm factory for consecutive refinement.
def test_ParameterManagerGenerator_consecutive():
    """Test the apm factory for consecutive refinement."""
    components_1 = {
        "scale": mock_component(),
        "decay": mock_component(),
        "absorption": mock_component(),
    }

    data_manager = mock_data_manager(components_1)
    # Refine scale+decay in cycle 1, absorption in cycle 2.
    data_manager.consecutive_refinement_order = [["scale", "decay"], ["absorption"]]

    # Test single dataset case.
    pmg = ParameterManagerGenerator(
        [data_manager],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    apm = apms[0]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" in apm.components_list
    assert "decay" in apm.components_list
    assert "absorption" not in apm.components_list
    apm = apms[1]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" not in apm.components_list
    assert "decay" not in apm.components_list
    assert "absorption" in apm.components_list

    # Test multi dataset case.
    components_2 = {"1": mock_component(), "2": mock_component()}
    data_manager_2 = mock_data_manager(components_2)
    data_manager_2.consecutive_refinement_order = [["1"], ["2"]]

    pmg = ParameterManagerGenerator(
        [data_manager, data_manager_2],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    multi_apm = apms[0]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    apm_1 = multi_apm.apm_list[0]
    assert "scale" in apm_1.components_list
    assert "decay" in apm_1.components_list
    assert "absorption" not in apm_1.components_list
    assert multi_apm.apm_list[1].components_list == ["1"]
    multi_apm = apms[1]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    assert multi_apm.apm_list[0].components_list == ["absorption"]
    assert multi_apm.apm_list[1].components_list == ["2"]

    # Test multi dataset case with different number of cycles for each data_manager.
    components_2 = {"1": mock_component()}
    data_manager_2 = mock_data_manager(components_2)
    data_manager_2.consecutive_refinement_order = [["1"], ["2"]]
    pmg = ParameterManagerGenerator(
        [data_manager, data_manager_2],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    # Cycle "2" is dropped for dataset 2 since it has no such component.
    assert pmg.param_lists[0] == [["scale", "decay"], ["absorption"]]
    assert pmg.param_lists[1] == [["1"]]
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    multi_apm = apms[0]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    apm_1 = multi_apm.apm_list[0]
    assert "scale" in apm_1.components_list
    assert "decay" in apm_1.components_list
    assert "absorption" not in apm_1.components_list
    assert multi_apm.apm_list[1].components_list == ["1"]
    multi_apm = apms[1]
    assert isinstance(multi_apm, multi_active_parameter_manager)
    assert multi_apm.apm_list[0].components_list == ["absorption"]
    # Only change relative to previous test case.
    assert multi_apm.apm_list[1].components_list == []

    # Test fixing the decay parameter.
    data_manager.fixed_components = ["decay"]
    pmg = ParameterManagerGenerator(
        [data_manager],
        apm_type=active_parameter_manager,
        target=ScalingTarget(),
        mode="consecutive",
    )
    apms = list(pmg.parameter_managers())
    assert len(apms) == 2
    apm = apms[0]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" in apm.components_list
    assert "decay" not in apm.components_list
    assert "absorption" not in apm.components_list
    apm = apms[1]
    assert isinstance(apm, multi_active_parameter_manager)
    assert "scale" not in apm.components_list
    assert "decay" not in apm.components_list
    assert "absorption" in apm.components_list
[ "def test_multi_apm():\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"scale\": mock_component(), \"decay\": mock_component()}\n\n multi_apm = multi_active_parameter_manager(\n Scal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the scalingspecific parameter manager.
def test_scaling_active_parameter_manager():
    """Test the scaling-specific parameter manager."""
    components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(2)}
    scaling_apm = scaling_active_parameter_manager(components_2, ["1"])
    # Components not selected for refinement contribute fixed scale values.
    assert list(scaling_apm.constant_g_values[0]) == list(
        components_2["2"].calculate_scales()
    )
    assert len(scaling_apm.constant_g_values) == 1
    assert scaling_apm.n_obs == [2]

    # Test that no constant_g_values if both components selected
    scaling_apm = scaling_active_parameter_manager(components_2, ["1", "2"])
    assert scaling_apm.constant_g_values is None

    # Check that one can't initialise with an unequal number of reflections,
    # either within the selection or overall.
    with pytest.raises(AssertionError):
        components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(1)}
        scaling_apm = scaling_active_parameter_manager(components_2, ["1", "2"])
    with pytest.raises(AssertionError):
        components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(1)}
        scaling_apm = scaling_active_parameter_manager(components_2, ["1"])

    # The scaling generator should produce apms of the scaling-specific type.
    data_manager = mock_data_manager(components_2)
    pmg = ScalingParameterManagerGenerator(
        [data_manager], target=ScalingTarget(), mode="concurrent"
    )
    assert isinstance(pmg.apm_type, type(scaling_active_parameter_manager))
[ "def test_get_measure_parameters(self):\n pass", "def test_parameters(self):\n self.assert_initialize_driver()\n #reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL)\n #self.assert_driver_parameters(reply, verify_sample_interval=True)", "def testingParameters(cal_file =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Receive a request from the worker. work_socket: receive a request on this socket. timeout: if a request isn't received within the timeout, raise six.moves.queue.Empty; the default (None) blocks forever. This polls on both the worker and up_queue sockets and will throw an exception if there is anything available on the up_queue, as this indicates that nothing is running.
def recv(self, work_socket, timeout=None):
    """Receive a request from the worker.

    work_socket - socket on which a request is expected.
    timeout - milliseconds to wait before raising six.moves.queue.Empty;
              None (the default) blocks forever.

    Polls on both the worker socket and the up-queue socket, and raises
    if anything is available on the up-queue, since that indicates the
    worker is no longer running.
    """
    poller = zmq.Poller()
    poller.register(self.up_queue_recv_socket, zmq.POLLIN)
    poller.register(work_socket, zmq.POLLIN)
    for socket, state in poller.poll(timeout):
        if socket == self.up_queue_recv_socket and state == zmq.POLLIN:
            # The worker thread posted a (result, exception) pair: re-raise
            # its error, or signal cancellation if it exited cleanly.
            result, e = self.up_queue.get()
            if e is not None:
                raise e
            else:
                raise cellprofiler_core.pipeline.event.CancelledException(
                    "Unexpected exit during recv"
                )
        if socket == work_socket and state == zmq.POLLIN:
            return cellprofiler_core.utilities.zmq.communicable.Communicable.recv(
                work_socket
            )
    # Nothing arrived within the timeout.
    raise six.moves.queue.Empty
[ "def handle_request(self):\n # Support people who used socket.settimeout() to escape\n # handle_request before self.timeout was available.\n timeout = self.socket.gettimeout()\n if timeout is None:\n timeout = self.timeout\n elif self.timeout is not None:\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Artificially set up the worker's work socket This sets self.aw.work_socket so that methods other than "run" can be tested in the worker.
def set_work_socket(self):
    """Artificially set up the worker's work socket.

    Sets ``aw.work_socket`` (plus the matching request address and
    analysis id) so that methods other than ``run`` can be tested in
    the worker.
    """
    self.analysis_id = uuid.uuid4().hex

    def do_set_work_socket(aw):
        # Executed on the worker's own thread: connect a REQ socket
        # to this test's work address.
        aw.work_socket = cellprofiler_core.constants.worker.the_zmq_context.socket(
            zmq.REQ
        )
        aw.work_socket.connect(self.work_addr)
        aw.work_request_address = self.work_addr
        aw.current_analysis_id = self.analysis_id

    self.awthread.execute(do_set_work_socket, self.awthread.aw)
[ "def setup(self) -> None:\n self.running = True\n self.listen()\n self.start_workers()\n\n # Send server socket to workers.\n assert self.socket is not None\n for work_queue in self.work_queues:\n work_queue[0].send(self.family)\n send_handle(work_queu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Announce the work address until we get some sort of a request
def send_announcement_get_work_request(self):
    """Announce the work address until we get some sort of a request."""
    self.analysis_id = uuid.uuid4().hex
    while True:
        self.announce_socket.send_json(((self.analysis_id, self.work_addr),))
        try:
            # Wait up to 250 ms for a request, then re-announce.
            return self.awthread.recv(self.work_socket, 250)
        except six.moves.queue.Empty:
            continue
[ "def announceWork(self):\n if self.queens:\n for q in self.queens:\n try:\n q.proxyAnnounceWork(self.name, self.cobraname, self.cobrad.port)\n except Exception as e:\n logger.warning('Queen Error: %s', e)\n\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the X window id of the window whose title matches regex `title_regex`
def get_window_id(title_regex):
    """Return the X window id of the first window whose title matches *title_regex*.

    Shells out to ``wmctrl -l`` and matches each window title with
    ``re.match``. Raises ``Exception`` when no title matches.
    """
    cmd = "wmctrl -l"
    logit(cmd)
    listing = subprocess.check_output(cmd.split()).decode("utf-8").splitlines()
    logit(listing)
    for entry in listing:
        fields = entry.split()
        window_id = fields[0]
        # The title is everything after the third space-separated field.
        title = entry.split(" ", 3)[3]
        if re.match(title_regex, title):
            return window_id
    raise Exception(f"Could not find window with title matching regex: {title_regex}")
[ "def find_window(title):\n return FindWindow(None, title)", "def _getWindowsByTitle(title, exact=False):\n matched = []\n windows = Quartz.CGWindowListCopyWindowInfo(Quartz.kCGWindowListExcludeDesktopElements | Quartz.kCGWindowListOptionOnScreenOnly, Quartz.kCGNullWindowID)\n for win in windows:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure we can't create a student user without academic_ fields.
def test_create_new_student_user_missing_field(self):
    """Create a user providing only email and password.

    NOTE(review): the original description says creation *without* the
    academic fields should fail, but this asserts 201 Created — i.e. the
    endpoint currently accepts the minimal payload. Confirm which
    behavior is intended.
    """
    data = {
        'email': 'John@mailinator.com',
        'password': 'test123!',
    }
    response = self.client.post(
        reverse('user-list'),
        data,
        format='json',
    )
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
[ "def test_create_user_with_missing_attribute(self):\n pass", "def test_create_user_without_role(self):\n\n role = None\n self.user_data[\"role_id\"] = role\n\n with self.assertRaises(IntegrityError):\n User.objects.create_user(**self.user_data)", "def test_careers_invalid_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure we can't create a new user with an invalid phone number
def test_create_new_user_invalid_phone(self):
    """Signup with malformed phone numbers is rejected with 400 and field errors."""
    payload = {
        'username': 'John',
        'email': 'John@mailinator.com',
        'password': '1fasd6dq#$%',
        'phone': '12345',
        'other_phone': '23445dfg',
        'first_name': 'Chuck',
        'last_name': 'Norris',
        'university': {"name": "random_university"},
        'academic_field': {'name': "random_field"},
        'academic_level': {'name': "random_level"},
        'gender': "M",
        'birthdate': "1999-11-11",
    }

    response = self.client.post(reverse('user-list'), payload, format='json')

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    expected_errors = {
        "phone": ['Invalid format.'],
        "other_phone": ['Invalid format.'],
    }
    self.assertEqual(json.loads(response.content), expected_errors)
[ "def test_new_user_invalid_phonenumber(self):\n phonenumber = None\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n 'TestFirstName',\n 'TestMiddleName',\n 'TestLastName',\n phonenumber,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure we can't list users without authentication.
def test_list_users_without_authenticate(self):
    """An unauthenticated request to the user list is rejected with 401."""
    response = self.client.get(reverse('user-list'))

    expected = {"detail": "Authentication credentials were not provided."}
    self.assertEqual(json.loads(response.content), expected)
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
[ "def test_list_users_without_permissions(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('user-list'))\n\n content = {\n 'detail': 'You do not have permission to perform this action.'\n }\n self.assertEqual(json.loads(respo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure we can't list users without permissions.
def test_list_users_without_permissions(self):
    """A plain authenticated user may not list users (403)."""
    self.client.force_authenticate(user=self.user)

    response = self.client.get(reverse('user-list'))

    expected = {
        'detail': 'You do not have permission to perform this action.'
    }
    self.assertEqual(json.loads(response.content), expected)
    self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
[ "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure we can send notification for membership end
def test_send_notification_end_membership(self):
    """Ensure we can send the membership-end notification email.

    First call at a mocked fixed time should send one email and stamp
    ``membership_end_notification``; a second call at the same time must
    not send a duplicate.
    """
    fixed_time = timezone.now()
    # Membership ends 28 days from "now", inside the notification window.
    end_time_membership = fixed_time + relativedelta(days=28)

    self.user.membership = self.membership
    self.user.membership_end = end_time_membership
    self.user.save()

    with mock.patch(
        'store.serializers.timezone.now',
        return_value=fixed_time
    ):
        response = self.client.get(
            reverse('user-execute-automatic-email-membership-end')
        )

    content = {
        'stop': False,
        'email_send_count': 1
    }

    self.assertEqual(
        response.status_code,
        status.HTTP_200_OK,
        response.content
    )
    self.assertEqual(
        json.loads(response.content),
        content
    )
    self.assertEqual(len(mail.outbox), 1)

    # The notification timestamp is persisted on the user.
    self.user.refresh_from_db()
    self.assertEqual(self.user.membership_end_notification, fixed_time)

    # Second run at the same mocked time: nothing new to send.
    with mock.patch(
        'store.serializers.timezone.now',
        return_value=fixed_time
    ):
        response = self.client.get(
            reverse('user-execute-automatic-email-membership-end')
        )

    content = {
        'stop': False,
        'email_send_count': 0
    }

    self.assertEqual(
        response.status_code,
        status.HTTP_200_OK,
        response.content
    )
    self.assertEqual(
        json.loads(response.content),
        content
    )

    # no new mail
    self.assertEqual(len(mail.outbox), 1)
[ "def test_admin_approval_complete_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure admin can credit tickets to a user
def test_credit_ticket_as_admin(self):
    """An admin may credit tickets to any user's account."""
    target = UserFactory()
    self.assertEqual(target.tickets, 1)

    added = 5
    self.client.force_authenticate(user=self.admin)
    response = self.client.post(
        reverse('user-credit-tickets', kwargs={'pk': target.id}),
        {'nb_tickets': added},
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_200_OK)
    # Ticket balance grows by exactly the credited amount.
    self.assertEqual(User.objects.get(pk=target.id).tickets, 1 + added)
[ "def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure user can't credit tickets to a user
def test_credit_ticket_as_user(self):
    """A non-admin user is forbidden (403) from crediting tickets."""
    target = UserFactory()
    self.assertEqual(target.tickets, 1)

    self.client.force_authenticate(user=self.user)
    response = self.client.post(
        reverse('user-credit-tickets', kwargs={'pk': target.id}),
        {'nb_tickets': 5},
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
[ "def test_credit_ticket_negative_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = -5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure admin can't credit negative tickets to a user
def test_credit_ticket_negative_int(self):
    """Crediting a negative number of tickets is rejected with 400."""
    target = UserFactory()
    self.assertEqual(target.tickets, 1)

    self.client.force_authenticate(user=self.admin)
    response = self.client.post(
        reverse('user-credit-tickets', kwargs={'pk': target.id}),
        {'nb_tickets': -5},
        format='json',
    )

    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
[ "def test_credit_ticket_as_admin(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Used to generate the betas for simulation tests; num_part1 + num_part2 is the number of betas to be nonzero.
def gen_beta(self,num_part1=54, num_part2=25,intercept=None):
    """Generate the beta coefficients for simulation tests.

    ``num_part1 + num_part2`` is the number of betas set non-zero:
    part 1 is a sine-shaped bump with a flat plateau, part 2 a
    triangular bump. Both are written into ``self.beta`` at fixed
    offsets (part 2 starts 30 slots after part 1 ends).
    Returns ``self.beta``.
    """
    #intercept
    if intercept is not None:
        self.intercept = intercept
    #part1
    num_p1_1 = num_part1 // 8                 # plateau length
    num_p2_2 = num_part1 - num_p1_1           # sine-curve length
    sep_point = int(num_p2_2 * 0.8)           # where the plateau is inserted
    tmp1 = np.sin([np.pi/(num_p2_2-6) * i for i in range(1,sep_point)]) * 3
    tmp2 = np.ones(num_p1_1) * tmp1[-1]       # hold the last sine value
    tmp3 = np.sin([np.pi/(num_p2_2-6) * i for i in range(sep_point,num_p2_2-3)]) * 3
    tmp4 = np.linspace(tmp3[-1]+0.1,0,4,endpoint=False)  # taper back to zero
    part1 = np.concatenate([tmp1,tmp2,tmp3,tmp4])
    #part2
    num_p2_1 = num_part2 // 2 + 1             # rising edge length
    num_p2_2 = num_part2 - num_p2_1           # falling edge length
    slop = 5.5 / num_p2_1                     # slope of the triangle
    tmp1 = np.array([slop*i for i in range(num_p2_1)]) - 0.2
    tmp2 = np.array([5-slop*i for i in range(1,num_p2_2+1)]) - 0.2
    part2 = np.concatenate([tmp1,tmp2])
    # Write both bumps into the coefficient vector at fixed offsets.
    self.beta[1:(len(part1)+1)] = part1
    self.beta[(len(part1)+30):(len(part1)+30+len(part2))] = part2
    return self.beta
[ "def evo_blanket(self,beta,alpha): \n evo_blanket = np.zeros(self.state_no)\n for i in range(evo_blanket.shape[0]):\n evo_blanket[i] = self.state_likelihood_markov_blanket(beta,alpha,i).sum()\n\n if self.dist in ['t']:\n evo_blanket = np.append([self.m_likelihood_markov_bl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a value in a nested associative structure, where `ks` is a sequence of keys. Returns `None`, if the key is not present, or the `default` value, if supplied.
def get_in(d, ks, default=None):
    """Return the value at the key path *ks* inside the nested dicts *d*.

    Returns *default* when any key along the path is absent or an
    intermediate value is not a plain ``dict``.
    """
    *path, final = ks
    node = d
    for key in path:
        if type(node) is not dict or key not in node:
            return default
        node = node[key]
    # Only a dict can hold the final key; anything else yields the default.
    return node.get(final, default) if type(node) is dict else default
[ "def get(\n self, k: SeqStrType, default: Optional[T] = None\n ) -> Union[T, \"NestedDict[T]\"]:\n k = _flatten_index(k)\n\n if k not in self:\n if default is not None:\n return default\n else:\n raise KeyError(k)\n\n data_ptr = self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associates a value in a nested associative structure, where `ks` is a sequence of keys and `v` is the new value, and returns a nested structure. If any levels do not exist, `dict`s will be created.
def assoc_in(d, ks, v):
    """Set *v* at the key path *ks* inside the nested dicts *d*.

    Missing intermediate levels are created as empty dicts. Mutates *d*
    in place and returns it.
    """
    *path, final = ks
    node = d
    for key in path:
        if key not in node:
            node[key] = {}
        node = node[key]
    node[final] = v
    return d
[ "def createNestedDict(self, myDict, value, *path):\n for level in path[:-1]:\n myDict = myDict.setdefault(level, {})\n #for level -ends\n dict[path[-1]]=value\n return myDict", "def _set_nested(self, d, keys, value):\n if len(keys) > 1 and isinstance(d, dict):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a `middleware_name` with a right arrow if `_VERBOSE_MODE` is on.
def _print_inwards(middleware_name):
    """Trace entry into *middleware_name* (right arrow) when verbose mode is on."""
    if not _VERBOSE_MODE:
        return
    print('{}--->'.format(middleware_name))
[ "def _print_outwards(middleware_name):\n if _VERBOSE_MODE:\n print('<---{}'.format(middleware_name))", "def _verboseHeader(self):\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a `middleware_name` with a left arrow if `_VERBOSE_MODE` is on.
def _print_outwards(middleware_name):
    """Trace exit from *middleware_name* (left arrow) when verbose mode is on."""
    if not _VERBOSE_MODE:
        return
    print('<---{}'.format(middleware_name))
[ "def _print_inwards(middleware_name):\n if _VERBOSE_MODE:\n print('{}--->'.format(middleware_name))", "def _verboseHeader(self):\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is used to decorate generators with exactly two `yield` statements and turn them into middleware. For examples see documentation to this module and tests. Extra arguments beyond name are passed to the generator that is being decorated during instantiation. If they are not defined during interpretation of this module, then this function can be used as a regular callable and not as an annotation.
def middleware(name, *args, **kwargs):
    """Turn a generator function with exactly two ``yield`` statements into
    middleware.

    Extra arguments beyond *name* are passed to the decorated generator on
    instantiation. The code before the first ``yield`` runs on the way in,
    the code between the yields runs on the way out.
    """
    def decorate(generator_fn):
        def wrap(handler):
            def run(ctx):
                _print_inwards(name)
                gen = generator_fn(ctx, *args, **kwargs)
                before_ctx = next(gen)              # run the "before" section
                handled_ctx = handler(before_ctx)
                after_ctx = gen.send(handled_ctx)   # run the "after" section
                _print_outwards(name)
                return after_ctx
            return run
        return wrap
    return decorate
[ "def consumer(func):\n\n from functools import wraps\n\n @wraps(func)\n def wrapper(*args,**kw):\n gen = func(*args, **kw)\n gen.next()\n return gen\n return wrapper", "def writer_wrapper_2(coroutine):\n yield from coroutine", "def with_outer(*args):\n def generator():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function layers `middleware` left to right around the `handler` and calls it all with `ctx` as an argument. Setting `verbose` to `True` prints when handlers start their before and after sections.
def wrap_and_call(ctx, handler, *middleware, verbose=False):
    """Layer *middleware* around *handler* and call the result with *ctx*.

    Setting ``verbose=True`` makes middleware print when their before/after
    sections run (via the module-level ``_VERBOSE_MODE`` flag).

    NOTE(review): assuming ``compose`` applies right-to-left, reversing the
    list makes the *last* listed middleware the outermost wrapper — confirm
    the intended layering direction.
    """
    global _VERBOSE_MODE
    _VERBOSE_MODE = verbose
    middleware_ = list(middleware)
    return compose(*reversed(middleware_))(handler)(ctx)
[ "def _print_inwards(middleware_name):\n if _VERBOSE_MODE:\n print('{}--->'.format(middleware_name))", "def _print_outwards(middleware_name):\n if _VERBOSE_MODE:\n print('<---{}'.format(middleware_name))", "def middleware(f):\n @wraps(f)\n def outer(*args):\n def inner(next_dispa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
converts kml files to open airspace files
def kml_2_open_airspace_and_json_format(self, full_path):
    """Convert a KML file into OpenAir airspace format and a JSON summary.

    Parses ``full_path``, collects one Airspace per <Placemark> polygon
    (taking the airspace type from the enclosing <Folder> name), then asks
    the user (file-save dialog) where to write the ``_converted.txt``
    OpenAir file; a matching ``.json`` is written next to it.
    """
    # read file
    f = open(full_path,'r')
    kml = f.readlines()
    f.close()
    # find airspaces
    # (example of the Placemark structure being parsed:)
    """Placemark >
    < name > Bremen - Blumenthal
    Thermikplatte < / name >
    < styleUrl > # inline10</styleUrl>
    < Polygon >
    < tessellate > 1 < / tessellate >
    < outerBoundaryIs >
    < LinearRing >
    < coordinates >
    8.529121049900063, 53.19549566929423, 0
    8.52324583919868, 53.21131939607898, 0
    8.545439298799483, 53.23055800702935, 0
    8.588991466114615, 53.23047069814625, 0
    8.575289966189502, 53.20745451706468, 0
    8.560633120477348, 53.19724609335408, 0
    8.529121049900063, 53.19549566929423, 0
    < / coordinates >
    < / LinearRing >
    < / outerBoundaryIs >
    < / Polygon >
    < / Placemark >"""
    container = []
    idxLine = 0
    did_not_pass_main_folder = True
    list_of_airspace_types_included = []
    while idxLine < len(kml):
        #print(kml[idxLine])
        #if '<Folder>' in kml[idxLine] and did_not_pass_main_folder:
        #    # we have to jump over the first folder
        #    print(f'Reading everything inside folder: {kml[idxLine]}')
        #    did_not_pass_main_folder = False
        if '<Folder>' in kml[idxLine]:  # begin of airspace
            # The folder's <name> line carries the airspace type, e.g. <name>B</name>
            as_type = kml[idxLine+1].replace('\t','').replace('<name>','').replace('</name>\n','') # <name>B</name>
            print('Reading AS-types: ' + as_type)
            list_of_airspace_types_included.append(as_type)
            #if not (as_type == 'A' or as_type == 'B'):
            #    print('#### Check Folder / Airspace Types, must be "A" or "B" and try again (current %s)' % as_type)
            #    msgbox('Check Folder / Airspace Types, are not "A" or "B" (current %s). Airspace E will be used for export.' % as_type)
            #    as_type = 'E'
        if '<Placemark' in kml[idxLine]:  # begin of airspace
            container = []
        if '</Placemark' in kml[idxLine]:  # end of airspace
            # make sure only Polygons are stored
            for as_line in container:
                if '<Polygon>' in as_line:
                    idx_lookAt_start = None
                    for idx, line_of_container in enumerate(container):
                        if "<LookAt>" in line_of_container:
                            idx_lookAt_start = idx
                        if "</LookAt>" in line_of_container:
                            idx_lookAt_end = idx
                    # Remove lookAt lines if necessary
                    if idx_lookAt_start:
                        container = container[0:idx_lookAt_start] + container[idx_lookAt_end+1::]  # cut out look at part
                    # append airspace to airspace list as airspace class
                    self.airspaces.append(Airspace(lines=container, file_type='kml', as_type=as_type))
        container.append(kml[idxLine])
        idxLine += 1
    print('Loaded %d airspaces from KML-file (%s)' %(len(self.airspaces),full_path))
    # summary
    outlines = ['* KML conversion file, rename this line']
    json_dict = {"circles": [], "polygons": []}
    for airspace in self.airspaces:
        # prepare open-airspace formate
        outlines.append('\n\n')  # separate airspaces
        outlines.extend(airspace.txt_lines)
        # prepare json
        json_dict['polygons'].append(airspace.json_dict)
    # write open airspace format
    target_path = full_path[:-4] + '_converted.txt'
    # uisave dialog
    target_path = filesavebox(default=target_path, filetypes="*.txt")
    if target_path is None:
        print('Airspace conversion was aborted by the user')
        quit()
    f = open(target_path,'w')
    f.writelines(outlines)
    f.close()
    print('Result was written to: %s' % target_path)
    # write json:
    target_path_json = target_path[:-4] + '.json'
    json_string = json.dumps(json_dict)
    json_file = open(target_path_json, "w")
    json_file.write(json_string)
    json_file.close()
    # write list of airspace files for index.html for leaflet map
    print('The following airspace types have been converted:')
    print(list_of_airspace_types_included)
[ "def keyholemarkup2x(file,output='df'):\n r = re.compile(r'(?<=\\.)km+[lz]?',re.I)\n try:\n extension = r.search(file).group(0) #(re.findall(r'(?<=\\.)[\\w]+',file))[-1]\n \n \n except IOError as e:\n logging.error(\"I/O error {0}\".format(e))\n if (extension.lower()=='kml') is T...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert to open airspace format
def make_open_airspace_format(self):
    """Convert this airspace's KML lines to OpenAir format text lines.

    Extracts the name and the first <coordinates> block from
    ``self.kml_lines``, then appends AC/AN/AL/AH records and one DP record
    per coordinate to ``self.txt_lines``.

    NOTE(review): heights are hard-coded to FL98/FL99 — presumably
    placeholders; confirm against the intended output.
    """
    # Extract coordinates from KML
    for idxline in range(len(self.kml_lines)):
        if '<name>' in self.kml_lines[idxline]:
            self.name = self.kml_lines[idxline].replace('\t', '').replace('<name>', '').replace('</name>', '').replace('\n','')
            if not self.name.startswith('TS'):
                self.name = 'TS_' + self.name
            print('Type: %s | Name: %s' % (self.as_type, self.name))
        if '<coordinates>' in self.kml_lines[idxline]:
            # Coordinates are on the line following the <coordinates> tag.
            self.coordinates_kml = self.kml_lines[idxline + 1].replace('\t', '').replace('\n', '')
            break
    # start conversion to airspace format
    # (target format example:)
    """ AC A
    AN TS_Erzgeb
    AL FL98
    AH FL99
    DP 50:26:22 N 012:17:59 E
    DP 50:25:25 N 012:18:26 E
    DP 50:24:40 N 012:19:01 E
    DP 50:24:06 N 012:19:46 E"""
    # AC A
    self.txt_lines.append('AC %s\n' % self.as_type)
    # AN TS_Erzgeb
    self.txt_lines.append('AN %s\n' % self.name)
    # heights
    self.txt_lines.append('AL FL98\n')
    self.txt_lines.append('AH FL99\n')
    # coordinates
    for coo_pt in self.coordinates_kml.split(' ')[:-1]:
        # Target format: DP 50:26:22 N 012:17:59 E
        lat_long = coo_pt.split(',')
        # latitude: convert decimal degrees to degrees:minutes:seconds
        latDecAsStr = lat_long[1].split('.')
        #if '.' not in latDecAsStr:  # take care of case "51" instead of "51.123456"
        #    latDecAsStr += '.000000'
        lat_degree = abs(int(latDecAsStr[0]))
        #print(f'latDecAsStr {latDecAsStr}')
        if len(latDecAsStr)==1:
            # No decimal part present, e.g. "51" instead of "51.123456".
            latDecAsStr.append('0')
        lat_secondDec = (float('0.' + latDecAsStr[1])*60) % 1
        lat_minute = round((float('0.' + latDecAsStr[1])*60) - lat_secondDec)
        lat_second = round(lat_secondDec*60)
        cooString = ('DP %02d:%02d:%02d' %(lat_degree,lat_minute,lat_second))
        if latDecAsStr[0].startswith('-'):
            cooString += ' S'
        else:
            cooString += ' N'
        # longitude
        #print(f'converting lat_long {lat_long}')
        # take care of case: no decimal sign included, case "11" instead of "11.123456"
        if '.' not in lat_long[0]:
            lat_long[0] += '.0'
        lonDecAsStr = lat_long[0].split('.')
        lon_degree = abs(int(lonDecAsStr[0]))
        lon_secondDec = (float('0.' + lonDecAsStr[1]) * 60) % 1
        lon_minute = round((float('0.' + lonDecAsStr[1]) * 60) - lon_secondDec)
        lon_second = round(lon_secondDec * 60)
        cooString += (' %03d:%02d:%02d' % (lon_degree, lon_minute, lon_second))
        if lonDecAsStr[0].startswith('-'):
            cooString += ' W'
        else:
            cooString += ' E'
        cooString += '\n'
        self.txt_lines.append(cooString)
[ "def kml_2_open_airspace_and_json_format(self, full_path):\n # read file\n f = open(full_path,'r')\n kml = f.readlines()\n f.close()\n # find airspaces\n \"\"\"Placemark >\n < name > Bremen - Blumenthal\n Thermikplatte < / name >\n < styleUrl > # inlin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses the given template in order to produce the KML format.
def make_kml_format(self,kml_template): if self.as_type == 'A': self.kml_lines = kml_template['good_subdivided']['placemark'] elif self.as_type == 'B': self.kml_lines = kml_template['bad_subdivided']['placemark'] else: print('Unknown airspace type') # get idx of name and coordinates idxLine = 0 while idxLine < len(self.kml_lines): #print(self.kml_lines[idxLine] if self.kml_lines[idxLine].startswith('\t\t\t\t<name>'): # begin of airspace idx_name = idxLine if '\t\t\t\t\t\t\t<coordinates>\n' in self.kml_lines[idxLine]: # begin of airspace idx_coordinates = idxLine+1 idxLine += 1 # transform coordinates # add all coordinates: Format is: # source: 'DP 50:26:22 N 012:17:59 E\n' # target: 9.025830271397426,53.46493577242719,0 8.986157446488383,53.46952117358134,0 coo_list = [] # collect list of coorinates as strings for line in self.txt_lines: if line.startswith('AN'): self.name = line[3:].replace('\n','') self.kml_lines[idx_name] = '\t\t\t\t<name>%s</name>\n' % self.name if line.startswith('DP'): # lon lon_deg = float(line[14:17]) lon_min = float(line[18:20]) lon_sec = float(line[21:23]) lon_dec = (lon_sec / 60 + lon_min) / 60 + lon_deg if line[24] == 'W': lon_dec *= -1 # negative if west # lat lat_deg = float(line[3:5]) lat_min = float(line[6:8]) lat_sec = float(line[9:11]) lat_dec = (lat_sec / 60 + lat_min) / 60 + lat_deg if line[12] == 'S': lat_dec *= -1 # negative if west # attach coordinates coo_list.append('%1.16f,%1.16f,0 ' % (lon_dec,lat_dec)) # store for later plotting self.lat_dec.append(lat_dec) self.lon_dec.append(lon_dec) # make sure that shape is closed --> first an last point must be the same if coo_list[0] != coo_list[-1]: coo_list.append(coo_list[0]) self.lat_dec.append(self.lat_dec[0]) self.lon_dec.append(self.lon_dec[0]) # write coordinate strings into kml self.kml_lines[idx_coordinates] = '\t\t\t\t\t\t\t\t' # is prefix. 
Coordinates to be added as string below for pt in coo_list: self.kml_lines[idx_coordinates] += pt print('Converted airspace %s' % self.name)
[ "def generate_document_kml(self, title, content):\n return \"\"\"\\\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<kml xmlns=\"http://earth.google.com/kml/2.1\">\n <Document>\n <name>%s</name>\n <description></description>\n <Style>\n <ListStyle id=\"hideChildren\">\n <listItemType>checkHideCh...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterID2ID2ID2ID2_Superclass Create a new object of the class itkTernaryAddImageFilterID2ID2ID2ID2_Superclass and set the input and the parameters if some named or nonnamed arguments are passed to that method. New() tries to assign all the non named parameters to the input of the new objects the first non named parameter in the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs): obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__() import itkTemplate itkTemplate.New(obj, *args, **kargs) return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIUL2IUL2IUL2IUL2_Superclass.__New_orig__()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterID2ID2ID2ID2_Superclass_cast(itkLightObject obj) > itkTernaryAddImageFilterID2ID2ID2ID2_Superclass
def itkTernaryAddImageFilterID2ID2ID2ID2_Superclass_cast(*args): return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID2ID2ID2ID2_Superclass_cast(*args)
[ "def itkNotImageFilterIF2IF2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def itkNotImageFilterIUS2IUS2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIUS2IUS2_Superclass *\":\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterID3ID3ID3ID3_Superclass Create a new object of the class itkTernaryAddImageFilterID3ID3ID3ID3_Superclass and set the input and the parameters if some named or nonnamed arguments are passed to that method. New() tries to assign all the non named parameters to the input of the new objects the first non named parameter in the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs): obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__() import itkTemplate itkTemplate.New(obj, *args, **kargs) return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterID3ID3ID3_Superclass.__New_orig__()\n import itkT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterID3ID3ID3ID3_Superclass_cast(itkLightObject obj) > itkTernaryAddImageFilterID3ID3ID3ID3_Superclass
def itkTernaryAddImageFilterID3ID3ID3ID3_Superclass_cast(*args): return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterID3ID3ID3ID3_Superclass_cast(*args)
[ "def itkNotImageFilterIF3IF3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF3IF3_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF3IF3_Superclass_cast(obj)", "def itkNotImageFilterISS3ISS3_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterISS3ISS3_Superclass *\":\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass Create a new object of the class itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass and set the input and the parameters if some named or nonnamed arguments are passed to that method. New() tries to assign all the non named parameters to the input of the new objects the first non named parameter in the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs): obj = itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass.__New_orig__() import itkTemplate itkTemplate.New(obj, *args, **kargs) return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID2ID2ID2ID2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF2IF2IF2_Superclass.__New_orig__()\n import itkT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass_cast(itkLightObject obj) > itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass
def itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass_cast(*args): return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF2IF2IF2IF2_Superclass_cast(*args)
[ "def itkNotImageFilterIF2IF2_Superclass_cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPython.itkNotImageFilterIF2IF2_Superclass_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkNotImageFilterIF2IF2_Superclass *\":\n return _itkNotImageFilterPytho...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New() > itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass Create a new object of the class itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass and set the input and the parameters if some named or nonnamed arguments are passed to that method. New() tries to assign all the non named parameters to the input of the new objects the first non named parameter in the first input, etc. The named parameters are used by calling the method with the same name prefixed by 'Set'.
def New(*args, **kargs): obj = itkTernaryAddImageFilterIF3IF3IF3IF3_Superclass.__New_orig__() import itkTemplate itkTemplate.New(obj, *args, **kargs) return obj
[ "def New(*args, **kargs):\n obj = itkTernaryAddImageFilterID3ID3ID3ID3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkAddImageFilterIF3IF3IF3_Superclass.__New_orig__()\n import itkT...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }