query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Downloads file from website.
def download_file(url):
    """Fetch *url* over HTTP GET and return the raw requests Response object.

    NOTE(review): no timeout or status-code check is performed; the caller
    receives the Response exactly as requests produced it.
    """
    response = requests.get(url)
    return response
[ "def download(url, file_name):\n with open(file_name, \"wb\") as file:\n response = get(url)\n file.write(response.content)", "def download_file(self, url, filename):\n with open(filename, 'wb') as f:\n f.write(self.get_read(url))", "def download(url, filename):\n response ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rename source file to destination filename; replace destination file.
def rename_and_overwrite_file(source_filename, destination_filename):
    """Move *source_filename* to *destination_filename*.

    Uses os.replace, so an existing destination file is silently replaced
    (atomic on POSIX when both paths live on the same filesystem).
    """
    return os.replace(source_filename, destination_filename)
[ "def rename_file(source, dest):\r\n os.rename(source, dest)\r\n __remove_pyc_pyo(source)", "def Rename(src, dst):\n os.rename(src, dst)", "def rename_file(self, file_id, name):\n pass", "def rename(src, dst):\n if sys.platform == \"win32\":\n return win32_rename(src, dst)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Groups data by selected criteria (cf. groupby method).
def group_data_by_selection(dataframe, grouping_selection_list):
    """Group *dataframe* by the given column selection.

    Thin wrapper around DataFrame.groupby; returns the resulting
    GroupBy object for further aggregation by the caller.
    """
    return dataframe.groupby(grouping_selection_list)
[ "def grouping_attributes(self, grouping_info, data):\n print(\"************* start grouping some attributes **************\")\n for grouping in grouping_info:\n attributes = grouping['attributes']\n new_attr = grouping['grouped_name']\n\n # group attribute values into ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print all asyncio tasks continuously in debug mode.
def print_tasks():
    """Generator coroutine that logs the status of every asyncio task forever.

    Every 10 seconds it walks all known tasks and logs whether each one is
    done, failed, or still running.  Intended for debug mode only.

    NOTE(review): asyncio.Task.all_tasks() was removed in Python 3.9; on
    modern interpreters this must become asyncio.all_tasks() — confirm the
    target runtime.
    """
    while True:
        yield from asyncio.sleep(10)
        for task in asyncio.Task.all_tasks():
            if not task.done():
                logger.debug("Tasks RUNNING: %s", task)
                continue
            exc = task.exception()
            if exc is None:
                logger.info("Task DONE: %s = %s", task, task.result())
            else:
                logger.error("Task FAILED: %s = %s", task, exc)
[ "async def debug_task(self, ctx, memory_id: hex_value):\n task = object_at(memory_id)\n if task is None or not isinstance(task, asyncio.Task):\n return await ctx.send(f'Could not find Task object at {hex(memory_id)}.')\n\n if ctx.invoked_with == 'cancel_task':\n task.cance...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record pairs generated by Unify's binning model. Pairs are displayed on the "Pairs" page in the Unify UI.
def pairs(self):
    """Record pairs generated by Unify's binning model.

    Pairs are displayed on the "Pairs" page in the Unify UI.  The pairs are
    exposed as a Dataset rooted at <api_path>/recordPairs.
    """
    resource_path = self.api_path + "/recordPairs"
    return Dataset(self.client, None, resource_path)
[ "def fixed_pairs(\n self,\n ) -> Tuple[\n List[Tuple[str, str, Union[int, float]]],\n List[Tuple[str, str, Union[int, float]]],\n List[Tuple[str, str, Union[int, float]]],\n ]:\n assert (\n self.train_pairs is not None and self.test_pairs is not None\n ), \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Machine learning model for pair-matching for this Mastering project. Learns from verified labels and predicts categorization labels for unlabeled pairs.
def pair_matching_model(self):
    """Machine-learning model for pair matching in this Mastering project.

    Learns from verified labels and predicts categorization labels for
    unlabeled pairs; exposed at <api_path>/recordPairsWithPredictions/model.
    """
    resource_path = self.api_path + "/recordPairsWithPredictions/model"
    return MachineLearningModel(self.client, None, resource_path)
[ "def predict_NN(self):\r\n data = self.data_train1\r\n labels = self.labels_train\r\n data_test = self.data_test1\r\n labels_test = self.labels_test\r\n \r\n model = MLPClassifier()\r\n model.fit(data, labels.iloc[:,0])\r\n prediction = model.predict(data_test...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record Clusters as a dataset. Unify clusters labeled pairs using pairs model. These clusters populate the cluster review page and get transient cluster ids, rather than published cluster ids (i.e., "Permanent Ids")
def record_clusters(self):
    """Record clusters as a Dataset.

    Unify clusters labeled pairs using the pairs model.  These clusters
    populate the cluster-review page and carry transient cluster ids rather
    than published ("permanent") ids.
    """
    resource_path = self.api_path + "/recordClusters"
    return Dataset(self.client, None, resource_path)
[ "def _cluster(self):\n self.kmeans = KMeans(n_clusters=self.cluster_num).fit(self.vectors)\n self.k_books = pd.DataFrame(list(zip(list(self.kmeans.labels_),\n list(self.reviews.index))),\n columns=['k_label', 'book_id'])", "def re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns pair estimate information for a mastering project
def estimate_pairs(self):
    """Return pair-estimate information for a mastering project.

    Fetches <api_path>/estimatedPairCounts and wraps the JSON payload in an
    EstimatedPairCounts object.
    """
    alias = self.api_path + "/estimatedPairCounts"
    body = self.client.get(alias).successful().json()
    return EstimatedPairCounts.from_json(self.client, body, api_path=alias)
[ "def _optimNodePairs(self):\n trialKtauSum = np.zeros(len(self.tree.nodes()))\n trialPairings = []\n # Generate all possible node pairings\n for i, rootNodeID in enumerate(self.tree.nodes()):\n trialPairings.append([])\n for nodeID in self.tree.nodes():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Project's unified dataset with associated clusters.
def record_clusters_with_data(self):
    """Project's unified dataset with associated clusters."""
    unified = self.unified_dataset()
    # Replace this workaround with a direct API call once the API is fixed.
    # The APIs that need to work are: fetching the dataset and being able to
    # call refresh on the resulting dataset.  Until then, we grab the dataset
    # by constructing its name from the corresponding Unified Dataset's name.
    name = unified.name + "_dedup_clusters_with_data"
    return self.client.datasets.by_name(name)
    # super.__repr__ is sufficient
[ "def published_clusters_with_data(self):\n\n unified_dataset = self.unified_dataset()\n name = unified_dataset.name + \"_dedup_published_clusters_with_data\"\n return self.client.datasets.by_name(name)", "def get_dataset(self, user, name):\n\n return super().get_repo(\"dataset\", user,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Project's unified dataset with associated clusters.
def published_clusters_with_data(self):
    """Project's unified dataset with associated published clusters.

    The dataset is located by constructing its conventional name from the
    Unified Dataset's name.
    """
    unified = self.unified_dataset()
    name = unified.name + "_dedup_published_clusters_with_data"
    return self.client.datasets.by_name(name)
[ "def record_clusters_with_data(self):\n unified_dataset = self.unified_dataset()\n\n # Replace this workaround with a direct API call once API\n # is fixed. APIs that need to work are: fetching the dataset and\n # being able to call refresh on resulting dataset. Until then, we grab\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Binning model for this project.
def binning_model(self):
    """Binning model for this project."""
    alias = self.api_path + "/binningModel"
    # This resource cannot be fetched directly, so its JSON representation is
    # hard-coded with just the relative id.
    stub = {"relativeId": alias}
    return BinningModel.from_json(self.client, stub, alias)
[ "def model(self) -> Path:\n return self.path.parent / f\"{self.path.stem}.bin\"", "def build_model(self):\n ...", "def convert_to_binary(self):\n convert_to_binary_params = copy(self.base_params)\n convert_to_binary_params.update(\n {\n \"app\": \"ConvertToB...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
tests self.root of the ktree
def test_ktree_root(ktree_four_nodes): assert ktree_four_nodes.root.val == 1
[ "def test_ktree_empty_root(ktree_empty):\n assert ktree_empty.root == None", "def test_root_node_on_init(five_bst):\n assert five_bst.root.val == 5", "def test_root_tree_manually(): # ***Incomplete test\n ##########################\n # Arrange.\n t = \"t\"\n tx = \"tx\"\n\n ###############...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
tests for self.root on passed empty ktree
def test_ktree_empty_root(ktree_empty): assert ktree_empty.root == None
[ "def empty_ktree():\n return KTree()", "def test_root_node_none():\n bst = BinarySearchTree()\n assert not bst.root", "def test_empty(test_empty_tree):\n assert find(test_empty_tree) == False", "def test_bst_initialized(bst_empty):\n assert bst_empty.root is None", "def test_tree_is_empty(emp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default request keyword arguments to be passed to the requests library.
def _default_request_kwargs(self):
    """Default keyword arguments passed to the requests library.

    Sends JSON and installs a response hook that checks authentication
    (raises on 401 via self._verify_auth).
    """
    return {
        'headers': {'Content-Type': 'application/json'},
        'hooks': {'response': self._verify_auth},
    }
[ "def _default_request_kwargs(self):\n default_kwargs = {\n 'headers': {\n 'Content-Type': 'application/json'\n },\n 'hooks': {}\n }\n return default_kwargs", "def __init__(self, **requests_kwargs):\n self.requests_kwargs = requests_kwargs...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the data from a ``requests.Response`` object.
def _get_response_data(self, response):
    """Return the 'data' field from a requests.Response JSON body.

    Returns None (and logs the exception) if the body is not valid JSON.
    A valid JSON body without a 'data' key also yields None via dict.get.
    """
    try:
        payload = response.json()
    except ValueError as ex:
        log.exception(ex)
        return None
    return payload.get('data')
[ "def __get_response(self):\n\n response = requests.get(self.URL)\n data = response.text\n parsed_data = BeautifulSoup(data, \"html.parser\")\n return parsed_data", "def extract_content(self, response):\n content = response.content\n return content", "def get_response_bo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A callback handler to verify that the given response object did not receive a 401.
def _verify_auth(self, resp, *args, **kwargs):
    """Response-hook callback: fail loudly when the server answered 401.

    Raises errors.AuthFailure for a 401; any other status passes through.
    """
    if resp.status_code != 401:
        return
    raise errors.AuthFailure(
        'Received response code 401 from {} {}.'
        .format(resp.request.method, resp.request.path_url)
    )
[ "def assertResponseUnauthorized(self, response):\n self.assertResponseCodeEquals(response, status.HTTP_401_UNAUTHORIZED)", "def unauthorized_handler(self, callback):\n self.unauthorized_callback = callback\n return callback", "def unauthorized():\n return make_response(jsonify({'error': ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default request keyword arguments to be passed to the requests library.
def _default_request_kwargs(self):
    """Default keyword arguments passed to the requests library.

    Sends JSON; no response hooks are installed in this variant.
    """
    return {
        'headers': {'Content-Type': 'application/json'},
        'hooks': {},
    }
[ "def _default_request_kwargs(self):\n default_kwargs = {\n 'headers': {\n 'Content-Type': 'application/json'\n },\n 'hooks': {\n 'response': self._verify_auth\n }\n }\n return default_kwargs", "def __init__(self, **requ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adjust Amazon Web Services and/or Rackspace Acl Sync feature for this instance.
def acl_sync(self, aws_sync=None, rackspace_sync=None):
    """Adjust the AWS and/or Rackspace ACL-sync feature for this instance.

    Args:
        aws_sync: new AWS sync flag, or None to leave the current setting.
        rackspace_sync: new Rackspace sync flag, or None to leave unchanged.

    Returns the decoded JSON body of the PUT response.
    Raises errors.ObjectRocketException if the current status cannot be read.
    """
    url = self._url + 'acl_sync'
    payload = {"aws_acl_sync_enabled": False, "rackspace_acl_sync_enabled": False}
    # Read the current status first so a flag left as None keeps its value.
    response = requests.get(url, **self._instances._default_request_kwargs)
    if response.status_code != 200:
        raise errors.ObjectRocketException(
            "Couldn't get current status of instance, failing. Error: {}".format(response.text)
        )
    current = response.json().get('data', {})
    payload["aws_acl_sync_enabled"] = current.get("aws_acl_sync_enabled", False)
    payload["rackspace_acl_sync_enabled"] = current.get("rackspace_acl_sync_enabled", False)
    # Apply only the explicitly requested changes.
    if aws_sync is not None:
        payload["aws_acl_sync_enabled"] = aws_sync
    if rackspace_sync is not None:
        payload["rackspace_acl_sync_enabled"] = rackspace_sync
    response = requests.put(url, json=payload, **self._instances._default_request_kwargs)
    return response.json()
[ "def update_autoload(self):\n if self.autoload_enabled and not self.get_current_configuration()[\"host_connectivity_reporting_enabled\"]:\n try:\n rc, host_connectivity_reporting = self.request(\"storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true\" % se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The service this instance provides.
def service(self):
    """The service this instance provides (backed by self._service)."""
    return self._service
[ "def service(self):\n return self[0]", "def service_constructor(self):\n raise NotImplementedError", "def getHttpService(self):\n # type: () -> IHttpService", "def service(self) -> \"IngressServiceBackend\":\n return typing.cast(\n \"IngressServiceBackend\",\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The settings on this instance's service.
def settings(self):
    """The settings on this instance's service (backed by self._settings)."""
    return self._settings
[ "def settings(self) -> Settings:\n return self.injector.settings", "def get_settings():\n return settings", "def settings(self) -> SettingsManager:\n return self._settings", "def system_settings(self):\n return self._system_settings", "def settings(self) -> pulumi.Input['Exchange...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The document used to construct this Instance object.
def _instance_document(self):
    """The raw document used to construct this Instance object."""
    return self.__instance_document
[ "def create_document_(self, init_dict = None):\n if init_dict is None:\n initV = {}\n else:\n initV = init_dict\n\n return self.document_class(self, initV)", "def newDocument():\n return Document(HopperLowLevel.newDocument())", "def __init__(self, document):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The service specific URL of this instance object.
def _service_url(self):
    """Service-specific URL of this instance: <client url><service>/<name>/."""
    suffix = '{}/{}/'.format(self.service, self.name)
    return self._client._url + suffix
[ "def get_service_url():\n return config.SERVICE_URL", "def resource_url(self):\n return \"/services/\" + self.slug", "def service_endpoint(self) -> str:\n pass", "def endpointurl(self):\n return self._endpointurl", "def api_url(self):\n return f\"{self.instance_url}/api/0/\"",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register any extensions under the given namespace.
def _register_extensions(self, namespace):
    """Register any extension classes and methods under *namespace*.

    Classes are registered first, then methods, each through its own
    ExtensionManager; map exceptions are propagated to the caller.
    """
    for entry_point, registrar in (
        ('extensions.classes.{}'.format(namespace), util.register_extension_class),
        ('extensions.methods.{}'.format(namespace), util.register_extension_method),
    ):
        manager = ExtensionManager(entry_point, propagate_map_exceptions=True)
        if manager.extensions:
            manager.map(registrar, base=self)
[ "def register_extensions(app):\n bcrypt.init_app(app)\n ma.init_app(app)\n socketio.init_app(app)", "def register_extensions(self, extensions=[]):\n try:\n for extension, config in self.config['extensions'].items():\n\n # extension module base string\n ext_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all ACLs for this instance.
def all(self):
    """Get all ACLs for this instance (delegates to the client's ACL API)."""
    client = self._instance._client
    return client.acls.all(self._instance.name)
[ "def get_acls(self):\n return self.access_list_manager.get_objects()", "def acl(self):\n # type: () -> list[AclEntry]\n return self._acl", "def getACLs(self, account):\n return EquipmentACLInfo.getRulesForEquipment(account, self)", "def get_network_acls(self):\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the ACL specified by ID belonging to this instance.
def get(self, acl):
    """Get the ACL with the given id belonging to this instance."""
    client = self._instance._client
    return client.acls.get(self._instance.name, acl)
[ "def get_network_acl_by_id(self, id):\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}?version={}&generation={}\".format(\n id, self.cfg[\"version\"], self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The SHA1 of the file.
def sha1(self):
    """Hex SHA-1 digest of the file at self.path, read in BUF_SIZE chunks."""
    digest = sha1()
    with open(self.path, 'rb') as stream:
        # iter with a b'' sentinel stops exactly where the original
        # `if not data: break` did.
        for chunk in iter(lambda: stream.read(BUF_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
[ "def sha1(self):\n return hashlib.sha1(self.get_bytes()).hexdigest()", "def get_hash_sha1(self):\n\n if sha1 is not None:\n return sha1( self.get_data() ).hexdigest()", "def SHA1Sum(klass, filename):\n return hashlib.sha1(path(filename).text()).hexdigest()[:8]", "def sha1_file(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Full crawl within the omniglot domain.
def crawl_omniglot(outputdir): homepage = urllib2.urlopen(OMNIGLOT).read() crawled = [] for i in re.findall(AHREF_REGEX,homepage): if not i.startswith("http://") and not i.endswith("/") and \ not i.startswith('https://'): if OMNIGLOT+i not in crawled: print OMNIGLOT+i x = urllib2.urlopen(OMNIGLOT+i).read() filename = (OMNIGLOT+i).rpartition('/')[2] print filename print>>codecs.open(outputdir+filename,'w','utf8'), x time.sleep(random.randrange(5,10)) crawled.append(OMNIGLOT+i)
[ "def crawl(self):\n counter = 0\n to_visit = [self.base_url]\n while counter != self.max_links:\n if to_visit[0] in self.visited_pages:\n to_visit.pop(0)\n \n else:\n w = WebPage(to_visit[0])\n for item in list(w....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of linked pages from Omniglot's numbers page.
def get_num_pages():
    """Return a deduplicated list of pages linked from Omniglot's numbers page
    (Python 2 code: urllib2 / BeautifulSoup)."""
    NUMBERS = "http://www.omniglot.com/language/numbers/"
    html = urllib2.urlopen(MULTILING_URLS['num']).read()
    links = set()
    for entry in bs(html).findAll('dd'):
        links.add(NUMBERS + str(re.findall(AHREF_REGEX, str(entry))[0]))
    return list(links)
[ "def pageNumbers(self):\n\t pages = self.listFolderContents(contentFilter={\"portal_type\":\"Broadsheet\"})\n\t linkPages = []\n\t for page in pages:\n\t\tlinkPages.append(page.pageNumber())\n\n\t return linkPages", "def _wikipedia_Page_linkedPages(self):\n return [page for page in toolserver.Gener...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of linked pages from Omniglot's babel page.
def get_babel_pages():
    """Return (language name, absolute URL) pairs linked from Omniglot's babel
    page (Python 2 code: urllib2 / BeautifulSoup / unicode)."""
    BABEL = "http://www.omniglot.com/babel/"
    html = urllib2.urlopen(MULTILING_URLS['babel']).read()
    # Only the first ordered list on the page holds the language links.
    first_list = unicode(bs(html).findAll('ol')[0])
    return [(unicode(anchor.text), BABEL + anchor.get('href'))
            for anchor in bs(first_list).findAll('a')]
[ "def _wikipedia_Page_linkedPages(self):\n return [page for page in toolserver.Generators.getPagelinks(self)]", "def crawl_babel_pages(outputdir=DATADIR+\"omniglot/babel/\"):\n babel = get_babel_pages()\n # Creates output directory if it doesn't exist.\n if not os.path.exists(outputdir):\n os.makedirs(out...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawls Omniglot for babel stories pages and save in outputdir.
def crawl_babel_pages(outputdir=DATADIR+"omniglot/babel/"):
    """Crawl Omniglot for babel story pages and save them in *outputdir*.

    Bug fix: the original passed *outputdir* (a directory path) straight to
    codecs.open, so every page tried to write to the directory itself and
    each iteration clobbered the previous one.  Each page is now written to
    its own file inside *outputdir*, named after the page URL's last
    component (matching crawl_omniglot's naming scheme).
    """
    babel = get_babel_pages()
    # Creates output directory if it doesn't exist.
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    for lang, page in babel:
        html = urllib2.urlopen(page).read()
        if outputdir != None:
            # One file per page, named after the last URL component.
            outfile = outputdir + page.rpartition('/')[2]
            with codecs.open(outfile, 'w', 'utf8') as fout:
                print>>fout, html
        # Be polite to the server.
        time.sleep(random.randrange(5, 10))
[ "def crawl_omniglot(outputdir):\n homepage = urllib2.urlopen(OMNIGLOT).read()\n crawled = []\n \n for i in re.findall(AHREF_REGEX,homepage): \n if not i.startswith(\"http://\") and not i.endswith(\"/\") and \\\n not i.startswith('https://'): \n if OMNIGLOT+i not in crawled:\n print OMNIGLOT+i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yield source and translation sentences from the clean Omniglot tarball.
def phrases(intarfile=parentddir+'/data/omniglot/omniglotphrases.tar', \
            onlysource=False):
    """Yield source and translation sentences from the clean Omniglot tarball.

    Yields (language, sentence) when *onlysource* is true and the sentence is
    non-empty, otherwise (language, sentence, translation).
    """
    for infile in read_tarfile(intarfile):
        # Language code is embedded in the file name: .../<prefix>-<lang>.<ext>
        language = infile.split('/')[-1].split('-')[1].split('.')[0].split('_')[0]
        with codecs.open(infile, 'r', 'utf8') as fin:
            for line in fin.readlines():
                sentence, translation = line.strip().split('\t')
                if onlysource and sentence:
                    yield language, sentence.strip()
                else:
                    yield language, sentence, translation
[ "def iter_sentences(self):\n _, filename_tail = split(self.filename)\n filename_base, _ = splitext(splitext(filename_tail)[0])\n with tarfile.open(self.filename, \"r:gz\") as tar:\n sentence_filename = join(filename_base, filename_base +\n '-senten...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of languages available from original data source.
def num_languages():
    """Return how many languages the original data source provides."""
    available = languages()
    return len(available)
[ "def available_languages(self):\r\n return Language.objects.filter(\r\n id__in=RLStats.objects.by_resource(\r\n self\r\n ).order_by().values('language').query\r\n )", "def get_country_count():\r\n\r\n lines = country_pop.split('\\n')\r\n return len(lines)-1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse data. Returns list of dicts with prices for each day in downloaded period.
def parse_data(self, initial_rates: List[bs4.element.Tag]) -> List[Dict[str, str]]:
    """Parse scraped table rows into per-day price dicts.

    Each <tr>-like tag in *initial_rates* becomes one dict mapping the
    COLUMN_TITLES entries to cleaned cell text: a leading "$" is stripped,
    thousands separators are removed, and the "date" column is normalized
    via self.convert_date.
    """
    rates = []
    for row in initial_rates:
        day_prices = {}
        for cell, column in zip(row.find_all("td"), COLUMN_TITLES):
            raw = cell.text
            # Keep only the part after a leading dollar sign, if present.
            text = raw.split("$")[1] if raw.startswith("$") else raw
            if column == "date":
                day_prices[column] = self.convert_date(text)
            else:
                day_prices[column] = text.replace(",", "")
        rates.append(day_prices)
    return rates
[ "def _collect_price_time_series(self):\n r = requests.get(self.GRAPH_URL)\n #dictionary of 2 dictionaries, \"daily\" and \"average\"\n response = r.json()\n daily_series = TimeSeries.from_dictionary(response[\"daily\"])\n average_series = TimeSeries.from_dictionary(response[\"aver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a devicegroup and optionally add devices to it
def create_device_group(self, devicegroup, devices=None):
    """Create a device-group, optionally assigning *devices* to it."""
    self._logger.debug("Create device-group: %s" % (devicegroup,))
    if devices is None:
        # No members given: just create the (empty) device-group entry.
        self.xapi.set(pandevice.XPATH_DEVICE_GROUPS +
                      "/entry[@name='%s']" % (devicegroup,))
    else:
        self.set_device_group(devicegroup, devices, exclusive=True)
[ "def test_create_services_device_groups_device_group_by_device_group_name(self):\n pass", "def add_to_group(self, device_group_id):\n\n # Conditionally setup the message body, fields which have not been set will not be sent to the API.\n # This avoids null fields being rejected and allows the...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For Panorama, set the device group for a device
def set_device_group(self, devicegroup, devices, exclusive=False):
    """For Panorama, set the device group for one or more devices.

    Moves each device out of its current group (if different) and into
    *devicegroup*; a devicegroup of None only removes devices from their
    current group.
    """
    # TODO: Implement 'exclusive'
    self._logger.debug("Set device-group to '%s'" % (devicegroup))
    if issubclass(devices.__class__, pandevice.base.PanDevice):
        devices = [devices]
    # Refresh from Panorama if any device is missing serial or group info.
    needs_refresh = any(
        device.serial is None or device.devicegroup is None
        for device in devices
    )
    if needs_refresh:
        self.refresh_devices_from_panorama(devices)
    # All devices have serial numbers now, so start setting devicegroup
    for device in devices:
        # If the device was in a group, and that group changed, pull it out
        # of the current group.
        if device.devicegroup is not None and device.devicegroup != devicegroup:
            self._logger.debug("Moving device %s out of device-group %s" %
                               (device.hostname, device.devicegroup))
            self.set_config_changed()
            self.xapi.delete(
                pandevice.XPATH_DEVICE_GROUPS +
                "/entry[@name='%s']/devices"
                "/entry[@name='%s']" % (device.devicegroup, device.serial)
            )
            device.devicegroup = None
        # If assigning device to a new group
        if devicegroup is not None:
            self.set_config_changed()
            self._logger.debug("Moving device %s into device-group %s" %
                               (device.hostname, devicegroup))
            self.xapi.set(
                pandevice.XPATH_DEVICE_GROUPS +
                "/entry[@name='%s']/devices" % (devicegroup,),
                "<entry name='%s'/>" % (device.serial,)
            )
            device.devicegroup = devicegroup
[ "def test_update_device_group(self):\n pass", "def set_group(self, group: t.Optional[jank.graphics.Group]):", "def setGroups(self): \n deviceCounterFile = pyscript.app_config['devive_counter_file']\n deviceCounterMap = readYaml(deviceCounterFile) \n #Set key suffix (in order to write...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that 'rm q' outputs no progress indications.
def test_rm_quiet(self):
    """'rm' with the global -q flag must emit no 'Removing ' progress lines."""
    bucket_uri = self.CreateBucket()
    object_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
    stderr = self.RunGsUtil(['-q', 'rm', suri(object_uri)], return_stderr=True)
    self.assertEqual(stderr.count('Removing '), 0)
[ "def test_qual_del(self):\n self.check_fails(\"Quality/error_qual_del.fastq\", 3)\n self.check_general_passes(\"Quality/error_qual_del.fastq\", 5)", "def clear_test_result(self, test):", "def __del__(self):\n # Check first that project Q garbage collector not already removed qubits\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ask_question method with password.
def test_ask_question__password(self, _):
    """ask_question with password=True must return the mocked secret input."""
    result = self.user_manager.ask_question('field', password=True)
    self.assertEqual(result, 'password')
[ "def test_prompting(self):\n pass", "def vqa_prompt(self, question, answer=None) -> str:", "def test_check_guess(self):\n question = Question(\"Test\", \"correct\", [\"correct\", \"incorrect\"])\n self.assertTrue(question.check_guess(\"correct\"))\n self.assertFalse(question.check_gu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setup and run one demo controller for Vic
def runDemo(vcfile, remote=5621, expire=0.0):
    """Set up and run one demo controller for Vic.

    Builds the controller doers from the hard-coded secrets and hands them
    to the directing runner; *expire* of 0.0 means run until interrupted.
    """
    # NOTE(review): the first two adjacent string literals concatenate into a
    # single 88-character entry, so this list holds 7 secrets, not 8 —
    # confirm the missing comma is intentional.
    secrets = [
        'ALq-w1UKkdrppwZzGTtz4PWYEeWm0-sDHzOv5sq96xJY'
        'AxFfJTcSuEE11FINfXMqWttkZGnUZ8KaREhrnyAXTsjw',
        'AKuYMe09COczwf2nIoD5AE119n7GLFOVFlNLxZcKuswc',
        'A1-QxDkso9-MR1A8rZz_Naw6fgaAtayda8hrbkRVVu1E',
        'Alntkt3u6dDgiQxTATr01dy8M72uuaZEf9eTdM-70Gk8',
        'AcwFTk-wgk3ZT2buPRIbK-zxgPx-TKbaegQvPEivN90Y',
        'A6zz7M08-HQSFq92sJ8KJOT2cZ47x7pXFQLPB0pckB3Q',
        'ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAc',
    ]
    controller_doers = setupController(secrets=secrets,
                                       remotePort=remote,
                                       indirect=True,
                                       vcfile=vcfile)
    directing.runController(doers=controller_doers, expire=expire)
[ "def test_demo(self):\n self.cbct.run_demo(show=False)", "def test_demo(self):\n self.run_in_dir(\"/experiments/demo/**/\", start_idx=0)", "def main():\n pv_simulator = PVSimulator()\n pv_simulator.consume()", "def run(self):\n segments = self.controller.split('.')\n controll...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints an 'instaterecord' (input state record) the state of input at a given level of the input stack.
def printInStateRecord(x):
    """Decode an 'instaterecord' (input state record) gdb.Value into a dict.

    The record describes the state of input at one level of the input stack.
    A token-list level (statefield == TOKEN_LIST) is decoded into the token
    list itself; any other level is decoded into the relevant slice of the
    global `buffer`.
    """
    assert type(x) == gdb.Value
    assert str(x.type) == 'instaterecord'
    assert x.type == gdb.lookup_type('instaterecord')
    t = x.type
    assert t.name == 'instaterecord'
    assert t.code == gdb.TYPE_CODE_TYPEDEF
    field_names = ['statefield', 'indexfield', 'startfield',
                   'locfield', 'limitfield', 'namefield']
    assert [field.name for field in t.fields()] == field_names
    # Known scanner states.
    TOKEN_LIST = 0
    MID_LINE = 1
    SKIP_BLANKS = 17
    NEW_LINE = 33
    state = int(x['statefield'])
    assert state in {TOKEN_LIST, MID_LINE, SKIP_BLANKS, NEW_LINE}, 'Unexpected state: %s' % state
    if state == TOKEN_LIST:
        startNode = x['startfield']
        tokenList = showTokenList(startNode if x['indexfield'] < MACRO else link(startNode),
                                  x['locfield'])
        return {
            # See §307 for an explanation of these
            'statefield': int(x['statefield']),  # is just going to be 0, indicating this is a token list
            'indexfield': int(x['indexfield']),  # tokenListType
            'startfield': int(x['startfield']),  # startNode
            'locfield': int(x['locfield']),      # currentNodeLoc
            'limitfield': int(x['limitfield']),  # where params start, if MACRO
            'namefield': int(x['namefield']),    # where in eqtb, if MACRO
            'tokens': tokenList,                 # the actual tokens in the token list!
        }
    else:
        start = int(x['startfield'])
        limit = int(x['limitfield'])
        buffer_slice = [int(gdb.parse_and_eval('buffer[%d]' % i))
                        for i in range(start, limit + 1)]
        return {
            # See §303 for an explanation of these
            'statefield': int(x['statefield']),  # Scanner state
            'indexfield': int(x['indexfield']),  # Index (files open depth)
            'startfield': int(x['startfield']),  # where current line starts in buffer
            'locfield': int(x['locfield']),      # next char to read in buffer (or > limit meaning buffer is read)
            'limitfield': int(x['limitfield']),  # where current line ends in buffer
            'namefield': int(x['namefield']),    # file name
            'filename': gettexstring(x['namefield']),
            'buffertext': buffer_slice,
        }
[ "def __repr__(self, level = 0, visited = []):\n\t\toffset = \" \" * level\n\t\trep = offset + str(id(self)) + \"\\n\"\n\n\t\tif id(self) not in visited:\n\t\t\t# We add the id of the state to the list of visited states.\n\t\t\tvisited.append(id(self))\n\t\t\tfor (value, state) in self.connections:\n\t\t\t\t# The i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate that the edge descriptors do not reference nonexistent vertices or columns.
def validate_edge_descriptors(vertex_name_to_table, direct_edges):
    """Validate that edge descriptors reference only existing vertices/columns.

    Raises InvalidSQLEdgeError when either endpoint of a direct edge names a
    vertex missing from *vertex_name_to_table* or a column missing from that
    vertex's table.
    """
    for edge_name, descriptor in six.iteritems(direct_edges):
        endpoints = (
            (descriptor.from_vertex, descriptor.from_column),
            (descriptor.to_vertex, descriptor.to_column),
        )
        for vertex_name, column_name in endpoints:
            if vertex_name not in vertex_name_to_table:
                raise InvalidSQLEdgeError(
                    "SQL edge {} with edge descriptor {} references a "
                    "non-existent vertex {}".format(edge_name, descriptor, vertex_name))
            if column_name not in vertex_name_to_table[vertex_name].columns:
                raise InvalidSQLEdgeError(
                    "SQL edge {} with edge descriptor {} references a "
                    "non-existent column {}".format(edge_name, descriptor, column_name))
[ "def _validate_graph(self, G):\n for (v1, v2) in G.edges():\n if 'object' not in G.edges[v1, v2].keys():\n raise ValueError(\"edge_object for ({}, {}) is missing\".format(v1, v2))\n edge_object = G.edges[v1, v2]['object']\n if 'col' not in edge_object.keys():\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the physical dimensions of the context's viewport.
async def get_physical_viewport_dimensions(bidi_session, context):
    """Return the context's viewport size in physical pixels.

    CSS-pixel dimensions are scaled by the device pixel ratio and floored,
    yielding a (width, height) tuple of ints.
    """
    css_size = await get_viewport_dimensions(bidi_session, context)
    scale = await get_device_pixel_ratio(bidi_session, context)
    return (floor(css_size["width"] * scale), floor(css_size["height"] * scale))
[ "def getViewportSizePixels(self) -> \"SbVec2s const &\":\n return _coin.SbViewportRegion_getViewportSizePixels(self)", "def viewport (self):\n return self._viewport", "def getViewportSize(self) -> \"SbVec2f const &\":\n return _coin.SbViewportRegion_getViewportSize(self)", "def _get_viewp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from cursors. Can raise ConnectionFailure, OperationFailure, etc.
def run_operation( self, conn: Connection, operation: Union[_Query, _GetMore], read_preference: _ServerMode, listeners: Optional[_EventListeners], unpack_res: Callable[..., List[_DocumentOut]], ) -> Response: duration = None assert listeners is not None publish = listeners.enabled_for_commands if publish: start = datetime.now() use_cmd = operation.use_command(conn) more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come if more_to_come: request_id = 0 else: message = operation.get_message(read_preference, conn, use_cmd) request_id, data, max_doc_size = self._split_message(message) if publish: cmd, dbn = operation.as_command(conn) assert listeners is not None listeners.publish_command_start( cmd, dbn, request_id, conn.address, service_id=conn.service_id ) start = datetime.now() try: if more_to_come: reply = conn.receive_message(None) else: conn.send_message(data, max_doc_size) reply = conn.receive_message(request_id) # Unpack and check for command errors. if use_cmd: user_fields = _CURSOR_DOC_FIELDS legacy_response = False else: user_fields = None legacy_response = True docs = unpack_res( reply, operation.cursor_id, operation.codec_options, legacy_response=legacy_response, user_fields=user_fields, ) if use_cmd: first = docs[0] operation.client._process_response(first, operation.session) _check_command_response(first, conn.max_wire_version) except Exception as exc: if publish: duration = datetime.now() - start if isinstance(exc, (NotPrimaryError, OperationFailure)): failure: _DocumentOut = exc.details # type: ignore[assignment] else: failure = _convert_exception(exc) assert listeners is not None listeners.publish_command_failure( duration, failure, operation.name, request_id, conn.address, service_id=conn.service_id, ) raise if publish: duration = datetime.now() - start # Must publish in find / getMore / explain command response # format. 
if use_cmd: res: _DocumentOut = docs[0] elif operation.name == "explain": res = docs[0] if docs else {} else: res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] if operation.name == "find": res["cursor"]["firstBatch"] = docs else: res["cursor"]["nextBatch"] = docs assert listeners is not None listeners.publish_command_success( duration, res, operation.name, request_id, conn.address, service_id=conn.service_id, ) # Decrypt response. client = operation.client if client and client._encrypter: if use_cmd: decrypted = client._encrypter.decrypt(reply.raw_command_response()) docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) response: Response if client._should_pin_cursor(operation.session) or operation.exhaust: conn.pin_cursor() if isinstance(reply, _OpMsg): # In OP_MSG, the server keeps sending only if the # more_to_come flag is set. more_to_come = reply.more_to_come else: # In OP_REPLY, the server keeps sending until cursor_id is 0. more_to_come = bool(operation.exhaust and reply.cursor_id) if operation.conn_mgr: operation.conn_mgr.update_exhaust(more_to_come) response = PinnedResponse( data=reply, address=self._description.address, conn=conn, duration=duration, request_id=request_id, from_command=use_cmd, docs=docs, more_to_come=more_to_come, ) else: response = Response( data=reply, address=self._description.address, duration=duration, request_id=request_id, from_command=use_cmd, docs=docs, ) return response
[ "def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)", "def query( self, path_suffix, get_params ):\n #\n # Do sleep for delay query if necessary...\n if self.query_interval is not None:\n while time.time() < self.earliest_query_time:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Code to apply a savgol filter to the recorded body pose
def savgol_filter(body_3D_pose, left_hand_3D_pose,right_hand_3D_pose, threshold = 0.2): #Define properties of the savgol filter window_length, polyorder = 11, 2 #iterate over the hands for hand_pose in right_hand_3D_pose, left_hand_3D_pose: #iterate over the joints in each hand for joint in HAND: #Apply savgol filter to x, y and z position lists of that joint seperately x_filtered = signal.savgol_filter(list(zip(*hand_pose[joint.value]))[0], window_length, polyorder) y_filtered = signal.savgol_filter(list(zip(*hand_pose[joint.value]))[1], window_length, polyorder) z_filtered = signal.savgol_filter(list(zip(*hand_pose[joint.value]))[2], window_length, polyorder) #Define a list of whether the point we have found is believed to be valid, for use in the step below lost_track_list = list(zip(*hand_pose[joint.value]))[3] #Define a list of (x,y,z) points using the filtered list above smoothed_list = [list(elem) for elem in list(zip(x_filtered,y_filtered,z_filtered, lost_track_list))] #Update the hand_pose list for that joint with the new smoothed list of said joint's positions hand_pose[joint.value] = smoothed_list #iterate over the body's joints for joint in BODY: #Apply savgol filter to x, y and z position lists of that joint seperately x_filtered = signal.savgol_filter(list(zip(*body_3D_pose[joint.value]))[0], window_length, polyorder) y_filtered = signal.savgol_filter(list(zip(*body_3D_pose[joint.value]))[1], window_length, polyorder) z_filtered = signal.savgol_filter(list(zip(*body_3D_pose[joint.value]))[2], window_length, polyorder) #Define a list of whether the point we have found is believed to be valid, for use in the step below lost_track_list = list(zip(*body_3D_pose[joint.value]))[3] #Define a list of (x,y,z) points using the filtered list above smoothed_list = [list(elem) for elem in list(zip(x_filtered,y_filtered,z_filtered, lost_track_list))] #Update the hand_pose list for that joint with the new smoothed list of said joint's positions 
body_3D_pose[joint.value] = smoothed_list return body_3D_pose, left_hand_3D_pose,right_hand_3D_pose
[ "def get_arithmetic_mean_filter(self, kernel):\n kernel= np.array([[-1,-1,-1],[-1, 9,-1],[-1,-1,-1]])\n sharpened_img = cv2.filter2D(sp_05, -1, kernel_sharpening) \n return sharpened_img", "def apply(self, sed):\n\t\tWaveLength = np.array(sed['wavelength'])\n\t\tFluxLam = np.array(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to overwrite a specific invalid point with its last good value, defining the last valid point, and the replacement point, from the corresponding offset joint
def overwrite_position(joint, point_bad, frame_num, last_good_list, pose, body_3D_pose, last_good_body_list, offset_joint = False): #If the point is not invalid, just update the last good list if point_bad == False: last_good_list[joint.value] = [pose[joint.value][frame_num][0], pose[joint.value][frame_num][1], pose[joint.value][frame_num][2], pose[joint.value][frame_num][3], frame_num] #If the point is invalid and we have a previous valid point elif point_bad == True and last_good_list[joint.value] != []: #Treat the offsets from the wrists seperately as they are stored on seperate lists to the 'pose' list if we call for them from the left or right hand pose lists if offset_joint == BODY.LEFT_WRIST or offset_joint == BODY.RIGHT_WRIST: #If we have a previous offset valid point if last_good_body_list[offset_joint.value] != []: #Define the frame that the previous valid point is from offset_original_frame = last_good_list[joint.value][4] #Find the previous valid offset point location offset_position = body_3D_pose[offset_joint.value][offset_original_frame] #Find said location in relation to the offset joint old_pos_from_offset = np.subtract(last_good_list[joint.value][0:3], offset_position[0:3]) #Use the current position of the offset joint to find the position to overwrite the invalid point with new_pos = np.add(old_pos_from_offset[0:3], last_good_body_list[offset_joint.value][0:3]) #Update the invalid point on the pose list with the valid point pose[joint.value][frame_num] = [new_pos[0], new_pos[1], new_pos[2], True] #If you have no previous valid point, do nothing else: pass #For any other offset joint elif offset_joint != False: if last_good_list[offset_joint.value] != []: #Define the frame that the previous valid point is from offset_original_frame = last_good_list[joint.value][4] #Find the previous valid offset point location offset_position = pose[offset_joint.value][offset_original_frame] #Find said location in relation to the offset joint old_pos_from_offset = 
np.subtract(last_good_list[joint.value][0:3], offset_position[0:3]) #Use the current position of the offset joint to find the position to overwrite the invalid point with new_pos = np.add(old_pos_from_offset[0:3], last_good_list[offset_joint.value][0:3]) #Update the invalid point on the pose list with the valid point pose[joint.value][frame_num] = [new_pos[0], new_pos[1], new_pos[2], True] #If we do not have a previous valid point else: pass #If we call this without an offset joint, just use the last good value elif offset_joint == False: pose[joint.value][frame_num] = last_good_list[joint.value] #If we do not have a previous valid point else: pass return last_good_list, pose
[ "def error_correction(y_original, ind, labels, threshold):\n\n# WARNING, this function has not been tested and may not improve the\n# accuracy of the reconstruction, use with caution\n\n y = y_original.copy()\n ind = np.array(ind)\n # gives the position of the points that have a label == 1\n ind_var = [...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to crop a pose list to a certain length
def crop(listtocrop, length, start = 0): croppedlist = [] for row in listtocrop: croppedlist.append(row[start:length+start]) return croppedlist
[ "def cropImage():", "def crop_receptive(batch, crop_size):\n n,hx,wx,_ = batch.shape\n hy,wy = crop_size\n dhq, dhr = (hx-hy)//2, (hx-hy)%2\n dwq, dwr = (wx-wy)//2, (wx-wy)%2\n return batch[:, dhq: hx - (dhq + dhr), dwq: wx - (dwq + dwr) ]", "def crop(image):\r\n return image[60:-25, :, :] # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the percent change of daily close price.
def change_price_precent(self): stock_firstday = self.closeprice[0] self.dataframe['stock_%chg'] = (self.closeprice - stock_firstday)/stock_firstday change_price_precent = self.dataframe['stock_%chg'] return change_price_precent
[ "def daily_pct_change(self, save=False):\n logging.info('daily_pct_change() called')\n byYear = pd.DataFrame(self.group_by_year())\n dailyPctChange = pd.DataFrame()\n for year in byYear:\n # Here I use numpy to calculate the pct change\n dailyPctChange[year] = round...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare and plot the percent change of the stock close price and that of the actual market over time.
def plot_changeprice_comparison(self): fig = plt.figure() self.change_price_precent().plot(color = 'b',label = self.stock) market = Market(self.starttime,self.endtime) market.change_price_precent().plot(color = 'r',label = 'market') plt.legend() plt.xticks(rotation=45) plt.title('The Comparison between {} and market close price '.format(self.stock)) return fig
[ "def set_price_changes(self):\n self.market_data['pricechange'] = self.market_data['adj_close'].diff(1)\n self.market_data['percentchange'] = (np.log(self.market_data['adj_close']) - np.log(self.market_data['adj_close'].shift(1))).fillna(0)", "def plot_stock(df):\n fig,ax = plt.subplots(3)\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that divides equally and a positive number of lists to divide in to returns 3 lists all with 3 elements.
def test_equally_divisible_list_and_positive_int(self): result = split_list(self.equally_divisible_list, self.positive_int) self.assertEqual(len(result), 3) self.assertEqual(len(result[0]), 3) self.assertEqual(len(result[1]), 3) self.assertEqual(len(result[2]), 3)
[ "def testListDivide():\n test_a = listDivide([1, 2, 3, 4, 5])\n test_b = listDivide([2, 4, 6, 8, 10])\n test_c = listDivide([30, 54, 63, 98, 100], divide=10)\n test_d = listDivide([])\n test_e = listDivide([1, 2, 3, 4, 5], 1)\n\n tests = (test_a, test_b, test_c, test_d, test_e)\n while test_a =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that divides equally and number of lists to divide in to set as 1 returns 1 list with the same elements as the original list.
def test_equally_divisible_list_and_1(self): result = split_list(self.equally_divisible_list, 1) self.assertEqual(len(result), 1) self.assertEqual(result[0], self.equally_divisible_list)
[ "def test_unequally_divisible_list_and_1(self):\n result = split_list(self.unequally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.unequally_divisible_list)", "def test_unequally_divisible_list_and_zero(self):\n result = split_list(self.unequa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that divides equally and number of lists to divide in to set as 0 returns an empty list.
def test_equally_divisible_list_and_zero(self): result = split_list(self.equally_divisible_list, 0) self.assertEqual(len(result), 0)
[ "def test_unequally_divisible_list_and_zero(self):\n result = split_list(self.unequally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def test_unequally_divisible_list_and_1(self):\n result = split_list(self.unequally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that does not divide equally and number of lists to divide in to set as 1 returns 1 list with the same elements as the original list.
def test_unequally_divisible_list_and_1(self): result = split_list(self.unequally_divisible_list, 1) self.assertEqual(len(result), 1) self.assertEqual(result[0], self.unequally_divisible_list)
[ "def test_equally_divisible_list_and_1(self):\n result = split_list(self.equally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.equally_divisible_list)", "def test_unequally_divisible_list_and_zero(self):\n result = split_list(self.unequally_di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that does not divide equally and number of lists to divide in to set as 0 returns an empty list.
def test_unequally_divisible_list_and_zero(self): result = split_list(self.unequally_divisible_list, 0) self.assertEqual(len(result), 0)
[ "def test_equally_divisible_list_and_zero(self):\n result = split_list(self.equally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def test_unequally_divisible_list_and_1(self):\n result = split_list(self.unequally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns key information about current state of the ricepile. Returns tuple (L,t,z,z_c) if single == False. If single == i for any i in {0,1,2,3}, returns (L,t,z,z_c)[i].
def info(self,single = False): if single not in [False,1,2,3,4]: raise ValueError("single must take value in [False,1,2,3,4]") data = (self.__L, self.__t, self.__z, self.__z_c) if single == False: return data else: return data[single]
[ "def utility(self, state):\n line_sums = self._calculate_line_sums(state)\n\n # Player 1 wins\n if np.any(line_sums == 4):\n return {1: 1, 2: -1}\n\n # Player 2 wins\n if np.any(line_sums == -4):\n return {1: -1, 2: 1}\n\n # Draw, if all squares are fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of a selection of binary operators.
def test_compile_binary_operators(self): op_map = { operators.and_: ' AND ', operators.or_: ' OR ', operators.add: ' + ', operators.mul: ' * ', operators.sub: ' - ', operators.div: ' / ', operators.mod: ' MOD ', operators.truediv: ' / ', operators.lt: ' < ', operators.le: ' <= ', operators.ne: ' <> ', operators.gt: ' > ', operators.ge: ' >= ', operators.eq: ' = ', operators.concat_op: ' || ', operators.like_op: ' LIKE ', operators.is_: ' IS ', operators.isnot: ' IS NOT ' } for op in op_map.keys(): self.td_engine.execute(op(self.table.c.c1, text('arg'))) assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')
[ "def test_compile_any_all_operators(self):\n op_map = {\n operators.any_op: 'ANY ',\n operators.all_op: 'ALL ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(\n op(sql.select([self.table.c.c1]).as_scalar()))\n\n assert(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of the IN and NOT IN binary operators.
def test_compile_in_operators(self): op_map = { operators.in_op: ' IN ', operators.notin_op: ' NOT IN ', } for op in op_map.keys(): self.td_engine.execute(op(self.table.c.c1, (0, 0))) assert(self.last_compiled == 't_test.c1' + op_map[op] + '(?, ?)')
[ "def query_in(self, params):\n def make_list(value):\n if not isinstance(value, (tuple, list)):\n return [value]\n return value\n\n return self.get_conditions(params, 'in', '$in', make_list)", "def in_(field: FieldProxyAny, sequence: Sequence) -> QueryExpression:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of a selection of unary operators.
def test_compile_unary_operators(self): op_map = { operators.distinct_op: 'DISTINCT ', operators.inv: 'NOT ' } for op in op_map.keys(): self.td_engine.execute(op(self.table.c.c1)) assert(self.last_compiled == op_map[op] + 't_test.c1')
[ "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of the ANY and ALL unary operators.
def test_compile_any_all_operators(self): op_map = { operators.any_op: 'ANY ', operators.all_op: 'ALL ', } for op in op_map.keys(): self.td_engine.execute( op(sql.select([self.table.c.c1]).as_scalar())) assert(self.last_compiled == op_map[op] + '(SELECT t_test.c1 \nFROM t_test)')
[ "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of a selection of modifier operators.
def test_compile_modifier_operators(self): op_map = { operators.desc_op: ' DESC', operators.asc_op: ' ASC', operators.nullsfirst_op: ' NULLS FIRST', operators.nullslast_op: ' NULLS LAST', } for op in op_map.keys(): self.td_engine.execute(op(self.table.c.c1)) assert(self.last_compiled == 't_test.c1' + op_map[op])
[ "def test_compile_nested_operators(self):\n self.td_engine.execute(\n operators.and_(\n operators.ne(self.table.c.c1, 0),\n operators.mod(self.table.c.c1, 0)))\n\n assert(self.last_compiled == 't_test.c1 <> ? AND t_test.c1 MOD ?')", "def test_compile_binary_o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of the negative operator.
def test_compile_negative_operator(self): self.td_engine.execute(operators.neg(self.table.c.c1)) assert(self.last_compiled == '-t_test.c1')
[ "def test_negation_interaction(self):\n query, sort = beets.library.parse_query_string('-bar+',\n beets.library.Item)\n self.assertEqual(len(query.subqueries), 1)\n self.assertTrue(isinstance(query.subqueries[0],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of nested operators.
def test_compile_nested_operators(self): self.td_engine.execute( operators.and_( operators.ne(self.table.c.c1, 0), operators.mod(self.table.c.c1, 0))) assert(self.last_compiled == 't_test.c1 <> ? AND t_test.c1 MOD ?')
[ "def test_deeply_nested_primitive_operators(self):\n self.assert_to_cnf_transformation(\n '(A or (B and (C or (D and (E or (F and (G or (H and I))))))))',\n '(A or B) and (A or C or D) and (A or C or E or F) and '\n '(A or C or E or G or H) and (A or C or E or G or I)')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the stream of messages for symbol. An empty list is returned if the corresponding JSON file doesn't exist yet.
def read_stream(symbol): try: with open("stream{}.json".format(symbol), "r", encoding="utf-8") as f: stream = json.load(f) return(stream) except FileNotFoundError: print("Stream not found for {}.".format(symbol)) return([])
[ "def load_received_messages(username):\n lst = []\n\n # Open each json file in messages directory\n for file in glob('messages/*.json'):\n # Load message, append to list if 'to' matches username\n dict = _load_message(file)\n if dict['to'] == username:\n lst.append(dict)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format the sns topic into and aws ARN
def formatted_sns_topic_arn(cls, config): prefix = config['global']['account']['prefix'] topic = config['lambda']['rule_promotion_config'].get( 'digest_sns_topic', cls.DEFAULT_STATS_SNS_TOPIC_SUFFIX.format(prefix) ) return 'arn:aws:sns:{region}:{account_id}:{topic}'.format( region=config['global']['account']['region'], account_id=config['global']['account']['aws_account_id'], topic=topic )
[ "def get_sns_arn():\n try:\n response = sns.list_topics()\n while True:\n for res in response['Topics']:\n if \"QSTopicSNSEmail\" in res[\"TopicArn\"]:\n LOGGER.info('-- SNS Topic ARN: ' + res[\"TopicArn\"])\n return res[\"TopicArn\"]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute a query for all alerts for a rule so the user can be sent the results
def _query_alerts(self, stat): info_statement = stat.sql_info_statement LOGGER.debug('Querying alert info for rule \'%s\': %s', stat.rule_name, info_statement) response = self._athena_client.run_async_query(info_statement) return response['QueryExecutionId']
[ "def list_alerts(request):\n return request.db.query(Alert).all()", "def search_alerts():\n payload = {} # type: dict\n handle_time_filter(payload, {'type': 'relative', 'value': {'amount': 7, 'unit': 'day'}})\n handle_filters(payload)\n response = req('POST', 'alert', payload, {'detailed': 'true'}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish the alert statistics message to SNS
def _publish_message(self, stats): LOGGER.info('Sending daily message digest at %s', self._current_time) sns_client = boto3.resource('sns').Topic(self._topic_arn) subject = 'Alert statistics for {} staged rule(s) [{} UTC]'.format( len(stats), self._current_time ) sns_client.publish( Message=self._format_digest(stats), Subject=subject )
[ "def publish_sns(sns_message):\n\n print(\"Publishing message to SNS topic...\")\n sns_client.publish(TargetArn=environ['SNSArn'], Message=sns_message)\n return", "def send_statistics(self, payload):\n self.logger.debug('send Statistics {}'.format(payload))\n self.socket.send_pyobj((Interna...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the individual lines from the output A response cannot necessarily know ahead of time if its output contents are individual string lines or binary data that might contain line endings. A user of a response is responsible for knowing the context of the response, and thus can decide to interpret the output as individual lines using this property, which will return the output contents as an array of lines with their line endings stripped.
def lines(self): return self.output.split(self._newLine)
[ "def _get_lines(self, output):\n\n return output.decode(self.encoding).split('\\n')", "def _get_log_lines(self):\n return [\n log_line\n for log_line in self.captured_output.getvalue().split(\"\\n\")\n if log_line\n ]", "def getLinesContent(self):\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads and uncompresses dataset from url, expects tar.gz file
def download_dataset_and_uncompress(dataset_dir: str, url: str, filename: str=None): filename = filename or url.split('/')[-1] if not os.path.isfile(filename): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='download dataset') as pbar: urlretrieve( url, filename, pbar.hook) if not os.path.exists(dataset_dir): os.mkdir(dataset_dir) with tarfile.open(filename, 'r:gz') as tar: tar.extractall(dataset_dir) tar.close() statinfo = os.stat(filename) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
[ "def download_and_uncompress_tarball(tarball_url, dataset_dir):", "def download(url, dataset):\n print(\"Downloading data\")\n r = requests.get(url, allow_redirects=True)\n data_file_path = get_directory() + \"/data/raw/\"\n open(data_file_path + dataset, \"wb\").write(r.content)\n tar = tarfile.op...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should return an Iterable of services initialized for the current instance. You should override this when subclassing IRCClient.
def services(self): return []
[ "def services(self):\n return self.__services", "def _QueryServices(self):\n init_prop_header = 'init.svc.'\n props = self._Props()\n return dict([(k[len(init_prop_header):], v) for k, v in props.iteritems()\n if k.startswith(init_prop_header)])", "def getStatefulServices(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the given configuration parameter. The following order of precedence is used to return the parameter in order to deal with
def cfgget(self, name, default = NOPARAM): try: return self.params[name] except KeyError: pass if default != NOPARAM: return default try: return default_params[name] except KeyError: pass return None
[ "def fetchConfigParam(self):\r\n pass", "def _get_from_backend(self, parameter, section):\n value = None\n try:\n value = self.config_backend.get(section, parameter.id)\n except (NoOptionError, NoSectionError):\n # Ignore, we return None.\n pass\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends several messages at once. Because the bot is heavily threaded and threads are evil, it's probably best to call sendmany() instead of calling send multiple times when sending series of messages, so unrelated series don't occur at the same time. There's no danger of two messages sending inside of each other midstream, mind, as Transport.write() and self.send() are both highly threadsafe.
def sendmany(self, messages): with self.lock_many: for i in messages: self.send(i)
[ "def send_multiple_emails(cls, *messages):\n pass", "def send_messages(self, messages):\r\n if not messages:\r\n return\r\n self._lock.acquire()\r\n try:\r\n # The try-except is nested to allow for\r\n # Python 2.4 support (Refs #12147)\r\n t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait for the switch to be released (waits for a raising edge). YES. This is important with magnetic switches. And wait_released is already blocking so leave me alone with the time.sleep!
def wait_released(self): while not self.is_released(): time.sleep(0.01)
[ "def wait_released(self):\n GPIO.wait_for_edge(self.pin, GPIO.RISING)\n return", "def wait_pressed(self):\n GPIO.wait_for_edge(self.pin, GPIO.FALLING)\n return", "def _hw_wait(self):\n while self.read_status()[0] == Drivable.Status.BUSY:\n sleep(0.3)", "def wait()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the cross section of a single MRMSGrib object's data from point1 to point2 using cubic interpolation
def get_cross_cubic(grb, point1, point2): lons = grb.grid_lons lats = grb.grid_lats x, y = np.meshgrid(lons, lats) z = grb.data # [(x1, y1), (x2, y2)] line = [(point1[0], point1[1]), (point2[0], point2[1])] # cubic interpolation y_world, x_world = np.array(list(zip(*line))) col = z.shape[1] * (x_world - x.min()) / x.ptp() row = z.shape[0] * (y.max() - y_world ) / y.ptp() num = 100 row, col = [np.linspace(item[0], item[1], num) for item in [row, col]] valid_date = grb.validity_date valid_time = grb.validity_time # Extract the values along the line, using cubic interpolation zi = scipy.ndimage.map_coordinates(z, np.vstack((row, col)), order=1, mode='nearest') return zi
[ "def __curve_splicing(self):", "def plot_cross_cubic_single(grb, point1, point2, first=False):\n lons = grb.grid_lons\n lats = grb.grid_lats\n\n x, y = np.meshgrid(lons, lats)\n z = grb.data\n\n # [(x1, y1), (x2, y2)]\n line = [(point1[0], point1[1]), (point2[0], point2[1])]\n\n # cubic inter...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the cross section of a single MRMSGrib object's data from point1 to point2 using nearestneighbor interpolation
def get_cross_neighbor(grb, point1, point2): lons = grb.grid_lons lats = grb.grid_lats x, y = np.meshgrid(lons, lats) # Read the MRMS reflectivity data from the grib object's memory-mapped array z = np.memmap(grb.get_data_path(), dtype='float32', mode='r', shape=grb.shape) # Calculate the coordinates of a line defined by point1 & point2 to sample line = [(point1[0], point1[1]), (point2[0], point2[1])] y_world, x_world = np.array(list(zip(*line))) col = z.shape[1] * (x_world - x.min()) / x.ptp() row = z.shape[0] * (y.max() - y_world ) / y.ptp() num = 1000 row, col = [np.linspace(item[0], item[1], num) for item in [row, col]] valid_date = grb.validity_date valid_time = grb.validity_time d_lats, d_lons = calc_coords(point1, point2, num) # Sample the points along the line in order to get the reflectivity values zi = z[row.astype(int), col.astype(int)] return (zi, d_lats, d_lons)
[ "def getCrossings(x1,y1, x2,y2):\r\n x, y = _intersections(x1,y1, x2,y2)\r\n return x, y", "def corresponding_roi(rpc1, rpc2, x, y, w, h):\n m, M = altitude_range(rpc1, x, y, w, h, 0, 0)\n\n # build an array with vertices of the 3D ROI, obtained as {2D ROI} x [m, M]\n a = np.array([x, x, x, x, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes a vertical cross section slice of MRMS reflectivity data along the line defined by point1 & point2
def process_slice(base_path, slice_time, point1, point2): cross_sections = np.array([]) scans = fetch_scans(base_path, slice_time) # z = 33 grbs = get_grib_objs(scans, base_path, point1, point2) cross_sections, lats, lons = np.asarray(get_cross_neighbor(grbs[0], point1, point2)) for grb in grbs[1:]: x_sect, _, _ = get_cross_neighbor(grb, point1, point2) cross_sections = np.vstack((cross_sections, x_sect)) return (cross_sections, lats, lons)
[ "def run_mrms_xsect2(base_path, slice_time, point1, point2, wtlma_obj, wtlma_coords,\n show=False, save=False, outpath=None):\n print('Warning: Depricated')\n cross_data, lats, lons = process_slice(base_path, slice_time, point1, point2)\n plot_mrms_cross_section2(data=cross_data, lons=lons, lats=lat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filters the WTLMA dataframe to only include events that are within a certain distance of the line that defines the MRMS crosssection
def filter_by_dist(lma_df, dist, start_point, end_point, num_pts): if (not isinstance(dist, int)): raise TypeError('dist must be of type int') s_lat = start_point[0] s_lon = start_point[1] e_lat = end_point[0] e_lon = end_point[1] idxs = [] coords = [] alts = lma_df['alt'].tolist() xsect_az = int(calc_bearing(start_point, end_point)) for pt1 in calc_geod_pts(start_point, end_point, num_pts=num_pts): for idx, pt2 in enumerate(list(zip(lma_df['lat'].tolist(), lma_df['lon'].tolist()))): # reverse the order of pt1 since the function returns the coordinates # as (lon, lat) and calc_dist wants (lat, lon) curr_az = int(calc_bearing((pt1[1], pt1[0]), pt2)) if ((calc_dist((pt1[1], pt1[0]), pt2, units='m') <= dist) and (idx not in idxs) and (alts[idx] < 19000)): idxs.append(idx) coords.append([pt1[1], pt1[0]]) # Remove repeat indexes from list # MUCH faster to use a set than another conditional inside the nested loops #idxs = list(set(idxs)) subs_df = lma_df.iloc[idxs] return subs_df, coords
[ "def low_pass_filter_anomaly_detection(df,\n column_name,\n number_of_stdevs_away_from_mean):\n #60-day rolling average\n df[column_name+'_Rolling_Average']=df[column_name].rolling(window=60, center=True).mean()\n #60-day standard de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the bearing between two points
def calc_bearing(point1, point2): lat1 = math.radians(point1[0]) lat2 = math.radians(point2[0]) diffLong = math.radians(point2[1] - point1[1]) x = math.sin(diffLong) * math.cos(lat2) y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(diffLong)) initial_bearing = math.atan2(x, y) # Now we have the initial bearing but math.atan2 return values # from -180° to + 180° which is not what we want for a compass bearing # The solution is to normalize the initial bearing as shown below initial_bearing = math.degrees(initial_bearing) bearing = (initial_bearing + 360) % 360 return bearing
[ "def get_bearing(aLocation1, aLocation2): \n off_x = aLocation2.lon - aLocation1.lon\n off_y = aLocation2.lat - aLocation1.lat\n bearing = 90.00 + math.atan2(-off_y, off_x) * 57.2957795\n if bearing < 0:\n bearing += 360.00\n return bearing", "def get_bearing(self, aLocation1, aLocation2):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the coordinates for a number, num, of points along the line defined by point1 and point2
def calc_coords(point1, point2, num): xs = [point1[1], point2[1]] ys = [point1[0], point2[0]] lons = np.linspace(min(xs), max(xs), num) lats = np.linspace(min(ys), max(ys), num) return (lats, lons)
[ "def pixel_points(self,y1, y2, line):\r\n if line is None:\r\n return None\r\n slope, intercept = line\r\n x1 = int((y1 - intercept)/slope)\r\n x2 = int((y2 - intercept)/slope)\r\n y1 = int(y1)\r\n y2 = int(y2)\r\n return ((x1, y1), (x2, y2))", "def pixe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets NWS WWA polygons for a specified date & time
def get_wwa_polys(abs_path, date, time, wwa_type=['SV', 'TO']): polys = {} target_dt = _format_wwa_time(date, time) wwa_reader = shpreader.Reader(abs_path) if ('SV' in wwa_type): filtered_wwa_sv = [rec.geometry for rec in wwa_reader.records() if (rec.attributes['GTYPE'] == 'P') and (_valid_wwa_time(rec.attributes['ISSUED'], rec.attributes['EXPIRED'], target_dt)) and (rec.attributes['PHENOM'] == 'SV')] polys['SV'] = filtered_wwa_sv if ('TO' in wwa_type): filtered_wwa_to = [rec.geometry for rec in wwa_reader.records() if (rec.attributes['GTYPE'] == 'P') and (_valid_wwa_time(rec.attributes['ISSUED'], rec.attributes['EXPIRED'], target_dt)) and (rec.attributes['PHENOM'] == 'TO')] polys['TO'] = filtered_wwa_to return polys
[ "def generate_wind():\n# Taken by converting UTM Zone 11 coordinates on\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# These values specific to files called yosemite_landscape_12-03-2019_0900_120m\n west_lon = -120.006255\n east_lon = -119.4736\n south_lat = 37.464649\n nort...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
BRIEF Get the average word count for all rows & versions
def AnalyticsQuery(table, full_col_name): total = 0 count = 0.0 for row in table.fetch_all_rows(): total += len(Regex.WORD.findall(row[full_col_name])) count += 1.0 print("(Analytics) AverageWordCount({0}) = {1}".format(full_col_name, total / count)) print(' ') sys.stdout.flush()
[ "def get_average_word_length(self):\n\n if self.word_count_list is None:\n self.tokenize_documents()\n\n return self.word_count_list.apply(lambda x: np.average([len(w) for w in x]))", "def avg_word_vectors(wordlist,size): \n sumvec=np.zeros(shape=(1,size))\n wordcnt=0\n for w ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that root resource works correctly with API format.
def testRootAsAPIView(self): response = self.client.get(self.url(), data={'format': 'api'}) self.assertEqual(response.status_code, 200) self.assertIn("Resource Instance", response.content.decode('utf-8'))
[ "def testRootAPITrailingSlash(self):\n response = self.client.get(self.url().rstrip('/'),\n data={'format': 'api'})\n self.assertEqual(response.status_code, 301)\n self.assertEqual(response.url.replace('http://testserver', ''), self.url())", "def test_default...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks redirect to root resource with trailing slash.
def testRootAPITrailingSlash(self): response = self.client.get(self.url().rstrip('/'), data={'format': 'api'}) self.assertEqual(response.status_code, 301) self.assertEqual(response.url.replace('http://testserver', ''), self.url())
[ "def _HandleRoot(self, request):\n \n raise HttpRedirect, WebRequest(\"/static/index.html\")", "def warn_trailing_slash(self, dest, uri):\n if uri == '%s/' % self.get_uri(dest):\n self.log.warning(\n 'It seems that the url given do not need the trailing slash (%s). '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that is_collection flag can't be changed by PATCH request.
def testChangeResourceTypeForbidden(self): response = self.client.patch(self.url(self.dir.path), data={ "is_collection": False }) self.assertEqual(response.status_code, 400) error = 'Resource type cannot be changed after creation' self.assertDictEqual(response.data, {'is_collection': [error]})
[ "def is_collection(path):\n return not is_root(path) and not is_doc(path)", "def test_disallow_patch_many(self):\r\n response = self.app.patch('/api/person', data=dumps(dict(name='foo')))\r\n assert response.status_code == 405", "def isFromCollection(self) -> bool:\n return self._is_from...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a batch norm module in target_model that corresponds immediately following a given convolution node in the model's NNCFGraph representation.
def get_bn_for_conv_node_by_name(target_model: NNCFNetwork, conv_node_name: NNCFNodeName) -> Optional[torch.nn.Module]: graph = target_model.nncf.get_original_graph() conv_node = graph.get_node_by_name(conv_node_name) bn_node = get_bn_node_for_conv(graph, conv_node) if bn_node is None: return None bn_module = target_model.nncf.get_containing_module(bn_node.node_name) return bn_module
[ "def forward_pass_on_convolutions(self, x):\n conv_output = None\n for module_pos, module in self.model.encoder.encoder._modules.items():\n \n if int(module_pos) == self.target_block:\n# if int(module_pos) == self.target_layer:\n for sub_module_pos, sub_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize masks in graph for mask propagation algorithm
def init_output_masks_in_graph(graph: NNCFGraph, nodes: List): for node in graph.get_all_nodes(): node.attributes.pop("output_mask", None) for minfo in nodes: mask = minfo.operand.binary_filter_pruning_mask nncf_node = graph.get_node_by_id(minfo.nncf_node_id) nncf_node.attributes["output_mask"] = PTNNCFTensor(mask)
[ "def _initialize_mask(self):\n if 'locally_connected' in self.mask_type:\n assert self.neighbour_matrix is not None\n L = self.neighbour_matrix.T\n assert L.shape == (self.in_joints, self.in_joints)\n if 'learnable' not in self.mask_type:\n self.mask...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates output shape of convolution layer by input edge.
def _calculate_output_shape(graph: NNCFGraph, node: NNCFNode) -> Tuple[int, ...]: in_edge = graph.get_input_edges(node)[0] shape = list(in_edge.tensor_shape) attrs = node.layer_attributes if isinstance(attrs, ConvolutionLayerAttributes): shape = shape[2:] for i, _ in enumerate(shape): if attrs.transpose: shape[i] = (shape[i] - 1) * attrs.stride[i] - 2 * attrs.padding_values[i] + attrs.kernel_size[i] else: shape[i] = (shape[i] + 2 * attrs.padding_values[i] - attrs.kernel_size[i]) // attrs.stride[i] + 1 elif isinstance(attrs, LinearLayerAttributes): shape = shape[:-1] + [attrs.out_features] else: raise RuntimeError(f"Unexpected node type {node.node_type} is fed to _calculate_output_shape") return tuple(shape)
[ "def _conv_output_shape(cls, h_w: Union[tuple, int],\n kernel_size: Union[tuple, int],\n stride: Union[tuple, int],\n pad: Union[tuple, int] = 0,\n dilation=1):\n # source https://discuss.pytorch.org/t/uti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects output dimension shapes for convolutions and fully connected layers from the connected edges in the NNCFGraph.
def collect_output_shapes(graph: NNCFGraph) -> Dict[NNCFNodeName, List[int]]: modules_out_shapes = {} output_shape_collecting_info = [ (NNCF_GENERAL_CONV_MODULES_DICT, slice(2, None)), (NNCF_LINEAR_MODULES_DICT, slice(None)), ] for nncf_module_type, shape_slice in output_shape_collecting_info: for node in graph.get_nodes_by_types([v.op_func_name for v in nncf_module_type]): output_edges = graph.get_output_edges(node) if output_edges: out_edge = output_edges[0] out_shape = out_edge.tensor_shape[shape_slice] else: # For disconnected NNCFGraph when node have no output edge out_shape = _calculate_output_shape(graph, node) nncf_logger.debug(f"Node {node.node_name} has no output edge in NNCFGraph") modules_out_shapes[node.node_name] = out_shape return modules_out_shapes
[ "def output_shape(self, l_in):\r\n out_channel, l_out = self.in_channel, l_in\r\n for conv1d_unit in self.conv_layers:\r\n out_channel, l_out = conv1d_unit.output_shape(l_out)\r\n return l_out, out_channel", "def output_shapes(self, l_in):\r\n shapes = [(self.in_channel, l_i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Each function in list_func_per_ax takes an ax as input and draw something on it
def ax_func_to_plot( list_func_per_ax, n_per_row=3, title=None, title_font_size=10, width=15, height_row=10, saving_path=None, rec_padding=(0, 0, 0, 0), x_labels=None, y_labels=None, outer_axis_labels_only=False, show=True, ): n_rows = int(np.ceil(len(list_func_per_ax) / n_per_row)) fig, axes = plt.subplots( nrows=n_rows, ncols=n_per_row, figsize=(width, height_row * n_rows) ) for ax, func in zip(axes.flatten(), list_func_per_ax): func(ax) # fig.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1) fig.suptitle(title, fontsize=title_font_size) for ax in axes.flat: ax.set(xlabel=x_labels, ylabel=y_labels) if outer_axis_labels_only: for ax in axes.flat: ax.label_outer() handles, labels = ax.get_legend_handles_labels() fig.legend(handles, labels, loc=1) if rec_padding: fig.tight_layout(rect=rec_padding) if saving_path: fig.savefig(saving_path) if show: plt.show()
[ "def subax_call(self, method, args, kwargs):\n result = []\n for ax in self.axs:\n if ax.xaxis.get_scale() == \"log\":\n ax.xaxis.set_major_locator(ticker.LogLocator())\n else:\n ax.xaxis.set_major_locator(ticker.AutoLocator())\n if ax.yax...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the dimension of the entity this DOF is associated with.
def entity_dim(self) -> int: return self.entity[0]
[ "def dimension(self):\n return self.field(Field.POSITION).shape[1]", "def getDimension():\n ierr = c_int()\n api__result__ = lib.gmshModelGetDimension(\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGetDimension returned non-z...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of the entity this DOF is associated with.
def entity_number(self) -> int: return self.entity[1]
[ "def entity_dim(self) -> int:\n return self.entity[0]", "def num_entities(self) -> int:\n # TODO: Need to add functions in pymilvus-distributed\n return 0\n # raise NotImplementedError", "def num_entities(self) -> int:\n conn = self._get_connection()\n status = conn.get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the entity the DOF is associated with in TeX format.
def entity_tex(self) -> str: if self.entity[0] == self.reference.tdim: return "R" else: return f"{'vefc'[self.entity[0]]}_{{{self.entity[1]}}}"
[ "def _get_entity(self) -> \"adsk::core::Ptr< adsk::core::Base >\" :\n return _core.Selection__get_entity(self)", "def Entity(self) -> _n_0_t_1:", "def _get_entity_element(e, t, key, cols=[], ins=None):\n if cols:\n output = '<Entity>\\n'\n output += _static_columns(e, t, key, ins)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a representation of the functional as TeX, and list of terms involved.
def get_tex(self) -> typing.Tuple[str, typing.List[str]]: assert isinstance(self.point, VectorFunction) if len(self.point) == 1: desc = "v\\mapsto " desc += f"v'({','.join([_to_tex(i, True) for i in self.point])})" return desc, [] desc = "v\\mapsto" desc += "\\frac{\\partial" if sum(self.derivative) > 1: desc += f"^{{{sum(self.derivative)}}}" desc += "}{" for v, i in zip("xyz", self.derivative): if i > 0: desc += f"\\partial {v}" if i > 1: desc += f"^{{{i}}}" desc += "}" desc += f"v({','.join([_to_tex(i, True) for i in self.point])})" return desc, []
[ "def get_tex(self) -> typing.Tuple[str, typing.List[str]]:\n desc = \"\\\\boldsymbol{v}\\\\mapsto\"\n desc += \"\\\\nablaa\\\\cdot\\\\boldsymbol{v}\"\n desc += \"(\" + \",\".join([_to_tex(i, True) for i in self.dof_point()]) + \")\"\n return desc, []", "def as_tex(self) -> str:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a representation of the functional as TeX, and list of terms involved.
def get_tex(self) -> typing.Tuple[str, typing.List[str]]: desc = "\\boldsymbol{v}\\mapsto" desc += "\\nablaa\\cdot\\boldsymbol{v}" desc += "(" + ",".join([_to_tex(i, True) for i in self.dof_point()]) + ")" return desc, []
[ "def get_tex(self) -> typing.Tuple[str, typing.List[str]]:\n assert isinstance(self.point, VectorFunction)\n if len(self.point) == 1:\n desc = \"v\\\\mapsto \"\n desc += f\"v'({','.join([_to_tex(i, True) for i in self.point])})\"\n return desc, []\n desc = \"v\\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic terminal state check, true when maximum depth is reached or the game has ended.
def is_terminal(depth, board): return depth <= 0 or board.is_game_over()
[ "def terminal_test(self, state) -> bool:\n return state.depth == self.depth_limit or is_end_game(state)", "def _is_exit_from_terminal_state(self, curr_state, next_state, curr_is_done, next_is_done):\n return next_is_done and curr_is_done# and (next_state == curr_state)", "def is_win(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do a minimax search to the specified depth on the specified board. board the ConnectFourBoard instance to evaluate depth the depth of the search tree (measured in maximum distance from a leaf to the root) eval_fn (optional) the evaluation function to use to give a value to a leaf of the tree; see "focused_evaluate" in the lab for an example Returns an integer, the column number of the column that the search determines you should add a token to
def minimax(board, depth, eval_fn = basic_evaluate, get_next_moves_fn = get_all_next_moves, is_terminal_fn = is_terminal, verbose = True): raise NotImplementedError
[ "def minimax(board, depth, eval_fn=basic_evaluate,\n verbose=False):\n\n best_val = None\n\n for move, new_board in board.get_all_next_moves():\n val = -1 * minimax_find_board_value(new_board, depth - 1, eval_fn)\n if best_val == None or val > best_val[0]:\n best_val = (val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes distance between self and a test image. Returns scalar value
def compute_distance(self, image): diffs = image - self._array total_dist = np.sqrt(np.sum(diffs**2)) return total_dist
[ "def get_distance(self, index):\n return (np.linalg.norm(self.image.astype('float') - self.population[index].image.astype('float'))) / (\n self.image.shape[0] * self.image.shape[1])", "def testDistance(self):\n\n # testList holds a couple 3-tuple (variable1, variable2, result)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare one image to learned images, and update values for correct and wrongly predicted images
def learn_one_image(images_learned, image_features, image_value, lam_val): for image in images_learned: # compare image pass
[ "def predict_single():\n path = 'outputs/gray/img-8-epoch-29.jpg'\n img = Image.open(path)\n img = img.resize((224,224))\n img_original = np.array(img)\n\n gray = rgb2gray(img_original)\n x = TF.to_tensor(gray).float()\n x.unsqueeze_(0)\n model = ColorizationUpsampling()\n model.load_stat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Command line tool for adding virtual makers to Coda Motion C3D files from the information in exported Mat files. The tool assume the c3d and mat files have the same filename but different extensions. If called without arguments the tool will find all matching c3d/mat files in the working directory.
def main(c3dfile, overwrite): if not c3dfile: click.confirm('Combine all C3D/Mat files in current dir?', abort=True) filelist = [(f, os.path.splitext(f)[0]+'.mat') for f in os.listdir('.') if f.upper().endswith('.C3D')] elif os.path.isfile(c3dfile): matfile = os.path.splitext(c3dfile)[0]+'.mat' if not os.path.isfile(matfile): raise click.UsageError('No mat file found matching {}' ''.format(c3dfile)) filelist = [(c3dfile, matfile)] else: raise click.UsageError('No such file {}'.format(c3dfile)) filelist = [(str(f), str(m)) for f, m in filelist if os.path.exists(f) and os.path.exists(m)] for c3dfile, matfile in filelist: postfix = '' if overwrite else '_updated' new_c3d = combine_files(c3dfile, matfile, postfix=postfix) print('Updated: {}'.format(new_c3d))
[ "def main():\n dirlist = ['./']\n dir_path = os.getcwd()\n names = os.listdir(dir_path)\n for n in names:\n if 'Location' in n:\n dirlist.append(n)\n if '-fmt' in sys.argv:\n ind = sys.argv.index(\"-fmt\")\n fmt = sys.argv[ind+1]\n else:\n fmt = 'png'\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the parameter of this ParameterStatusDTO. The name of the Parameter
def parameter(self, parameter): self._parameter = parameter
[ "def set_parameter(self, parameter):\n self.parameter = parameter", "def set_parameter(self, a_name, a_value):\n self.parameters[str(a_name)] = a_value\n return self", "def setup_parameter(self, parameter, value):\n self.__dict__[parameter] = value", "def set_parameter(cls, param_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a 3d plot of the given 3d points.
def plot_3d(pts): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') xs, ys, zs = zip(*pts) ax.scatter(xs, ys, zs, c='r', marker='o') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show()
[ "def plot_3d(x, y, z, df, cmap = plt.cm.seismic_r):\n\n fig = plt.figure(figsize = (10, 10))\n \n ax = fig.add_subplot(111, projection='3d')\n \n # 3d scatterplot\n ax.scatter(df[x], df[y],\n df[z], c = df[z], \n cmap = cmap, s = 40)\n\n # Plot labeling\n ax.set_x...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds whether product is available or not in particular warehouse.
def get_product_available(self): print("\n\n\n\n in get_product_available") if self._context is None: self._context = {} location_obj = self.env['stock.location'] warehouse_obj = self.env['stock.warehouse'] shop_obj = self.env['sale.shop'] states = self._context.get('states', []) what = self._context.get('what', ()) if not self._ids: ids = self.search([]) res = {}.fromkeys(ids, 0.0) if not self._ids: return res if self._context.get('shop', False): warehouse_id = shop_obj.read(['warehouse_id'])['warehouse_id'][0] if warehouse_id: self._context['warehouse'] = warehouse_id if self._context.get('warehouse', False): lot_id = warehouse_obj.read(['lot_stock_id'])['lot_stock_id'][0] if lot_id: self._context['location'] = lot_id if self._context.get('location', False): if type(self._context['location']) == type(1): location_ids = [self._context['location']] elif type(self._context['location']) in (type(''), type(u'')): location_ids = location_obj.search( [('name', 'ilike', self._context['location'])]) else: location_ids = self._context['location'] else: location_ids = [] wids = warehouse_obj.search([]) if not wids: return res for w in warehouse_obj.browse(wids): location_ids.append(w.lot_stock_id.id) # build the list of ids of children of the location given by id if self._context.get('compute_child', True): child_location_ids = location_obj.search( [('location_id', 'child_of', location_ids)]) location_ids = child_location_ids or location_ids # this will be a dictionary of the product UoM by product id product2uom = {} uom_ids = [] for product in self.read(['uom_id']): product2uom[product['id']] = product['uom_id'][0] uom_ids.append(product['uom_id'][0]) # this will be a dictionary of the UoM resources we need for conversion # purposes, by UoM id uoms_o = {} for uom in self.env['uom.uom'].browse(uom_ids): uoms_o[uom.id] = uom results = [] results2 = [] from_date = self._context.get('from_date', False) to_date = self._context.get('to_date', False) date_str = False date_values 
= False where = [tuple(location_ids), tuple( location_ids), tuple(ids), tuple(states)] if from_date and to_date: date_str = "date>=%s and date<=%s" where.append(tuple([from_date])) where.append(tuple([to_date])) elif from_date: date_str = "date>=%s" date_values = [from_date] elif to_date: date_str = "date<=%s" date_values = [to_date] if date_values: where.append(tuple(date_values)) prodlot_id = self._context.get('prodlot_id', False) prodlot_clause = '' if prodlot_id: prodlot_clause = ' and prodlot_id = %s ' where += [prodlot_id] # TODO: perhaps merge in one query. if 'in' in what: # all moves from a location out of the set to a location in the set self._cr.execute( 'select sum(product_qty), product_id, product_uom ' 'from stock_move ' 'where location_id NOT IN %s ' 'and location_dest_id IN %s ' 'and product_id IN %s ' 'and state IN %s ' + (date_str and 'and ' + date_str + ' ' or '') + ' ' + prodlot_clause + 'group by product_id,product_uom', tuple(where)) results = self._cr.fetchall() if 'out' in what: # all moves from a location in the set to a location out of the set self._cr.execute( 'select sum(product_qty), product_id, product_uom ' 'from stock_move ' 'where location_id IN %s ' 'and location_dest_id NOT IN %s ' 'and product_id IN %s ' 'and state in %s ' + (date_str and 'and ' + date_str + ' ' or '') + ' ' + prodlot_clause + 'group by product_id,product_uom', tuple(where)) results2 = self._cr.fetchall() # Get the missing UoM resources uom_obj = self.env['uom.uom'] uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2) if self._context.get('uom', False): uoms += [self._context['uom']] uoms = filter(lambda x: x not in uoms_o.keys(), uoms) if uoms: uoms = uom_obj.browse(list(set(uoms))) for o in uoms: uoms_o[o.id] = o # TOCHECK: before change uom of product, stock move line are in old # uom. 
self._context.update({'raise-exception': False}) # Count the incoming quantities for amount, prod_id, prod_uom in results: amount = uom_obj._compute_qty_obj(uoms_o[prod_uom], amount, uoms_o[self._context.get('uom', False) or product2uom[prod_id]]) res[prod_id] += amount # Count the outgoing quantities for amount, prod_id, prod_uom in results2: amount = uom_obj._compute_qty_obj(uoms_o[prod_uom], amount, uoms_o[self._context.get('uom', False) or product2uom[prod_id]]) res[prod_id] -= amount return res
[ "def get_product_available(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n states = context.get('states',[])\n what = context.get('what',())\n if not ids:\n #ids = self.search(cr, uid, [])\n ids = self.pool.get('product.product')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inject game events as test data to PubSub.
def _inject_pubsub_game_events(self, topic, message_count): logging.debug( 'Injecting %d game events to topic %s', message_count, topic.name) for _ in range(message_count): self.pub_client.publish( topic.name, (self.INPUT_EVENT % self._test_timestamp).encode('utf-8'))
[ "def test_generate_event(self):\n pass", "def test_got_data(self):\n # Create and initialize the instrument driver with a mock port agent\n driver = InstrumentDriver(self._got_data_event_callback)\n self.assert_initialize_driver(driver)\n\n self.assert_raw_particle_published(dri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }