Dataset columns: query (string, lengths 9 – 9.05k), document (string, lengths 10 – 222k), negatives (list, 19 – 20 items), metadata (dict).
Returns the index corresponding to the given class label.
def lookup_class_idx(self, label):
    """Return the index corresponding to the given class label."""
    label_table = self.class_labels
    return label_table[label]
[ "def get_class_index(label):\n if isinstance(label,str) is False:\n basic.outputlogMessage('input label must be a string')\n assert(False)\n length = len(class_label)\n for i in range(0,length):\n if label.lower()==class_label[i]:\n return i\n #if not found\n basic.out...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a function mapping to each element in the feature data.
def apply_fn(self, fn):
    """Apply *fn* to every element of the 'x' feature array of each split."""
    self.check_Data()
    for data_ in self.processed_data.values():
        data_['x'] = np.array([fn(sample) for sample in data_['x']])
[ "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map(self, function):\n pass", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def appl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a new MLP using the nn.Sequential class. Returns the constructed nn.Sequential model.
def generate(self):
    """Build an MLP as an ``nn.Sequential`` and return it.

    Each hidden layer is Linear -> activation -> (optional dropout);
    a final Linear projects to ``self.n_classes``.
    """
    layers = []
    # Pair each layer's input size with its output size.
    in_sizes = [self.n_features] + list(self.hidden_sizes[:-1])
    for n_in, n_out in zip(in_sizes, self.hidden_sizes):
        layers.append(nn.Linear(n_in, n_out))
        self._activation(layers, self.activation)
        self._dropout(layers, self.dropout)
    layers.append(nn.Linear(self.hidden_sizes[-1], self.n_classes))
    mlp = nn.Sequential(*layers)
    num_params = sum(p.numel() for p in mlp.parameters() if p.requires_grad)
    print("Created MLP with " + str(num_params) + " learnable params")
    return mlp
[ "def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])", "def mlpModel(input1_shape, layers=[4]):\n model = Sequential()\n last_idx = len(layers) - 1\n for (idx, num_units) in enumerate(layers):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new activation function and adds it to the list of components.
def _activation(self,components,activation): if activation == "ReLU": components.append(nn.ReLU()) elif activation == "Sigmoid": components.append(nn.Sigmoid()) else: raise Exception("Invalid activation fn: "+activation)
[ "def construct_activation_function(self):\n # Add the activation function\n if not self.activation_function is None:\n # Check if it is a string\n if isinstance(self.activation_function, str):\n activation_function = get_activation_function_by_name(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a dropout object to the list of components
def _dropout(self,components,dropout=None): if dropout is not None: components.append(nn.Dropout(dropout))
[ "def add(self, component) -> None:\n pass", "def addComponent(self,component):\r\n self.append(component)", "def add(self, component):\n self.components.add(component)", "def add_depot(self, depot):\n self.destination_list.append(depot)", "def add_component(self, componentInstanc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits a DataFrame into 3 distinct DataFrames based on the given percentages and returns a dict of the data.
def split_data(text_df, splits=None, rand_perm=True):
    """Split a DataFrame into named partitions by percentage.

    Args:
        text_df: the DataFrame to split.
        splits: mapping of split name -> fraction; must sum to 1.
            Defaults to {'train': 0.6, 'val': 0.1, 'test': 0.3}.
        rand_perm: when True, randomly permute rows before splitting.

    Returns:
        dict mapping each split name to its DataFrame slice.

    Raises:
        Exception: if the fractions do not sum to 1 (to 4 decimals).
    """
    if splits is None:
        splits = {'train': 0.6, 'val': 0.1, 'test': 0.3}
    if np.round(np.sum(list(splits.values())), 4) != 1:
        raise Exception("Split percentages do not sum to 1")
    size = len(text_df)
    if rand_perm:
        perm_idx = np.random.permutation(size)
    else:
        perm_idx = np.arange(size)
    text_df = text_df.iloc[perm_idx, :]
    # Per-split row counts; cumulative sums give the cut points.
    counts = np.round(np.array(list(splits.values())) * size).astype(np.int32)
    boundaries = np.cumsum(counts)[:-1]
    frames = np.split(text_df, boundaries, axis=0)
    # (The original also had a dead `all_data = dict()` assignment that
    # was immediately overwritten; it has been removed.)
    return {name: frame for name, frame in zip(splits.keys(), frames)}
[ "def split_data(df_data, clusters):\n\n if clusters is None:\n\n return {0: df_data}\n\n return {\n k: df_data.loc[clusters.index[clusters == k]]\n for k in clusters.unique()\n }", "def split_train_dev_set(df, percent=0.2):\n train = []\n dev = []\n for k, g in df.groupby(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads an English > French text file and filters the lines based on the given filter_fn. If filter_fn is None, a default filter is used that keeps sentences beginning with common pronoun phrases such as "i am", "he is", "she is", "they are", "you are" and "we are".
def filter_nmt_file(filename, filter_fn=None):
    """Read a tab-separated English->French file and keep matching lines.

    Args:
        filename: path to a file whose lines are "english\\tfrench".
        filter_fn: predicate applied to the English text.  When None, a
            default filter keeps lines starting (case-insensitively) with
            'i am', 'he is', 'she is', 'they are', 'you are' or 'we are'.

    Returns:
        list of lowercased "english\\tfrench" strings passing the filter
        (the French part retains its trailing newline, as in the input).
    """
    if filter_fn is None:
        # str.startswith accepts a tuple, replacing the original chain
        # of six `or` clauses.
        prefixes = ('i am', 'he is', 'she is', 'they are', 'you are', 'we are')
        filter_fn = lambda en: en.lower().startswith(prefixes)
    filtered_lines = []
    with open(filename) as file:
        # Iterate the file directly instead of materializing readlines().
        for line in file:
            text = line.split('\t')
            en = text[0]
            fra = text[1]
            if filter_fn(en):
                filtered_lines.append(en.lower() + '\t' + fra.lower())
    return filtered_lines
[ "def on_filter_process(self, filter_terms=None):\n if not filter_terms:\n filter_terms = self.ui_filterLine.text()\n # break up the string term based common separator characters\n terms = lists.fragment(terms=filter_terms, splits=list(' ,'),\n clean=True...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of lines of English/French text, creates a DataFrame with train/val/test split labels.
def create_nmt_data(text, train_pct=0.7, val_pct=0.15):
    """Build a DataFrame of source/target sentence pairs with split labels.

    Args:
        text: iterable of tab-separated "source\\ttarget" lines.
        train_pct: fraction of rows labelled 'train'.
        val_pct: fraction labelled 'val'; the remainder becomes 'test'.

    Returns:
        A shuffled DataFrame with columns 'source_language',
        'target_language' and 'split'.

    Raises:
        Exception: if train_pct + val_pct >= 1.
    """
    if train_pct + val_pct >= 1:
        raise Exception("train_pct + val_pct must be < 1.0")
    source = []
    target = []
    for line in text:
        # Fix: the original reassigned the parameter name `text` here,
        # shadowing the input iterable; use a separate name.
        fields = line.split('\t')
        source.append(fields[0])
        target.append(fields[1])
    text_df = pd.DataFrame({'source_language': source, 'target_language': target})
    text_df['split'] = 'train'
    text_df = text_df.sample(frac=1).reset_index(drop=True)
    idx = int(len(text_df) * train_pct)
    # .loc label slicing is endpoint-inclusive, so each boundary row is
    # claimed by the later split — matching the original behavior.
    text_df.loc[:idx, 'split'] = 'train'
    idx2 = idx + int(len(text_df) * val_pct)
    text_df.loc[idx:idx2, 'split'] = 'val'
    text_df.loc[idx2:, 'split'] = 'test'
    return text_df
[ "def ucf_read_train_test_split(self, path):\n # get the test train split txt file\n train = []\n test = []\n for (dirpath, dirnames, filenames) in os.walk(path):\n train += [os.path.join(path, file) for file in filenames if file.startswith('trainlist')]\n test += [o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a glove word embedding text file and generates a DataFrame with the embeddings.
def process_glove_data(filename):
    """Parse a GloVe embedding text file into a DataFrame.

    Each line is "<word> <v1> <v2> ...".  The returned DataFrame is
    indexed by the lowercased words, one column per vector component.
    """
    words = []
    vectors = []
    with open(filename, encoding="utf8") as file:
        for line in file:
            token, *values = line.split(' ')
            words.append(token)
            vectors.append([float(v) for v in values])
    embed_df = pd.DataFrame(np.array(vectors, dtype=float), index=words)
    embed_df.index = embed_df.index.str.lower()
    return embed_df
[ "def read_glove_source(self):\n embeddings = []\n word2vec = {}\n idx2word = []\n with open(self.source) as file:\n lines = file.readlines()\n for line in lines:\n data = line.split()\n word = data[0]\n vector = np.asarra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date.
def earn_dividends(self, cash_dividends, stock_dividends):
    """Record cash/stock dividend amounts owed for each pay date.

    The earned amounts are stored so that they can be paid out on the
    dividends' pay_dates.
    """
    for cash_dividend in cash_dividends:
        # Only mark stats dirty when a dividend is actually paid.
        self._dirty_stats = True
        owed = self.positions[cash_dividend.instrument].earn_dividend(
            cash_dividend,
        )
        self._unpaid_dividends.setdefault(
            cash_dividend.pay_date, []
        ).append(owed)

    for stock_dividend in stock_dividends:
        self._dirty_stats = True
        owed = self.positions[
            stock_dividend.instrument
        ].earn_stock_dividend(stock_dividend)
        self._unpaid_stock_dividends.setdefault(
            stock_dividend.pay_date, []
        ).append(owed)
[ "def _calculate_next_dividend(self, symbols):\n yahoo_financials = YahooFinancials(symbols)\n logging.debug(\"[_calculate_next_dividend] Fetching get_exdividend_date\")\n data = yahoo_financials.get_exdividend_date()\n logging.debug(\"[_calculate_next_dividend] Finished fetching get_exdividend_date\")\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends.
def pay_dividends(self, next_trading_day):
    """Pay out dividends due on *next_trading_day*.

    Cash dividends are summed and removed from the unpaid ledger; stock
    dividends add shares to the relevant positions.  Amounts may be
    negative for short positions, representing reimbursement of the
    stock's lender.

    Returns:
        The net cash payment (float, possibly negative).
    """
    # pop() both fetches the payments and marks them as paid by
    # removing them from the unpaid ledger.
    payments = self._unpaid_dividends.pop(next_trading_day, [])
    net_cash_payment = sum((payment['amount'] for payment in payments), 0.0)

    # Add stock for any stock dividends paid; values may be negative
    # for short positions.  (Note: the stock ledger is intentionally
    # not cleared here, matching the original behavior.)
    stock_payments = self._unpaid_stock_dividends.get(next_trading_day, [])
    for stock_payment in stock_payments:
        payment_instrument = stock_payment['payment_instrument']
        share_count = stock_payment['share_count']
        # Create a Position for the stock dividend if we don't already
        # own the instrument.
        if payment_instrument not in self.positions:
            self.positions[payment_instrument] = Position(
                payment_instrument,
            )
        position = self.positions[payment_instrument]
        position.amount += share_count

    return net_cash_payment
[ "def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow", "def get_cash(self):\n\n\t\tpass", "def test_discounted_payment_below_debit(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Force a computation of the current portfolio state.
def update_portfolio(self):
    """Force a computation of the current portfolio state."""
    if not self._dirty_portfolio:
        # Nothing has changed since the last sync.
        return

    portfolio = self._portfolio
    tracker = self.position_tracker

    portfolio.positions = tracker.get_positions()
    stats = tracker.stats
    position_value = stats.net_value
    portfolio.positions_value = position_value
    portfolio.positions_exposure = stats.net_exposure

    # Apply any payouts before valuing the portfolio.
    self._cash_flow(self._get_payout_total(tracker.positions))

    start_value = portfolio.portfolio_value
    end_value = portfolio.cash + position_value
    portfolio.portfolio_value = end_value

    pnl = end_value - start_value
    period_return = pnl / start_value if start_value != 0 else 0.0

    portfolio.pnl += pnl
    # Compound the period return into the cumulative return.
    portfolio.returns = (1 + portfolio.returns) * (1 + period_return) - 1

    # The portfolio has been fully synced.
    self._dirty_portfolio = False
[ "def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the current portfolio. Notes This is cached, repeated access will not recompute the portfolio until the portfolio may have changed.
def portfolio(self):
    """Return the current portfolio.

    Notes:
        This is cached; repeated access will not recompute the
        portfolio until the portfolio may have changed.
    """
    self.update_portfolio()
    return self._immutable_portfolio
[ "def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override fields on ``self.account``.
def override_account_fields(self,
                            settled_cash=not_overridden,
                            accrued_interest=not_overridden,
                            buying_power=not_overridden,
                            equity_with_loan=not_overridden,
                            total_positions_value=not_overridden,
                            total_positions_exposure=not_overridden,
                            regt_equity=not_overridden,
                            regt_margin=not_overridden,
                            initial_margin_requirement=not_overridden,
                            maintenance_margin_requirement=not_overridden,
                            available_funds=not_overridden,
                            excess_liquidity=not_overridden,
                            cushion=not_overridden,
                            day_trades_remaining=not_overridden,
                            leverage=not_overridden,
                            net_leverage=not_overridden,
                            net_liquidation=not_overridden):
    """Override fields on ``self.account``.

    Only the keyword arguments actually supplied (i.e. not left at the
    ``not_overridden`` sentinel) are recorded as overrides.
    """
    # Mark that the account is dirty so the fields are recomputed with
    # the overrides applied.
    self._dirty_account = True
    # locals() is snapshotted before any other local variable is
    # created, so it contains exactly ``self`` plus the parameters.
    snapshot = dict(locals())
    del snapshot['self']
    self._account_overrides = {
        field: value
        for field, value in snapshot.items()
        if value is not not_overridden
    }
[ "def fix_account(self, account):\n pass", "def account(self, account):\n\n self._account = account", "def onAccountUpdate(self, data):\n pass", "def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_accoun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called when the partition's reference count reaches zero. If the partition contains a temporary file which is not referenced by any other partition then the temporary file is removed from disk. If the partition contains a nontemporary file which is not referenced by any other partition then the file is closed.
def __del__(self):
    """Release this partition's claim on its subarray.

    Called when the partition's reference count reaches zero.  A
    temporary file not referenced by any other partition is removed
    from disk; a non-temporary file not referenced by any other
    partition is closed.
    """
    subarray = self._subarray

    # A unique subarray has two external references plus the one
    # created inside this method, i.e. three in total; more than three
    # means other partitions still reference it.
    if getrefcount is not None:
        self._decrement_file_counter()
        if subarray is None or getrefcount(subarray) > 3:
            return
    else:
        # getrefcount has itself been deleted or is in the process of
        # being torn down.
        return

    _partition_file = getattr(subarray, "_partition_file", None)
    if _partition_file is not None:
        # This partition holds a temporary file not referenced by any
        # other partition on this process: if no lock files are
        # present, remove it from disk.
        _remove_temporary_files(_partition_file)
    else:
        try:
            if FileArray is not None and isinstance(subarray, FileArray):
                try:
                    filename = subarray.get_filename()
                except Exception:
                    filename = None

                if self.file_counter.get(filename, 999) <= 0:
                    # A non-temporary file no longer referenced by any
                    # partition: close it.
                    subarray.close()
        except Exception:
            # FileArray has likely been torn down during interpreter
            # shutdown; do nothing.
            pass
[ "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def __del__(self):\n if self.has_temp_file:\n logging.warning('Tem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add i to the count of subarrays referencing the file of this partition's subarray. Only do this if self._subarray is an instance of FileArray, but not a temporary FileArray.
def _add_to_file_counter(self, i): # subarray = getattr(self, '_subarray', None) subarray = self._subarray if subarray is None: return try: if isinstance(subarray, FileArray) and not isinstance( subarray, CachedArray ): try: filename = subarray.get_filename() except Exception: filename = None if filename is None: return file_counter = self.file_counter # count = file_counter.get(filename, 0) # file_counter[filename] = count + i # if file_counter[filename] <= 0: count = file_counter.get(filename, 0) + i if count <= 0: # Remove the file from the dictionary if its count has # dropped to zero file_counter.pop(filename, None) else: file_counter[filename] = count except Exception: # If we're here then it is likely that FileArray has been # torn down, so just do nothing. pass
[ "def append_subint_array(self,table):\n fits_to_append = F.FITS(table)", "def iterappend(self, arrayiterable):\n if self._accessmode != 'r+':\n raise OSError(f\"Accesmode should be 'r+' \"\n f\"(now is '{self._accessmode}')\")\n if not hasattr(arrayiterable...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _increment_file_counter(self): self._add_to_file_counter(1)
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _decrement_file_counter(self): self._add_to_file_counter(-1)
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the auxiliary mask to the config dictionary. Assumes that ``self.config`` already exists.
def _configure_auxiliary_mask(self, auxiliary_mask): indices = self.indices new = [ mask[ tuple( [ (slice(None) if n == 1 else index) for n, index in zip(mask.shape, indices) ] ) ] for mask in auxiliary_mask ] # # If the partition is to be parallelised then get rid of mask # # components which are all False so the mask component does # # not get copied to the child process # if not config['serial']: # new = [mask for mask in new if not mask.any()] self.config["auxiliary_mask"] = new
[ "def hybrid_dict_mask(self, test=False, a='6', msg=msgs.m_hydi_atk):\n self.argv = self.build_args()\n mask = self.masks_file or self.mask\n if not mask:\n return\n try:\n self.argv.insert(0, mask)\n self.common_attack_pattern(test, a, msg)\n excep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if and only if the partition's subarray is in memory as opposed to on disk.
def in_memory(self):
    """True if and only if the subarray is in memory as opposed to on disk."""
    # Numpy-like in-memory arrays expose __array_interface__;
    # disk-backed arrays do not.
    return hasattr(self._subarray, "__array_interface__")
[ "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if and only if the partition's subarray is on disk as opposed to in memory.
def on_disk(self):
    """True if and only if the subarray is on disk as opposed to in memory."""
    subarray = self._subarray
    return isinstance(subarray, FileArray)
[ "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # ------------------------------------------...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The partition's subarray of data.
def subarray(self):
    """The partition's subarray of data."""
    return self._subarray
[ "def partition(self, sep):\n return asarray(partition(self, sep))", "def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)", "def get_slice(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals\n # convert array_slice int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the axis names. The axis names are arbitrary, so mapping them to another arbitrary collection does not change the data array values, units, nor axis order.
def change_axis_names(self, axis_map):
    """Map the partition's axis names through *axis_map*.

    Axis names are arbitrary labels, so renaming them changes neither
    the data array values, the units, nor the axis order.
    """
    # Rename the partition axes.
    self.axes = [axis_map[axis] for axis in self.axes]

    # Rename the flipped axes, when any are recorded.
    if self.flip:
        self.flip = [axis_map[axis] for axis in self.flip]
[ "def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the partition after it has been conformed. The partition should usually be closed after its `array` method has been called to prevent memory leaks. Closing the partition does one of the following, depending on the values of the partition's `!_original` attribute and on the partition's configuration settings.
def close(self, **kwargs):
    """Close the partition after it has been conformed.

    Usually called after the `array` method to prevent memory leaks.
    What happens depends on the partition's `!_original` attribute,
    its `config` and the amount of free memory: the partition may be
    reverted, moved to disk, or kept in memory.

    NOTE(review): the numbered cases (1.1.1.1 etc.) preserve the
    original decision table; log-message spacing follows the visible
    source and should be confirmed against the upstream module.
    """
    config = getattr(self, "config", None)
    if config is None:
        return

    if kwargs:
        config.update(kwargs)

    original = getattr(self, "_original", None)
    logger.partitioning("Partition.close: original = {}".format(original))

    if not original:
        originally_on_disk = False
        original_subarray = None
    else:
        originally_on_disk = not original.in_memory
        original_subarray = original._subarray

    config = self.config
    logger.partitioning(" config = {}".format(config))

    if config["serial"]:
        # ------------------------------------------------------------
        # SERIAL
        # ------------------------------------------------------------
        logger.partitioning(" serial")

        if config["readonly"]:
            logger.partitioning(" readonly=True")

            if originally_on_disk:
                logger.partitioning(" subarray originally on disk")

                if config.get("to_disk", False):
                    # 1.1.1.1 Originally on disk, don't keep in
                    # memory, happy to discard changes.
                    logger.partitioning(" 1.1.1.1 revert")
                    self.revert()
                elif free_memory() <= cf_fm_threshold():
                    # 1.1.1.2 Originally on disk, but not enough free
                    # memory to keep the current subarray.
                    logger.partitioning(
                        " 1.1.1.2 revert ({} <= {})".format(
                            free_memory(), cf_fm_threshold()
                        )
                    )
                    self.revert()
                else:
                    # 1.1.1.3 Originally on disk and enough memory to
                    # keep the current subarray in memory.
                    if config["unique_subarray"] and isinstance(
                        original_subarray, CachedArray
                    ):
                        # The original subarray was a temporary file
                        # not referenced by any other partition.
                        _remove_temporary_files(
                            original_subarray._partition_file
                        )

                    del self.masked
                    logger.partitioning(
                        " 1.1.1.3 del masked ({} > {})".format(
                            free_memory(), cf_fm_threshold()
                        )
                    )
            else:
                logger.partitioning(" subarray originally in memory")
                if config.get("to_disk", False):
                    # 1.1.2.1 Originally in memory, don't keep the
                    # current subarray in memory.
                    logger.partitioning(" 1.1.2.1 to_disk")
                    self.to_disk(reopen=False)
                elif free_memory() <= cf_fm_threshold():
                    # 1.1.2.2 Originally in memory but not enough
                    # memory to keep the current subarray.
                    logger.partitioning(" 1.1.2.2 to_disk")
                    self.to_disk(reopen=False)
                else:
                    # 1.1.2.3 Originally in memory and enough memory
                    # to keep it there.
                    logger.partitioning(" 1.1.2.3 pass")
                    pass
        else:
            # config['readonly'] is False
            if originally_on_disk:
                if config.get("to_disk", False):
                    # 1.2.1.1 Originally on disk and we don't want to
                    # keep the array.
                    if config["unique_subarray"] and isinstance(
                        original_subarray, CachedArray
                    ):
                        _remove_temporary_files(
                            original_subarray._partition_file
                        )

                    logger.partitioning(" 1.2.1.1 to_disk")
                    self.to_disk(reopen=False)
                elif free_memory() <= cf_fm_threshold():
                    # 1.2.1.2 Originally on disk but not enough
                    # memory to keep it.
                    if config["unique_subarray"] and isinstance(
                        original_subarray, CachedArray
                    ):
                        _remove_temporary_files(
                            original_subarray._partition_file
                        )

                    logger.partitioning(" 1.2.1.2 to_disk")
                    self.to_disk(reopen=False)
                else:
                    # 1.2.1.3 Originally on disk and enough memory to
                    # keep it.
                    logger.partitioning(" 1.2.1.3 pass")
                    del self.masked
            else:
                if config.get("to_disk", False):
                    # 1.2.2.1 Originally in memory but we don't want
                    # to keep it.
                    logger.partitioning(" 1.2.2.1 to_disk")
                    self.to_disk(reopen=False)
                elif free_memory() <= cf_fm_threshold():
                    # 1.2.2.2 Originally in memory but not enough
                    # memory to keep it.
                    logger.partitioning(" 1.2.2.2 to_disk")
                    self.to_disk(reopen=False)
                else:
                    # 1.2.2.3 Originally in memory and enough memory
                    # to keep it.
                    logger.partitioning(" 1.2.2.3 del masked")
                    del self.masked
    else:
        # ------------------------------------------------------------
        # PARALLEL
        # ------------------------------------------------------------
        logger.partitioning("Partition.close: parallel")
        pass

    try:
        del self.config
    except AttributeError:
        pass
[ "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def close_all(self):\n self.partition_map.close_all()", "def close(self):\n self.drill = None", "def close(self):\n self.dataset.close()", "def exit(self):\n for acc in self.to_close:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if the subarray contains datetime objects.
def isdt(self):
    """True if the subarray contains datetime objects."""
    units = self.Units
    return units.isreftime and self._subarray.dtype == _dtype_object
[ "def _contains_cftime_datetimes(array) -> bool:\n # Copied / adapted from xarray.core.common\n from xarray.core.pycompat import is_duck_dask_array\n\n if cftime is None:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the file containing the subarray, if there is one.
def file_close(self):
    """Close the file containing the subarray, if there is one."""
    if not self.on_disk:
        return
    self._subarray.close()
[ "def closeFile():\r\n global datafile\r\n if datafile is not None:\r\n datafile.close()", "def close(self):\n if self.closed:\n return\n\n self.closed = True\n try:\n if self.mode in (\"a\", \"w\", \"x\"):\n self.fileobj.write(NUL * (BLOCKSIZE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an iterator over indices of the master array which are spanned by the data array.
def master_ndindex(self):
    """Return an iterator over the master-array indices spanned by the data array.

    Each element of ``self.location`` provides the arguments of a
    ``range`` along one axis; the cartesian product of those ranges is
    returned.
    """
    ranges = [range(*bounds) for bounds in self.location]
    return itertools_product(*ranges)
[ "def indicesIter(self):\n \n pass", "def indices(self):", "def getArrayIndices(self):\n \n pass", "def __iter__(self):\n start = 0\n for i, dist in enumerate(self.dists):\n count = self.ndims[i]\n if count == 1:\n idx = start\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the `!part` attribute inplace for new indices of the master array.
def new_part(self, indices, master_axis_to_position, master_flip):
    """Update the `!part` attribute in-place for new master-array indices.

    NOTE(review): reconstruction of index/flip arithmetic preserved
    exactly; slice algebra assumes positive steps in the incoming
    indices — confirm against the original module.
    """
    shape = self.shape

    # Nothing to do when the indices span the whole partition.
    if indices == [slice(0, stop, 1) for stop in shape]:
        return

    # ------------------------------------------------------------
    # If a dimension runs in the wrong direction then change its
    # index to account for this.  E.g. for size 10, slice(3, 8, 2)
    # becomes slice(6, 0, -2) once the direction is corrected.
    # ------------------------------------------------------------
    if self._subarray.size > 1:
        indices = indices[:]

        p_flip = self.flip

        for axis, i in master_axis_to_position.items():
            if (axis not in p_flip and axis not in master_flip) or (
                axis in p_flip and axis in master_flip
            ):
                # This axis runs in the correct direction.
                continue

            # This axis runs in the wrong direction: reset it.
            p_flip = p_flip[:]
            if axis in self.flip:
                p_flip.remove(axis)
            else:
                p_flip.append(axis)

            # Modify the index to account for the changed direction.
            size = shape[i]
            if isinstance(indices[i], slice):
                start, stop, step = indices[i].indices(size)
                # step is assumed to be always positive here.
                div, mod = divmod(stop - start - 1, step)
                start = size - 1 - start
                stop = start - div * step - 1
                if stop < 0:
                    stop = None
                indices[i] = slice(start, stop, -step)
            else:
                size -= 1
                indices[i] = [size - j for j in indices[i]]

        self.flip = p_flip

    slice_None = slice(None)

    # Reorder the new indices to match the partition's axis order.
    indices = [
        indices[master_axis_to_position[axis]]
        if axis in master_axis_to_position
        else slice_None
        for axis in self.axes
    ]

    part = self.part
    if not part:
        self.part = indices
        return

    # Still here? Then update an existing part.
    p_part = []
    for part_index, index, size in zip(part, indices, self._subarray.shape):
        if index == slice_None:
            p_part.append(part_index)
            continue

        if isinstance(part_index, slice):
            if isinstance(index, slice):
                # Compose two slices into one.
                start, stop, step = part_index.indices(size)
                size1, mod = divmod(stop - start - 1, step)
                start1, stop1, step1 = index.indices(size1 + 1)
                size2, mod = divmod(stop1 - start1, step1)
                if mod != 0:
                    size2 += 1

                start += start1 * step
                step *= step1
                stop = start + (size2 - 1) * step

                if step > 0:
                    stop += 1
                else:
                    stop -= 1
                if stop < 0:
                    stop = None
                p_part.append(slice(start, stop, step))
                continue
            else:
                merged = list(range(*part_index.indices(size)))
                merged = [merged[i] for i in index]
        else:
            if isinstance(index, slice):
                merged = part_index[index]
            else:
                merged = [part_index[i] for i in index]

        # The new element is a list of integers: convert it to a
        # slice when possible before appending it.
        merged0 = merged[0]
        if len(merged) == 1:
            # Single-element list becomes a unit slice.
            merged = slice(merged0, merged0 + 1, 1)
        else:
            step = merged[1] - merged0
            if step:
                if step > 0:
                    start, stop = merged0, merged[-1] + 1
                else:
                    start, stop = merged0, merged[-1] - 1

                if merged == list(range(start, stop, step)):
                    if stop < 0:
                        stop = None
                    merged = slice(start, stop, step)

        p_part.append(merged)

    self.part = p_part
[ "def _update_assessment_parts_map(self, part_list):\n for part in part_list:\n # perhaps look for a \"level offset\"?\n level = part._level_in_section # plus or minus \"level offset\"?\n if str(part.get_id()) not in self._part_ids():\n self._insert_part_map(ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The extra memory required to access the array.
def extra_memory(self): if not self.in_memory: # -------------------------------------------------------- # The subarray is on disk so getting the partition's data # array will require extra memory # -------------------------------------------------------- extra_memory = True else: # -------------------------------------------------------- # The subarray is already in memory # -------------------------------------------------------- config = self.config p_part = self.part if p_part: extra_memory = True elif not config["unique_subarray"]: extra_memory = True else: p_data = self._subarray if not numpy_ma_isMA(p_data): # The p_data is not a masked array extra_memory = isinstance(p_data.base, numpy_ndarray) else: # The p_data is a masked array memory_overlap = isinstance( p_data.data.base, numpy_ndarray ) if not ( p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(p_data) ): # There is at least one missing data point memory_overlap |= isinstance( p_data.mask.base, numpy_ndarray ) extra_memory = memory_overlap # --- End: if p_dtype = p_data.dtype if not extra_memory: if config["func"] is not None: extra_memory = True else: p_units = self.Units units = config["units"] if ( not p_units.equals(units) and bool(p_units) is bool(units) and not ( p_data.flags["C_CONTIGUOUS"] and p_dtype.kind == "f" ) ): extra_memory = True # ------------------------------------------------------------ # Extra memory is required if the dtype needs changing # ------------------------------------------------------------ if not extra_memory: dtype = config["dtype"] if dtype is not None and dtype != p_data.dtype: extra_memory = True # --- End: if # ------------------------------------------------------------ # Amount of extra memory (in bytes) required to access the # array # ------------------------------------------------------------ return self.nbytes if extra_memory else 0
[ "def allocated_memory(self):\n return self._allocated_memory", "def memory(self):\n return self._memory", "def get_array_size(self):\r\n return conf.lib.clang_getArraySize(self)", "def arraysize(self):\n return self._arraysize", "def MAXMEM(self):", "def memory_size(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move the partition's subarray to a temporary file on disk.
def to_disk(self, reopen=True): # try: tfa = CachedArray(self.array) # except Exception: # return False fd, _lock_file = mkstemp( prefix=tfa._partition_file + "_", dir=tfa._partition_dir ) close(fd) self.subarray = tfa _temporary_files[tfa._partition_file] = ( tfa._partition_dir, _lock_file, set(), ) if reopen: # Re-open the partition self.open(self.config) return True
[ "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(ori...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a temporary file on this rank that has been created on another rank.
def _register_temporary_file(self): _partition_file = self._subarray._partition_file _partition_dir = self._subarray._partition_dir if _partition_file not in _temporary_files: fd, _lock_file = mkstemp( prefix=_partition_file + "_", dir=_partition_dir ) close(fd) _temporary_files[_partition_file] = ( _partition_dir, _lock_file, set(), ) else: _, _lock_file, _ = _temporary_files[_partition_file] return _lock_file
[ "def register_tmp_file(self, tmp_file: str):\n self.temp_files.add(pathlib.Path(tmp_file))", "def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]", "def _upload_temp(cls, path, token, rtype):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the lock files listed in lock_files to the list of lock files managed by other ranks.
def _update_lock_files(self, lock_files): _, _lock_file, _other_lock_files = _temporary_files[ self._subarray._partition_file ] _other_lock_files.update(set(lock_files)) if _lock_file in _other_lock_files: # If the lock file managed by this rank is in the list of # lock files managed by other ranks, remove it from there _other_lock_files.remove(_lock_file)
[ "def LockFiles(self, entries):\n self._model.lock(entries)", "def addFiles(self, file_list):\n \n # Add the files to the queue\n for file_name in file_list:\n self.file_queue.put(file_name)\n \n # Write the queue to disk\n self.saveQueue()\n \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Samples a 2d function f over specified intervals and returns two arrays (X, Y) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot2d.py. f is a function of one variable, such as x2. x_args is an interval given in the form (var, min, max, n)
def sample2d(f, x_args): try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpreted as a SymPy function") try: x, x_min, x_max, x_n = x_args except (TypeError, IndexError): raise ValueError("x_args must be a tuple of the form (var, min, max, n)") x_l = float(x_max - x_min) x_d = x_l/float(x_n) X = np.arange(float(x_min), float(x_max) + x_d, x_d) Y = np.empty(len(X)) for i in range(len(X)): try: Y[i] = float(f.subs(x, X[i])) except TypeError: Y[i] = None return X, Y
[ "def PlotF(f,start,stop,x_label='x axis',y_label='y axis',lab='f(x)',\\\r\n my_title='Graph',arguments=()):\r\n \r\n #initiate figure object, plot object\r\n my_fig, my_plot = pyplot.subplots();\r\n\r\n #if no additional arguments, args is an empty tuple, so just plot\r\n\r\n if arguments==...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Samples a 3d function f over specified intervals and returns three 2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot3d.py. f is a function of two variables, such as x2 + y2. x_args and y_args are intervals given in the form (var, min, max, n)
def sample3d(f, x_args, y_args): x, x_min, x_max, x_n = None, None, None, None y, y_min, y_max, y_n = None, None, None, None try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpreted as a SymPy function") try: x, x_min, x_max, x_n = x_args y, y_min, y_max, y_n = y_args except (TypeError, IndexError): raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)") x_l = float(x_max - x_min) x_d = x_l/float(x_n) x_a = np.arange(float(x_min), float(x_max) + x_d, x_d) y_l = float(y_max - y_min) y_d = y_l/float(y_n) y_a = np.arange(float(y_min), float(y_max) + y_d, y_d) def meshgrid(x, y): """ Taken from matplotlib.mlab.meshgrid. """ x = np.array(x) y = np.array(y) numRows, numCols = len(y), len(x) x.shape = 1, numCols X = np.repeat(x, numRows, 0) y.shape = numRows, 1 Y = np.repeat(y, numCols, 1) return X, Y X, Y = np.meshgrid(x_a, y_a) Z = np.ndarray((len(X), len(X[0]))) for j in range(len(X)): for k in range(len(X[0])): try: Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k])) except (TypeError, NotImplementedError): Z[j][k] = 0 return X, Y, Z
[ "def frontiere_3d(f, data, step=20):\n ax = plt.gca(projection='3d')\n xmin, xmax = data[:, 0].min() - 1., data[:, 0].max() + 1.\n ymin, ymax = data[:, 1].min() - 1., data[:, 1].max() + 1.\n xx, yy = np.meshgrid(np.arange(xmin, xmax, (xmax - xmin) * 1. / step),\n np.arange(ymin, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a 2d or 3d function over specified intervals and returns a dataset suitable for plotting with matlab (matplotlib) syntax. Wrapper for sample2d and sample3d. f is a function of one or two variables, such as x2. var_args are intervals for each variable given in the form (var, min, max, n)
def sample(f, *var_args): if len(var_args) == 1: return sample2d(f, var_args[0]) elif len(var_args) == 2: return sample3d(f, var_args[0], var_args[1]) else: raise ValueError("Only 2d and 3d sampling are supported at this time.")
[ "def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
iterate through each restaurant name from restaurant names and aggregate to results
def results_aggregator(self, names): for name in names: result = self.main(name) self.results.append(result) print("'%s' has been written to the file." % result[0]) """result is formatted name, number, rating, review count"""
[ "def resolveResult(self, restaurants):\n restaurant_list = []\n for restaurant in restaurants:\n restaurant_list.append({'Name': restaurant['restaurant']['name'], \"cuisines\": [x.strip() for x in restaurant['restaurant']['cuisines'].split(',')],\n \"lat\": restaurant['restaurant...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère la date de publication d'un CVE si celleci est disponible
def parser_cve_date_publi(self,cve): try: pageCVE = opener.open(cve.get('href')) except(ssl.CertificateError) as e: return None soupCVE = BeautifulSoup(pageCVE, 'html.parser') res = soupCVE.find('strong', text=re.compile("Last Modified")) if res != None: res = res.next_sibling.next_sibling.next_sibling res = res.getText() res = re.sub("[A-Za-z0-9/\s]*: ", "", res) res = res.split("/") res = res[2] + '-' + res[0] + '-' + res[1] return res return None
[ "def receive_date(self):\n if 'v112' in self.data['article']:\n return tools.get_publication_date(self.data['article']['v112'][0]['_'])\n return None", "def acceptance_date(self):\n if 'v114' in self.data['article']:\n return tools.get_publication_date(self.data['article...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère les CVE, leurs liens vers NVD, dates de création, et les texte associées chaque vulnérabilité Remplie les listes initialisées au préalable (voir fonction __init__ cidessus)
def cve_parser(self): cves = self.soup.findAll('a', text=re.compile("CVE-")) for cve in cves: id = cve.getText() self.cve+=[id] cve_date = re.sub("CVE-","",id) cve_date = re.sub("-[0-9\s]*","",cve_date) cve_date += "-01-01" self.cve_date += [cve_date] cve_link = cve.get('href') self.cve_link += [cve_link] cve_date_publi = self.parser_cve_date_publi(cve) self.cve_date_publi += [cve_date_publi] #text = cve.parent.previous_sibling.getText() #self.cve_text += [text]
[ "def _initCvsVersion(self):\n\n output = _exec('cvs -vf')\n m = re.match(\n r'Concurrent Versions System \\(CVS\\) '\n r'(?P<numericpart>(\\d+\\.)+\\d+)(?P<rest>\\S*)'\n r' \\(client\\/server\\)',\n output[1]\n )\n if m:\n v = [i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère les vecteur CVSS, les split dans des listes ; récupère aussi les scores CVSS Remplie les listes initialisées au préalable (voir fonction __init__ cidessus)
def cvss_parser(self): cvsss = self.soup.findAll('a', text=re.compile("AV:")) for cvss in cvsss: id = cvss.getText() id = re.sub("[()]*","",id) id = id.split('/') self.cvss += [id] score = cvss.parent.getText() score = re.sub("[A-Za-z0-9\-.,;\s]* score of ","",score) score = re.sub(" has[A-Za-z0-9\-.,;\s\(\):/]*","",score) self.score += [score]
[ "def __init__(self, n):\r\n self.lcs = [LearningCurve(name=f\"cv_{i}\") for i in range(n)]", "def __init__(self):\n self.svclassifier = SVC(kernel='linear')", "def test_split_data_cv():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère les références CWE, leurs lien, abstraction et structures répertoriées et leurs arborescences SFP1 et SFP2 Remplie les listes initialisées au préalable (voir fonction __init__ cidessus)
def cwe_parser(self): cwes = self.soup.findAll('a', text=re.compile("CWE-")) for cwe in cwes: id = cwe.getText() id = re.sub("[A-Za-z0-9;,:\s\-\(\)\"\']* CWE-","CWE-",id) # Lien possiblement cassé try: pageCWE = opener.open(cwe.get('href')) except(urllib.error.URLError) as e: continue soupCWE = BeautifulSoup(pageCWE, 'html.parser') self.cwe += [id] # Si l'id désigne une classe de CWE : if soupCWE.find('h2', text=re.compile('CWE-')) == None: cwe_link = cwe.get('href') self.cwe_link += [cwe_link] self.abstraction += ["None"] self.structure += ["class"] self.sfp2 += ["None"] self.sfp1 += ["None"] # Si l'id désigne un CWE classique : else: cwe_link = cwe.get('href') self.cwe_link += [cwe_link] abstruct=soupCWE.find("div",text=re.compile('Weakness')).next_sibling.getText() abs = re.sub("Abstraction: ","",abstruct) abs = re.sub("Structure: [A-Za-z:\s]*","",abs) abs = re.sub(" ", "", abs) struct = re.sub("[A-Za-z:\s]*: ","",abstruct) self.abstraction += [abs] self.structure += [struct] sfp2 = soupCWE.find("a",text=re.compile('SFP Secondary')) # Possible absence de SFP2 : if sfp2 != None: addr = re.sub("/data[A-Za-z0-9/\-:.\s]*",sfp2.get('href'),cwe.get('href')) pageSFP1 = opener.open(addr) soupSFP1 = BeautifulSoup(pageSFP1, 'html.parser') sfp2 = sfp2.getText() sfp2 = re.sub("[A-Z-a-z\s]*: ","",sfp2) self.sfp2 += [sfp2] sfp1 = soupSFP1.find('a', text=re.compile('SFP Primary')) sfp1 = sfp1.getText() sfp1 = re.sub('[A-Za-z\s]*: ','',sfp1) self.sfp1 += [sfp1] else: self.sfp2 += ["None"] self.sfp1 += ["None"]
[ "def __init__(self):\n self._declarations = self.get_declarations()", "def __init__(self):\n # just the list of class/construct types\n self.lut = {}\n self.lut[\"struct\"] = structure\n self.lut[\"typedef\"] = typedef\n self.lut[\"define\"] = def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Default reducer for distinctions. Expects all distinctions to follow
def __reduce__(self): return instanceReducer(self)
[ "def evaluation_reducer(self) -> Union[Reducer, Dict[str, Reducer]]:\n return Reducer.AVG", "def _reduce(self, action):\n assert len(self.stack) >= 2, \"ERROR: Cannot reduce with stack length less than 2\"\n \n # STUDENT\n # hint: use list.pop()\n # END STUDENT\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For conjugate distinctions this should be overridden and return the base distinctions used. For none conjugate it will automatically return an empty list.
def getBaseDistinctions(self): return []
[ "def get_conjugate_bases_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is conjugate base of\")]\n else:\n return []", "def conjugate(self):\n pass", "def conjugate(self):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a random distinction of this type than is valid for the schema config.schema and for the given graphs. This function for must take graphs as its first argument, and if its a conjugate distinction it must then take, as separate args, not a tuple,
def getRandomDistinction(config, graphs, *base_distinctions): raise AbstractMethodException(Distinction)
[ "def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an estimate of the number of different subtypes for this distinction. This is used to estimate a PDF for randomly sampling the distinction space. Examine the code of other distinctions to get a feel for how things are estimated.
def getNumberOfSubtypes(config, low_estimate=True): raise AbstractMethodException(Distinction)
[ "def test_type_distribution(self):\n np.random.seed(SEED)\n total = 100000\n tolerance = 0.02\n astro_generator = Generator(1e9, source='astrophysical')\n counts = Counter(astro_generator.get_particle_type()\n for _ in range(total))\n assert (counts[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a schema return True if this type of distinction is valid for the schema. Default is True. Should be overridden if there are any schemas a distinction is not valid for.
def isValidForSchema(schema): return True
[ "def is_a_dde_schema(self, schema):\n return schema in self.registered_dde_schemas", "def compatibleSchema(self,\n schema: schemaconverter.TDXSchema,\n raise_error: bool = True\n ) -> bool:\n db_tdx_schema = self.tdx_schema\n # see https://stackoverflow.com/a/41579450/1014916...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Matrix multiplication of chains of square matrices
def chain_matmul_square(As): As_matmul = As while As_matmul.shape[0] > 1: if As_matmul.shape[0] % 2: A_last = As_matmul[-1:] else: A_last = None As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2]) if A_last is not None: As_matmul = torch.cat([As_matmul, A_last], dim=0) return As_matmul.squeeze(0)
[ "def matrix_chain_multiply(A: List[np.ndarray], s: List[List[int]], i: int, j: int) -> np.ndarray:\n if i == j:\n return A[i]\n if i + 1 == j:\n return np.dot(A[i], A[j])\n Ak = matrix_chain_multiply(A, s, i, s[i][j])\n Ak1 = matrix_chain_multiply(A, s, s[i][j]+1, j)\n prod = np.dot(Ak,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print Bento details by providing the bento_tag. \b
def get(bento_tag: str, output: str) -> None: # type: ignore (not accessed) bento = bento_store.get(bento_tag) if output == "path": console.print(bento.path) elif output == "json": info = json.dumps(bento.info.to_dict(), indent=2, default=str) console.print_json(info) else: info = yaml.dump(bento.info, indent=2, sort_keys=False) console.print(Syntax(info, "yaml"))
[ "def print_entity(entity):\n print 'entity.original_text:', entity.original_text\n print 'entity.display_text:', entity.display_text\n print 'entity.display_html:', entity.display_html\n print 'entity.start_index:', entity.start_index\n print 'entity.end_index:', entity.end_index", "def print_tags(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List Bentos in local store \b show all bentos saved $ bentoml list \b show all verions of bento with the name FraudDetector $ bentoml list FraudDetector
def list_bentos(bento_name: str, output: str) -> None: # type: ignore (not accessed) bentos = bento_store.list(bento_name) res = [ { "tag": str(bento.tag), "path": display_path_under_home(bento.path), "size": human_readable_size(calc_dir_size(bento.path)), "creation_time": bento.info.creation_time.astimezone().strftime( "%Y-%m-%d %H:%M:%S" ), } for bento in sorted( bentos, key=lambda x: x.info.creation_time, reverse=True ) ] if output == "json": info = json.dumps(res, indent=2) console.print(info) elif output == "yaml": info = yaml.safe_dump(res, indent=2) console.print(Syntax(info, "yaml")) else: table = Table(box=None) table.add_column("Tag") table.add_column("Size") table.add_column("Creation Time") table.add_column("Path") for bento in res: table.add_row( bento["tag"], bento["size"], bento["creation_time"], bento["path"], ) console.print(table)
[ "def view_command():\n list1.delete(0,END)\n for row in AppbookstoredbBACKEND.view_data():\n list1.insert(END,row)", "async def list(self, ctx: commands.Context, name: str = None):\n if not name:\n try:\n data = self.memory[ctx.guild.id]\n except KeyError:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export a Bento to an external file archive \b
def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed) bento = bento_store.get(bento_tag) out_path = bento.export(out_path) logger.info("%s exported to %s.", bento, out_path)
[ "def export_obo(path_to_file, connection=None):\n db = DbManager(connection)\n db.export_obo(path_to_export_file=path_to_file)\n db.session.close()", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a previously exported Bento archive file \b
def import_bento_(bento_path: str) -> None: # type: ignore (not accessed) bento = import_bento(bento_path) logger.info("%s imported.", bento)
[ "def import_archive(self):\n if self.archive:\n archive = IrkruTildaArchive(self.archive, material=self)\n archive.process()", "def import_into_beets(self):\n # TODO: Rework this and properly call the beets API.\n os.system(f'beet import {self.downloader.temp_path.name}'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pull Bento from a yatai server.
def pull(bento_tag: str, force: bool) -> None: # type: ignore (not accessed) yatai_client.pull_bento(bento_tag, force=force)
[ "def pull():", "def pull_from_postmaster(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push Bento to a yatai server.
def push(bento_tag: str, force: bool, threads: int) -> None: # type: ignore (not accessed) bento_obj = bento_store.get(bento_tag) if not bento_obj: raise click.ClickException(f"Bento {bento_tag} not found in local store") yatai_client.push_bento(bento_obj, force=force, threads=threads)
[ "def push(context_service: ContextService):\n cli_output: CliOutput = context_service.get_cli_output()\n cli_output.info(\"Pushing changes to remote...\")", "def push(self, obj):\n pass", "def push(self):\n logger.debug('PUSHING...')\n self._rest()\n self._trigger(self.STATE.PU...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a new Bento from current directory.
def build(build_ctx: str, bentofile: str, version: str) -> None: # type: ignore (not accessed) if sys.path[0] != build_ctx: sys.path.insert(0, build_ctx) build_bentofile(bentofile, build_ctx=build_ctx, version=version)
[ "def newproject(self):\n \n self.path = os.path.join(self.base, self.name)\n subpath = os.path.join(self.path, self.lowname)\n check_build_path(subpath)\n \n for filename, content in self.files.items():\n self.buildfile(filename, content, self.path)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the value. (And calls the base class) This will also check for Options to set the bools. FAULTS_ACTIVE FAULTS_CURRENT >>> BIT_FAULT_PROBE = 0 >>> BIT_FAULT_OVERTEMP = 1 >>> BIT_FAULT_PANEL_OPEN = 2 >>> BIT_FAULT_HIGH_VOLTAGE = 3 >>> BIT_FAULT_RAM_CRC = 4 >>> BIT_FAULT_EEPROM_CRC = 5 >>> BIT_FAULT_GPIO_ERROR = 6 >>> BIT_FAULT_LTFAULT_ERROR = 7 >>> BIT_FAULT_TRIGGER_ERROR = 8 >>> BIT_FAULT_HARDWARE_EXC = 9 >>> BIT_FAULT_TRIGGER_GLITCH = 10 >>> BIT_FAULT_OVERVOLTAGE = 11 >>> BIT_FAULT_TEMP_SENSOR = 12
def set_value(self, item, value): super(t_16_Bit_Options, self).set_value(item, value) if(item == t_16_Bit_Options.FAULT_ACTIVE): self.set_bools(value, self.faults_current, t_16_Bit_Options.BIT_FAULT_MAX ) if(item == t_16_Bit_Options.FAULT_LATCHED): self.set_bools(value, self.faults_latched, t_16_Bit_Options.BIT_FAULT_MAX )
[ "def set(self, value): # interface for BlueSky plans\n if str(value).lower() not in (\"fly\", \"taxi\", \"return\"):\n msg = \"value should be either Taxi, Fly, or Return.\"\n msg + \" received \" + str(value)\n raise ValueError(msg)\n\n if self.busy.value:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the value. (And calls the base class) This will also check for Options to set the bools. BOOLEAN_CONFIG_1 >>> BIT_PROBE_TERMINATION = 0 >>> BIT_TMODE = 1 >>> BIT_EMODE = 2 >>> BIT_MUTE = 3 >>> BIT_PATTERN_TRIGGER = 4 >>> BIT_DEBUG_REALTIME = 5 >>> BIT_DEBUGPRINT = 6 >>> BIT_DEBUG_HW_OVERRIDE = 7
def set_value(self, item, value): super(t_8_Bit_Options, self).set_value(item, value) if(item == t_8_Bit_Options.BOOLEAN_CONFIG_1): self.set_bools(value, self.bools, t_8_Bit_Options.BIT_MAX)
[ "def setbool(self, strcommand, value):\n command = ct.c_wchar_p(strcommand)\n value = ct.c_bool(value)\n self.lib.AT_SetBool(self.AT_H, command, value)", "def set_bool_node_value(node_name,value):\n\n\timport Mgmt\n code, msg = Mgmt.set((node_name,'bool', value))\n return code,m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a command packet
def build_command_packet(self, command): packet = bytearray() # All option fields are 0 packet.append(0) packet.append(0) packet.append(0) packet.append(command) return packet
[ "def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardwar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will get the current faults on the system.
def get_faults_current(self): request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16) return self.__get_faults_list(self.config_16.faults_current)
[ "def get_faults(self):\n status = self.get_status()\n return [k for k in status if k.endswith('_FAULT') and status[k]]", "def get_faults(self):\n try:\n faults_parent = self.rootelement.findall(\"{\"+self.xmlns+\"}Faults\")[0]\n self.faults = faults_parent.findall(\"{\"+...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will get the latched faults on the system.
def get_faults_latched(self): request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_LATCHED], BP_TOOL.REQUEST_16) return self.__get_faults_list(self.config_16.faults_latched)
[ "def get_faults(self):\n status = self.get_status()\n return [k for k in status if k.endswith('_FAULT') and status[k]]", "def get_faults_current(self):\n request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16)\n return self.__get_faults_list(self.con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the pattern wave pat_wave 101011110011 .... >>> Request >>> 0> >>> Pattern Wave [More to follow] >>> >> Request Next block >>> 0> >>> Pattern Wave [More to follow] >>> >> >>> ..... >>> >>> Request Next block >>> 0> >>> Pattern Wave [No More to follow] >>> <)
def __request_pat_wave(self, r_number): packet = bytearray() packet.append(0) # 16 bit options packet.append(0) # 8 bit options packet.append(1) # Request the 1 option # --------------------------------------------------------------------- # Request the variable length options. pattern wave. packet.append(0x01 << t_var_size_Options.PATTERN_WAVE) # --------------------------------------------------------------------- # Packets to follow packet.append(r_number) # --------------------------------------------------------------------- # Length of the bytes to follow packet.append(0) rval = self.interact_with_shouter(packet) if rval != False: return rval return []
[ "def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")", "def _wave(self):\n try:\n return wave.open(StringIO(self.contents))\n except wave.Error, err:\n err.message += \"\\nInvalid wave file: %s\" % self\n err.args = (err...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The resource ID of the Network Fabric l3IsolationDomain.
def l3_isolation_domain_id(self) -> pulumi.Input[str]: return pulumi.get(self, "l3_isolation_domain_id")
[ "def l3_isolation_domain_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")", "def l3_id(self):\n return self._l3_id", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def cluster_resource_id(self) -> str:\n return pulu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
def interface_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "interface_name")
[ "def get_default_iface_name():\n return netifaces.gateways()['default'][netifaces.AF_INET][1]", "def interface_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"interface_name\")", "def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The name of the L3 network.
def l3_network_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "l3_network_name")
[ "def name(self) -> str:\n return self.__configuration['network']['name']", "def computer_network_name(self) -> str:\n return self._computer_network_name", "def network_instance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_instance_name\")", "def name(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an existing L3Network resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'L3Network': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = L3NetworkArgs.__new__(L3NetworkArgs) __props__.__dict__["associated_resource_ids"] = None __props__.__dict__["cluster_id"] = None __props__.__dict__["detailed_status"] = None __props__.__dict__["detailed_status_message"] = None __props__.__dict__["extended_location"] = None __props__.__dict__["hybrid_aks_clusters_associated_ids"] = None __props__.__dict__["hybrid_aks_ipam_enabled"] = None __props__.__dict__["hybrid_aks_plugin_type"] = None __props__.__dict__["interface_name"] = None __props__.__dict__["ip_allocation_type"] = None __props__.__dict__["ipv4_connected_prefix"] = None __props__.__dict__["ipv6_connected_prefix"] = None __props__.__dict__["l3_isolation_domain_id"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["system_data"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_machines_associated_ids"] = None __props__.__dict__["vlan"] = None return L3Network(resource_name, opts=opts, __props__=__props__)
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Layer':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = LayerArgs.__new__(LayerArgs)\n\n __props__.__dict__[\"attributes\"]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The list of resource IDs for the other Microsoft.NetworkCloud resources that have attached this network.
def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]: return pulumi.get(self, "associated_resource_ids")
[ "def resource_ids(self):\n return self._resource_ids", "def network_ids(self):\n return self._network_ids", "def get_resource_identifiers(self):\n return self.__resourceIdentifiers", "def resource_share_ids(self):\n return self._resource_share_ids", "def resource_list(self):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The more detailed status of the L3 network.
def detailed_status(self) -> pulumi.Output[str]: return pulumi.get(self, "detailed_status")
[ "def status(ctx):\n return show_network_status()", "def status(self):\n res = \"\"\n for tlight in self.trafficLights:\n res += \"Traffic light {} status: {}\\n\".format(self.trafficLights[tlight].id,self.trafficLights[tlight].getState())\n return res", "def status(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The extended location of the cluster associated with the resource.
def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']: return pulumi.get(self, "extended_location")
[ "def cluster_extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"cluster_extended_location\")", "def cluster_location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_location\")", "def cluster_location(self) -> Option...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
def interface_name(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "interface_name")
[ "def get_default_iface_name():\n return netifaces.gateways()['default'][netifaces.AF_INET][1]", "def interface_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interface_name\")", "def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The type of the IP address allocation, defaulted to "DualStack".
def ip_allocation_type(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "ip_allocation_type")
[ "def ip_address_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address_type\")", "def _get_address_type(self):\n return self.__address_type", "def IpType(self):\n\t\treturn self._get_attribute('ipType')", "def address_type(self):\n return self._address_type", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The resource ID of the Network Fabric l3IsolationDomain.
def l3_isolation_domain_id(self) -> pulumi.Output[str]: return pulumi.get(self, "l3_isolation_domain_id")
[ "def l3_isolation_domain_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")", "def l3_id(self):\n return self._l3_id", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def cluster_resource_id(self) -> str:\n return pulum...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
join the input string
def my_join(iters, string): out = "" for i in range(iters): out += "," + string return out
[ "def join(self, iterable): # real signature unknown; restored from __doc__\n return \"\"", "def join(self, iterable) -> String:\n pass", "def join_strings(words):\n joined_string = ''\n for word in words:\n joined_string += word\n\n return joined_string", "def my_join(iters, string):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the keys 'logits' and 'probs' to the end points dictionary of ResNet50v2.
def _get_updated_endpoints(original_end_points, name): end_points = dict(original_end_points) end_points['logits'] = tf.squeeze(end_points[name], [1, 2]) end_points['probs'] = tf.nn.softmax(end_points['logits']) return end_points
[ "def extend_network_dict(self, session, base_model, result):\n self._call_on_dict_driver(\"extend_network_dict\", session, base_model,\n result)", "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
[ "def load(self):\n latest = tf.train.latest_checkpoint(self.checkpoint_dir)\n self.model.load_weights(latest)", "def load_weights(model, checkpoint_path):\n # Your code here\n \n model.load_state_dict(torch.load(checkpoint_path))\n model.eval()", "def load_weights(self, path=None):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
[ "def load(self):\n latest = tf.train.latest_checkpoint(self.checkpoint_dir)\n self.model.load_weights(latest)", "def load_weights(model, checkpoint_path):\n # Your code here\n \n model.load_state_dict(torch.load(checkpoint_path))\n model.eval()", "def load_weights(self, path=None):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Warn about unused static variables.
def _find_unused_static_warnings(filename, lines, ast_list): static_declarations = { node.name: node for node in ast_list if (isinstance(node, ast.VariableDeclaration) and 'static' in node.type.modifiers) } def find_variables_use(body): for child in body: if child.name in static_declarations: static_use_counts[child.name] += 1 static_use_counts = collections.Counter() for node in ast_list: if isinstance(node, ast.Function) and node.body: find_variables_use(node.body) elif isinstance(node, ast.Class) and node.body: for child in node.body: if isinstance(child, ast.Function) and child.body: find_variables_use(child.body) for name in sorted(static_declarations): if not static_use_counts[name]: print("{}:{}: unused variable '{}'".format( filename, lines.get_line_number(static_declarations[name].start), name))
[ "def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the parsed contents of the config file.
def get_config(): return json.loads(CONFIG_FILE.read_text())
[ "def read(self):\n if self.default_file:\n self.read_default_config()\n return self.read_config_files(self.all_config_files())", "def read_config():\n\n\tfilename = \"config.json\"\n\n\tfile_object = open(filename, \"r\")\n\n\treturn json.loads(file_object.read())", "def _get_config(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
seed users. by defualt set to 5 users
def seed_User(number=5, overwrite=False): if overwrite: print('Overwriting all users') User.objects.all().delete() count = 0 for i in range(number): username = fake.first_name() User.objects.create_user( email=username + "@blogmail.com", password="vns12345", name=username, date_joined=datetime.datetime.now(), is_active=1, is_superadmin=0, avatar='', is_staff=1 ) count += 1 percent_complete = count / number * 100 print( "Adding {} new Users: {:.2f}%".format( number, percent_complete), end='\r', flush=True ) print()
[ "def populate(self, nbUsers):\n users = []\n f = faker.Faker()\n\n for i in range(nbUsers):\n user, addr = self.create_user(f.name(), f.address())\n users.append(user)\n\n self.session.add_all(users)\n self.session.commit()", "def generate_users(count=10):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_score increments the score by change can be negative
def set_score(self, change): self._score = self._score + change
[ "def set_score(self, score: float):\n self.score = score", "def set_score(self, score):\n self._score = score", "def update_score():\n pass", "def increase_score(self, score):\r\n self.score += score", "def set_score(self, score):\n self.score_function = score", "def sco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
move_ray this is the primary function which is responsible for recursively moving a ray. Although it primarily look after the action of the Ray.Ray class it lives in the Game instance itself. THIS IS HOW WE DETERMINE THE EXIT POINT OF ALL RAYS HORIZONTAL, VERTICAL, OR WITH DETOURS
def move_ray(self, ray): # look to the next spot in the ray's trajectory next_coordinates = ray.get_next_location() next_location = self._board.get_board_square(next_coordinates) # check for a collisition - return if it occurs if ray.check_for_collision(next_location): return # if we didn't collide as we moved we need to look to check our # diagonals for atoms ccw_diag_coordinates, cw_diag_coordinates = ray.get_diagonals() ccw_diagonal = self._board.get_board_square(ccw_diag_coordinates) cw_diagonal = self._board.get_board_square(cw_diag_coordinates) if ccw_diagonal.is_atom() or cw_diagonal.is_atom(): # If we're on our first move and the immediately diagonals contain an atom we have a reflection if ray.get_current_location() == ray.get_origin_location(): terminal_square = self._board.get_board_square( ray.get_current_location()) # let's the ray know it's finished and the square that it's an endpoint # self.end_ray(ray, terminal_square) return ray.record_edge_collision(terminal_square) # otherwise they cause a bend in the path else: # we have to calculate our trajectory based on the pull # of the atoms in our path ray.recalculate_trajectory(ccw_diagonal, cw_diagonal) # get the coordinates of the next location in our new trajectory next_coordinates = ray.get_next_location() # determine the next coordinate will result in a collision - return if it would if ray.check_for_collision( self._board.get_board_square(next_coordinates)): return # move the ray to the next step forward in its current trajectory ray.set_current_location(next_coordinates) # finally, recursively call our current function from the next step in its path. self.move_ray(ray)
[ "def shoot_ray(self, row, column):\n # check if row/column is an allowed entry point\n if (row, column) not in self._allowed_entry_points:\n return False\n\n # add entry to entry/exit point list and deduct point if entry hasn't already been used\n if (row, column) not in self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
shoot_ray shoots a ray from a given row and column if possible
def shoot_ray(self, origin_row, origin_column): # get the the square object at row x column origin = self._board.get_board_square((origin_row, origin_column)) # check that it is a valid "edge" to send a ray from origin_check = origin.is_edge() # if it's not then return false if origin_check == False: return False # if we pass the origin check create shoot a new Ray.Ray object from row x column new_ray = Ray.Ray(origin_row, origin_column) # let the square we shot from know its an orign square origin.set_originating_ray(new_ray) # Deduct 1 from the score since we now have on exit point self.set_score(-1) # while the ray object has a direction (will be set to none when it reaches an endpoint) # send it to the helper function that will move it while new_ray.get_direction() != None: self.move_ray(new_ray) # if we hit an exit point (other than through reflection) deduct the point for that terminus = new_ray.get_terminal_location() # check the the terminal point is an edge (hitting an atom returns none as terminus) if terminus != None: # check that the terminus is not a reflection, which shouldn't be counted twice terminal_square = self._board.get_board_square(terminus) terminal_square.set_terminating_ray(new_ray) if terminus != (origin_row, origin_column): self.set_score(-1) return terminus
[ "def shoot_ray(self, row, column):\n\n # check if ray is being shot from corner square\n if (row == 0 or row == 9) and (column == 0 or column == 9):\n return False\n\n # check if ray is being shot from non-border square\n if row in range(1, 9) and column in range(1, 9):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
atoms_left returns the number of unguessed atoms still left
def atoms_left(self): return len(self._atoms)
[ "def atoms_left(self):\n return self._atoms_remaining", "def _get_mark_count_left(self):\n return self._number_of_bombs - sum([sum([1 for c in row if c.is_marked]) for row in self._cells])", "def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces", "def get_num_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test GRU gnmt encoder. time_major=True
def runGRUEncoder(self, encoder, num_layers): inputs_ph = tf.placeholder( dtype=tf.float32, shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH)) inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None)) outputs, states = encoder.encode( mode=tf.estimator.ModeKeys.TRAIN, sequence_inputs=inputs_ph, sequence_length=inputs_length_ph) num_bi_layers = 1 num_uni_layers = num_layers - num_bi_layers if num_uni_layers == 1: states_bi_bw, states_uni = states # states_bi_bw = (states_bi_bw,) self.assertEqual(1, len(states_bi_bw)) self.assertEqual(num_uni_layers, len(states_uni)) # unlike lstm, whose states is a tuple of (c,h), # gru states has only one element # states_bi_bw[0] is a states tensor states_list = [states_bi_bw[0]] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) else: states_uni = states self.assertEqual(num_uni_layers, len(states_uni)) states_list = [] for i in range(num_uni_layers): states_list.append(states_uni[i]) states = tf.convert_to_tensor(states_list) inputs, inputs_length = common_utils.get_encoder_test_inputs() with self.test_session() as sess: sess.run(tf.global_variables_initializer()) outputs, states = sess.run( [outputs, states], feed_dict={ inputs_ph: inputs, inputs_length_ph: inputs_length }) self.assertAllEqual( [common_utils.TIME_STEPS, common_utils.BATCH_SIZE, common_utils.DEPTH], outputs.shape) if num_uni_layers == 1: self.assertEqual(num_layers, len(states)) self.assertAllEqual( [num_layers, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape) else: self.assertEqual(num_uni_layers, len(states)) self.assertAllEqual( [num_uni_layers, common_utils.BATCH_SIZE, common_utils.DEPTH], states.shape)
[ "def test_agilent_2d_rnmrtk():\n # prepare agilent converter\n vdic, vdata = ng.varian.read(os.path.join(DATA_DIR, \"agilent_2d\"))\n uvdic = ng.varian.guess_udic(vdic, vdata)\n vC = ng.convert.converter()\n vC.from_varian(vdic, vdata, uvdic)\n\n # prepare rnmrtk converter\n rdic, rdata = ng.rn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4): self._nodes = dict() # dict with courseid keys, CourseNode vals self._max_suggestions = max_suggestions self._max_courses = max_courses self._cache_mult = cache_mult db = database # Get dict mapping courses to unitary weights unitary_dict = db.get_unitary_dict(session) # Get dict mapping courses to adjacent courses and weights edge_dict = db.get_edges_dict(session) # Create CourseNodes for courseid in unitary_dict: courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(), popularity=unitary_dict[courseid]) self._nodes[courseid] = courseNode # Create course edge dict for each CourseNode for courseid in edge_dict: node = self._nodes[courseid] # get node of interest adj_courses = edge_dict[courseid] # get inner dict {otherid: edge_weight} for otherid in adj_courses: other_node = self._nodes[otherid] node.addEdge(other_node, adj_courses[otherid])
[ "def build_computational_graph():\n pass", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the crosslistings of the top edges from a course
def getTopEdgesFrom(self, session, courseid): node = self.getNode(courseid) # get CourseNode if not node: return [] edges = node.getEdges() # get its Edge dict return sorted(edges.keys(), key=lambda k: edges[k], reverse=True)[:5]
[ "def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes turtle instance for turtle game.
def initialize(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_instance = turtle.Turtle() turtle_instance.shape(turtle_shape) turtle.bgcolor(bg_color) turtle_instance.color(turtle_color) turtle_instance.speed(turtle_speed) return turtle_instance
[ "def init_turtle():\n turtle.up()\n turtle.home()", "def __init__(self):\r\n turtle.setup()\r\n turtle.screensize(100000, 100000)\r\n self.__risi_pot = turtle.Turtle()\r\n self.__risi_prijatelje = turtle.Turtle()\r\n self.__risi_pot.color('red')\r\n self.__risi_pot....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines the turtle movement for the initialized turtle instance and executes that movement.
def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_name = initialize(turtle_shape, bg_color, turtle_color, turtle_speed) for i in range(36): for i in range(4): turtle_name.forward(200) turtle_name.right(90) turtle_name.right(10)
[ "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def init_turtle():\n turtle.up()\n turtle.home()", "def movement(self):", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves summary statistics as a csv file in the current directory and returns the output filename.
def save_summary_statistics_csv( experiment_name, roi_summary_data, save_directory_path: str = "" ): # Create directories on the path if they don't already exist Path(save_directory_path).mkdir(parents=True, exist_ok=True) csv_filename = f"{experiment_name} - summary statistics (generated {iso_datetime_for_filename(datetime.now())}).csv" csv_filepath = Path(save_directory_path) / csv_filename roi_summary_data.to_csv(csv_filepath, index=False) print(f"Summary statistics saved to: {csv_filepath}\n") return csv_filepath
[ "def save_csv(dir_out, no_of_files, result):\n try:\n np.savetxt(f\"{dir_out}_results_from_{no_of_files}-files.csv\",\n result.T, delimiter=\",\", header='Time(h), Avrg_int, SD, SE, Sum_int, Max_int')\n except:\n print(\"Existing csv file is not accessible!\")\n exit()",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stack pandas DataFrames logically into a bigger DataFrame, resets the index of the resulting DataFrame to avoid duplicates in the index
def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame: return pd.concat(dataframes).reset_index(drop=True)
[ "def split_and_stack(df,new_names):\n\n half = int(len(df.columns)/2)\n left = df.iloc[:, :half]\n right = df.iloc[:,half:]\n\n return pd.DataFrame(data = np.vstacks([left.values, right.values], columns = new_names))", "def make_sub_df(src_df, index_col, cols):\n cols.append(index_col)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stack pandas Series logically into a DataFrame
def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame: return pd.concat(serieses, axis="columns").T
[ "def stack(*series):\n _timeseriescompat_multiple(*series)\n return time_series(MA.column_stack(series), series[0]._dates,\n **_attrib_dict(series[0]))", "def series_to_frame(series: pd.Series, new_col_names: Dict[Any, Any]) -> pd.DataFrame:\n return series.to_frame().reset_index()....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load instruments from configpath
def _load(self) -> list[Instrument]: logger.info("Loading config...") self._config = yml.load(self.configpath) instruments, modespec = self._config["instruments"], self._config["modes"] logger.success(f"Found {len(instruments)} instruments, {len(modespec)} modes")
[ "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Expose unique instrument classes found in config
def _expose(self) -> None: classes = {instrument.__class__ for instrument in self._config["instruments"]} for class_ in classes: pyro.expose(class_) logger.success(f"Exposed {len(classes)} instrument class(es): {classes}")
[ "def config(self) -> InstrumentConfig:\n ...", "def configure_instrumented_models(self):\n # Expose Pyramid configuration to classes\n from websauna.system.model.meta import Base\n Base.metadata.pyramid_config = self.config", "def instrument_configs(self) -> list:\n from .rss ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register instrument instances and self with daemon and storing uris
def _serve(self) -> None: for instrument in self._config["instruments"]: uri = self._daemon.register(instrument, objectId=str(instrument)) self._services[instrument.id] = str(uri) logger.success(f"Registered {instrument} at {uri}") self.uri = self._daemon.register(self, objectId=self.servername) logger.success(f"Registered self at {self.uri}")
[ "def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)", "def _Register(self):\r\n self._persistor.AddHandler(self)", "def __init__(self, instrument):\n endpoint = self.ENDPOINT.format(instrument=instrument)\n super(Instrumen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disconnect instruments and shutdown daemon
def shutdown(self) -> None: logger.info("Disconnecting instruments...") for instrument in self._config["instruments"]: instrument.disconnect() logger.info(f"Shutting down {self}...") self._daemon.shutdown()
[ "def shutdown(self):\n os.remove('/tmp/mimic_daemon')\n for address, p in self._connections.iteritems():\n if not p.returncode:\n p.terminate()\n self.daemon.shutdown()", "def stopAndDisconnectWalabot():\n wlbt.Stop()\n wlbt.Disconnect()\n print ('Terminatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
python ~/code/xdoctest/testing/test_linenos.py test_lineno_failcase_called_code python ~/code/xdoctest/testing/test_linenos.py
def test_lineno_failcase_called_code(): text = _run_case(utils.codeblock( r''' def func(a): """ Example: >>> func(0) >>> # this doesnt do anything >>> print('this passes') this passes >>> # call the failing code >>> func(3) """ if a > 0: nested_failure(a) return a def nested_failure(a): if a > 0: nested_failure(a - 1) else: raise Exception('fail case') ''')) assert 'rel: 6, abs: 9,' in text assert text
[ "def test_expected_failures(modpath, expected_failure):\n code = os.path.dirname(expected_failure)\n retcode, out = flake8(join(modpath, expected_failure))\n assert retcode, \"expected failure (%s), got success\" % code\n needle = \": %s \" % code\n assert needle in out\n\n with open(os.path.join(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add to the list of describing adjectives.
def add_adjectives(self, adjective): self.adjectives += [adjective]
[ "def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)", "def add_adnotation(self, adnotation):\n adnotations = list([self.decoding_dict[v] for v in self.get_item_list()])\n adnotations.append(adnotation)\n adnotations.sort(key = lambda item: item.pos)\n self.decodi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the list of describing adjectives. The list is shuffled first because generally this is used to get a random adjective.
def get_adjectives(self): random.shuffle(self.adjectives) return self.adjectives
[ "def get_adjectives(lyrics):\n doc = nlp(lyrics.lower())\n all_adjectives = [token.lemma_ for token in doc if token.pos_ == \"ADJ\"]\n return all_adjectives", "def adjectives_sorted(lyrics):\n adjectives = get_adjectives(lyrics)\n sorted_adjectives = Counter(adjectives)\n return sorted_adjective...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the noun, including all its describing adjectives, as a string.
def full_string(self): return "{}: {}".format(str(self.word), " ".join([str(adj) for adj in self.adjectives]))
[ "def sentence():\r\n return nounPhrase() + \" \" + verbPhrase()", "def getNouns(self):\n return self.nouns", "def nounPhrase():\r\n return random.choice(articles) + \" \" + random.choice(nouns)", "def replaceNouns(self):\n textacy.extract.named_entities\n return self.sentence", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a noun object from a data file containing nouns and their describing adjectives.
def parse(text): parts = text.split(' ') noun = Noun(parts[0], int(parts[1])) parts = parts[2:] while len(parts) > 0: noun.add_adjectives(Word(parts[0], int(parts[1]))) parts = parts[2:] return noun
[ "def extractNouns(filepath, debug=False):\n try:\n text = open(filepath).read()\n except:\n print(\"No such file found. Aborting...\")\n exit()\n \n is_noun = lambda pos: pos[:2] == 'NN'\n # do the nlp stuff\n tokenized = nltk.word_tokenize(text)\n nouns = [word for (word, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the self.guessed_by and self.metaphors_used data as a readable string.
def get_str_metadata(self): return "\n".join(["Guessed by {}".format(self.guessed_by), "{} metaphors used".format(self.metaphors_used)])
[ "def to_strings(self):\n str1 = \"Matches: {0}\".format(self.matches)\n str2 = \"Inliers: {0}\".format(self.inliers)\n str3 = \"Inlier ratio: {0:.2f}\".format(self.ratio)\n str4 = \"Keypoints: {0}\".format(self.keypoints)\n str5 = \"FPS: {0:.2f}\".format(self.fps)\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Quick plot of a `tick.base.TimeFunction`
def plot_timefunction(time_function, labels=None, n_points=300, show=True, ax=None): if ax is None: fig, ax = plt.subplots(1, 1, figsize=(4, 4)) else: show = False if time_function.is_constant: if labels is None: labels = ['value = %.3g' % time_function.border_value] t_values = np.arange(10).astype('float') ax.plot(t_values, time_function.value(t_values), label=labels[0]) else: if labels is None: interpolation_to_legend = { TimeFunction.InterLinear: 'Linear', TimeFunction.InterConstLeft: 'Constant on left', TimeFunction.InterConstRight: 'Constant on right' } border_to_legend = { TimeFunction.Border0: 'border zero', TimeFunction.BorderConstant: 'border constant at %.3g' % time_function.border_value, TimeFunction.BorderContinue: 'border continue', TimeFunction.Cyclic: 'cyclic' } labels = [ 'original points', '%s and %s' % (interpolation_to_legend[time_function.inter_mode], border_to_legend[time_function.border_type]) ] original_t = time_function.original_t if time_function.border_type == TimeFunction.Cyclic: cycle_length = original_t[-1] original_t = np.hstack((original_t, original_t + cycle_length, original_t + 2 * cycle_length)) t_values = _extended_discrete_xaxis(original_t, n_points=n_points) ax.plot(time_function.original_t, time_function.original_y, ls='', marker='o', label=labels[0]) ax.plot(t_values, time_function.value(t_values), label=labels[1]) ax.legend() if show is True: plt.show() return ax.figure
[ "def test_plot_time_data():\n fig, ax = GlobalData.plot_time_data(timeStart=-1e-3, timeEnd=1e-3, units='ms', show_fig=False)\n return fig", "def cistime_py():\n timing.plot_scalings(compare='python')", "def plotTime(data,rate):\n t = np.arange(len(data))*1.0/rate\n \n #Plot time domain\n pl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates mapping from water measurements column names to indices of the given header.
def get_water_index_map(archive, header): column_re = { 'surface': { 'flow': 'pretok', 'level': 'vodostaj' }, 'ground': { 'altitude': 'nivo', 'level': 'vodostaj' } } column_map = {key: -1 for key in column_re[archive].keys()} empty = True # Do regex search of every db column for every CSV file column heading. for i, column in enumerate(header): for column_name in column_re[archive].keys(): if re.search(column_re[archive][column_name], column, re.IGNORECASE): if column_map[column_name] != -1: continue column_map[column_name] = i empty = False return None if empty else column_map
[ "def indices(header):\n return dict((n,i) for i,n in enumerate(header))", "def _create_field_header_index_dictionary(header):\n field_header_index_dict = {}\n for name in header:\n if name in call_data_field_dict.keys():\n field_header_index_dict[name] = header.index(name)\n\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }