code: string (lengths 51 to 2.34k)
docstring: string (lengths 11 to 171)
def _chk_docunknown(args, exp):
    unknown = []
    for arg in args:
        if arg[:2] == '--':
            val = arg[2:]
            if val not in exp:
                unknown.append(arg)
        elif arg[:1] == '-':
            val = arg[1:]
            if val not in exp:
                unknown.append(arg)
    if '-h' in unknown or '--help' in unknown:
        return []
    return unknown
Return any unknown args.
def _copy_from(self, rhs):
    self._manager = rhs._manager
    self._rlist = type(rhs._rlist)(rhs._rlist)
    self._region = rhs._region
    self._ofs = rhs._ofs
    self._size = rhs._size
    for region in self._rlist:
        region.increment_client_count()
    if self._region is not None:
        self._region.increment_client_count()
Copy all data from rhs into this instance; handles the usage count.
def predict(self, param_dict):
    encoder_dict = self._designmatrix_object.encoder
    X, col_names = self._designmatrix_object.run_encoder(param_dict, encoder_dict)
    Y_pred = self._compute_prediction(X)
    return Y_pred
Predict new waveforms using the multivariate fit.
def _from_dict(cls, _dict):
    args = {}
    if 'nodes_visited' in _dict:
        args['nodes_visited'] = [
            DialogNodesVisited._from_dict(x)
            for x in (_dict.get('nodes_visited'))
        ]
    if 'log_messages' in _dict:
        args['log_messages'] = [
            DialogLogMessage._from_dict(x)
            for x in (_dict.get('log_messages'))
        ]
    if 'branch_exited' in _dict:
        args['branch_exited'] = _dict.get('branch_exited')
    if 'branch_exited_reason' in _dict:
        args['branch_exited_reason'] = _dict.get('branch_exited_reason')
    return cls(**args)
Initialize a MessageOutputDebug object from a json dictionary.
def export_to_xml(self, block, xmlfile):
    root = etree.Element("unknown_root", nsmap=XML_NAMESPACES)
    tree = etree.ElementTree(root)
    block.add_xml_to_node(root)
    for aside in self.get_asides(block):
        if aside.needs_serialization():
            aside_node = etree.Element("unknown_root", nsmap=XML_NAMESPACES)
            aside.add_xml_to_node(aside_node)
            block.append(aside_node)
    tree.write(xmlfile, xml_declaration=True, pretty_print=True, encoding='utf-8')
Export the block to XML, writing the XML to `xmlfile`.
def install(python=PYTHON):
    local(
        "LIBRARY_PATH={library_path} CPATH={include_path} {python} "
        "setup.py build".format(
            library_path=LIBRARY_PATH,
            include_path=INCLUDE_PATH,
            python=python,
        ))
    local("sudo {python} setup.py install".format(python=python))
Install into site-packages
def content_id(self) -> Optional[UnstructuredHeader]:
    try:
        return cast(UnstructuredHeader, self[b'content-id'][0])
    except (KeyError, IndexError):
        return None
The ``Content-Id`` header.
def _get_host(name, array):
    host = None
    for temp in array.list_hosts():
        if temp['name'] == name:
            host = temp
            break
    return host
Private function to check host
def _set_default_refdata():
    global GRAPHTABLE, COMPTABLE, THERMTABLE, PRIMARY_AREA
    try:
        GRAPHTABLE = _refTable(os.path.join('mtab', '*_tmg.fits'))
        COMPTABLE = _refTable(os.path.join('mtab', '*_tmc.fits'))
    except IOError as e:
        GRAPHTABLE = None
        COMPTABLE = None
        warnings.warn('No graph or component tables found; '
                      'functionality will be SEVERELY crippled. ' + str(e))
    try:
        THERMTABLE = _refTable(os.path.join('mtab', '*_tmt.fits'))
    except IOError as e:
        THERMTABLE = None
        warnings.warn('No thermal tables found, '
                      'no thermal calculations can be performed. ' + str(e))
    PRIMARY_AREA = 45238.93416
    set_default_waveset()
Default refdata set on import.
def comma_separated_list(self, node, subnodes):
    for item in subnodes:
        position = (item.last_line, item.last_col)
        first, last = find_next_comma(self.lcode, position)
        if first:
            node.op_pos.append(NodeWithPosition(last, first))
Process comma separated list
def _find_usage_environments(self):
    environments = self.conn.describe_environments()
    self.limits['Environments']._add_current_usage(
        len(environments['Environments']),
        aws_type='AWS::ElasticBeanstalk::Environment',
    )
find usage for ElasticBeanstalk environments
def solve_tuple(expr, vars):
    result = tuple(solve(x, vars).value for x in expr.children)
    return Result(result, ())
Build a tuple from subexpressions.
def write(self, file):
    render(
        self.template,
        file,
        benchmarks=self.benchmarks,
        hostname=socket.gethostname(),
    )
Write YAML campaign template to the given open file
def im_messages_others(self, room_id, **kwargs):
    return self.__call_api_get('im.messages.others', roomId=room_id, kwargs=kwargs)
Retrieves the messages from any direct message in the server
def DEFINE_integer(self, name, default, help, constant=False):
    self.AddOption(
        type_info.Integer(name=name, default=default, description=help),
        constant=constant)
A helper for defining integer options.
def dicomdir_info(dirpath, *args, **kwargs):
    dr = DicomReader(dirpath=dirpath, *args, **kwargs)
    info = dr.dicomdirectory.get_stats_of_series_in_dir()
    return info
Get information about series in dir
def setup_auth(**keys):
    auth = tweepy.OAuthHandler(consumer_key=keys['consumer_key'],
                               consumer_secret=keys['consumer_secret'])
    auth.set_access_token(
        key=keys.get('token', keys.get('key', keys.get('oauth_token'))),
        secret=keys.get('secret', keys.get('oauth_secret'))
    )
    return auth
Set up Tweepy authentication using passed args or config file settings.
def list_slack():
    try:
        token = os.environ['SLACK_TOKEN']
        slack = Slacker(token)
        response = slack.channels.list()
        channels = response.body['channels']
        for channel in channels:
            print(channel['id'], channel['name'])
        print()
        response = slack.users.list()
        users = response.body['members']
        for user in users:
            if not user['deleted']:
                print(user['id'], user['name'], user['is_admin'], user['is_owner'])
        print()
    except KeyError as ex:
        print('Environment variable %s not set.' % str(ex))
List channels & users in slack.
def deprecated(function, instead):
    if not isinstance(function, types.FunctionType):
        return function

    @wraps(function)
    def wrap(*args, **kwargs):
        warnings.warn("Deprecated, use %s instead" % instead,
                      PyGIDeprecationWarning)
        return function(*args, **kwargs)

    return wrap
Mark a function deprecated so calling it issues a warning
def estimateabundance(self):
    logging.info('Estimating abundance of taxonomic groups')
    for i in range(self.cpus):
        threads = Thread(target=self.estimate, args=())
        threads.setDaemon(True)
        threads.start()
    with progressbar(self.runmetadata.samples) as bar:
        for sample in bar:
            try:
                if sample.general.combined != 'NA':
                    sample.general.abundance = sample.general.combined.split('.')[0] + '_abundance.csv'
                    if not sample.commands.datastore:
                        sample.commands = GenObject()
                    sample.commands.target = self.targetcall
                    sample.commands.classify = self.classifycall
                    sample.commands.abundancecall = \
                        'cd {} && ./estimate_abundance.sh -D {} -F {} > {}'.format(
                            self.clarkpath, self.databasepath,
                            sample.general.classification, sample.general.abundance)
                    self.abundancequeue.put(sample)
            except KeyError:
                pass
    self.abundancequeue.join()
Estimate the abundance of taxonomic groups
def slicenet_params1_noam():
    hparams = slicenet_params1()
    hparams.learning_rate_decay_scheme = "noam"
    hparams.learning_rate = 1.0
    hparams.learning_rate_warmup_steps = 4000
    hparams.initializer = "uniform_unit_scaling"
    hparams.optimizer_adam_epsilon = 1e-9
    hparams.optimizer_adam_beta1 = 0.9
    hparams.optimizer_adam_beta2 = 0.98
    return hparams
Version with Noam's decay scheme.
def _decode(value):
    if value.isdigit():
        return int(value)
    if isinstance(value, bytes):
        return value.decode('utf-8')
    else:
        return value
decode byte strings and convert to int where needed
def rec(self):
    try:
        self._snapshot()
    except Exception as e:
        self.log("Timer error: ", e, type(e), lvl=error)
Records a single snapshot
def display_variogram_model(self):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(self.lags, self.semivariance, 'r*')
    ax.plot(self.lags,
            self.variogram_function(self.variogram_model_parameters, self.lags), 'k-')
    plt.show()
Displays variogram model with the actual binned data.
def _rlfunc(rl, lz, pot):
    thisvcirc = vcirc(pot, rl, use_physical=False)
    return rl * thisvcirc - lz
Function that gives rvc-lz
def image_channel_embeddings_bottom(x, model_hparams, vocab_size):
    del vocab_size
    inputs = tf.to_int32(x)
    io_depth = model_hparams.num_channels
    tshape = common_layers.shape_list(inputs)
    hidden_size = model_hparams.hidden_size
    target_embeddings = cia.get_channel_embeddings(
        io_depth, inputs, hidden_size, "input_bottom")
    return tf.reshape(target_embeddings,
                      [tshape[0], tshape[1], tshape[2] * io_depth, hidden_size])
Bottom transformation for image targets.
def update_value(self, key, value):
    if key == "Status":
        self._inhibited = value != "Enabled"
    elif key == "Color temperature":
        self._temperature = int(value.rstrip("K"), 10)
    elif key == "Period":
        self._period = value
    elif key == "Brightness":
        self._brightness = value
    elif key == "Location":
        location = []
        for x in value.split(", "):
            v, d = x.split(" ")
            location.append(float(v) * (1 if d in "NE" else -1))
        self._location = (location)
Parse key value pairs to update their values
def register_lsp_server_settings(self, settings, language):
    self.lsp_editor_settings[language] = settings
    logger.debug('LSP server settings for {!s} are: {!r}'.format(
        language, settings))
    self.lsp_server_ready(language, self.lsp_editor_settings[language])
Register LSP server settings.
def parse_children(parent):
    components = []
    for tag in parent.children:
        matched = parse_tag(tag)
        if matched:
            components.append(matched)
        elif hasattr(tag, 'contents'):
            components += parse_children(tag)
    return components
Recursively parse child tags until match is found
def _per_file_event_handler(self):
    file_event_handler = PatternMatchingEventHandler()
    file_event_handler.on_created = self._on_file_created
    file_event_handler.on_modified = self._on_file_modified
    file_event_handler.on_moved = self._on_file_moved
    file_event_handler._patterns = [
        os.path.join(self._watch_dir, os.path.normpath('*'))]
    file_event_handler._ignore_patterns = [
        '*/.*', '*.tmp', os.path.join(self._run.dir, OUTPUT_FNAME)
    ]
    for glob in self._api.settings("ignore_globs"):
        file_event_handler._ignore_patterns.append(
            os.path.join(self._run.dir, glob))
    return file_event_handler
Create a Watchdog file event handler that does different things for every file
def _convert_exception(e):
    args = ('exception in ldap backend: {0}'.format(repr(e)), e)
    if six.PY2:
        six.reraise(LDAPError, args, sys.exc_info()[2])
    else:
        six.raise_from(LDAPError(*args), e)
Convert an ldap backend exception to an LDAPError and raise it.
def instantiate_tasks(self):
    self.tasks_instances = {}
    for task_name, task_class in self.tasks_classes.items():
        try:
            self.tasks_instances[task_name] = task_class()
        except Exception as ex:
            if not self.configuration[Configuration.ALGORITHM][Configuration.IOSF]:
                raise GOSTaskException("An exception happened during the task instantiation."
                                       "{exception}".format(exception=ex))
All loaded tasks are initialized. Depending on the configuration, failures during instantiation may be silent.
def start(self):
    self.is_collocated = bool(
        socket.gethostname() == self.config.slaveinput['cov_master_host']
        and self.topdir == self.config.slaveinput['cov_master_topdir'])
    if not self.is_collocated:
        master_topdir = self.config.slaveinput['cov_master_topdir']
        slave_topdir = self.topdir
        self.cov_source = [source.replace(master_topdir, slave_topdir)
                           for source in self.cov_source]
        self.cov_data_file = self.cov_data_file.replace(master_topdir, slave_topdir)
        self.cov_config = self.cov_config.replace(master_topdir, slave_topdir)
    self.cov_data_file += '.%s' % self.nodeid
    self.cov = coverage.coverage(source=self.cov_source,
                                 data_file=self.cov_data_file,
                                 config_file=self.cov_config)
    self.cov.erase()
    self.cov.start()
    self.set_env()
Determine what data file and suffix to contribute to and start coverage.
def add_parameter(self, param_name, description=None, default=0, unit=None):
    if description is None:
        description = "Parameter called {}".format(param_name)
    if unit is None:
        unit = "-"
    name_check = lambda x: x['name'] == param_name
    name_check_list = list(filter(name_check, self.ext_params))
    if len(name_check_list) == 0:
        self.ext_params.append({'name': param_name,
                                'description': description,
                                'default': default,
                                'unit': unit})
    else:
        print('{} already exists - choose a different name'.format(param_name))
Add a global parameter to the database that can be accessed by functions
def getThirdPartyLibIncludeDirs(self, libs):
    platformDefaults = True
    if libs[0] == '--nodefaults':
        platformDefaults = False
        libs = libs[1:]
    details = self.getThirdpartyLibs(libs, includePlatformDefaults=platformDefaults)
    return details.getIncludeDirectories(self.getEngineRoot(), delimiter='\n')
Retrieves the list of include directories for building against the Unreal-bundled versions of the specified third-party libraries
def build_option_parser(parser):
    parser.add_argument(
        "--os-data-processing-api-version",
        metavar="<data-processing-api-version>",
        default=utils.env(
            'OS_DATA_PROCESSING_API_VERSION',
            default=DEFAULT_DATA_PROCESSING_API_VERSION),
        help=("Data processing API version, default=" +
              DEFAULT_DATA_PROCESSING_API_VERSION +
              ' (Env: OS_DATA_PROCESSING_API_VERSION)'))
    parser.add_argument(
        "--os-data-processing-url",
        default=utils.env("OS_DATA_PROCESSING_URL"),
        help=("Data processing API URL, "
              "(Env: OS_DATA_PROCESSING_API_URL)"))
    return parser
Hook to add global options.
def load(obj, settings_module, identifier="py", silent=False, key=None):
    mod, loaded_from = get_module(obj, settings_module, silent)
    if mod and loaded_from:
        obj.logger.debug("py_loader: {}".format(mod))
    else:
        obj.logger.debug(
            "py_loader: %s (Ignoring, Not Found)", settings_module
        )
        return
    for setting in dir(mod):
        if setting.isupper():
            if key is None or key == setting:
                setting_value = getattr(mod, setting)
                obj.logger.debug(
                    "py_loader: loading %s: %s (%s)",
                    setting,
                    "*****" if "secret" in settings_module else setting_value,
                    identifier,
                )
                obj.set(setting, setting_value, loader_identifier=identifier)
    obj._loaded_files.append(mod.__file__)
Tries to import a python module
def _pick_align_split_size(total_size, target_size, target_size_reads, max_splits):
    if total_size // target_size > max_splits:
        piece_size = total_size // max_splits
        return int(piece_size * target_size_reads / target_size)
    else:
        return int(target_size_reads)
Do the work of picking an alignment split size for the given criteria.
def _get_filekey(self):
    if not os.path.exists(self.keyfile):
        raise KPError('Keyfile not exists.')
    try:
        with open(self.keyfile, 'rb') as handler:
            handler.seek(0, os.SEEK_END)
            size = handler.tell()
            handler.seek(0, os.SEEK_SET)
            if size == 32:
                return handler.read(32)
            elif size == 64:
                try:
                    return binascii.unhexlify(handler.read(64))
                except (TypeError, binascii.Error):
                    handler.seek(0, os.SEEK_SET)
            sha = SHA256.new()
            while True:
                buf = handler.read(2048)
                sha.update(buf)
                if len(buf) < 2048:
                    break
            return sha.digest()
    except IOError as e:
        raise KPError('Could not read file: %s' % e)
This method creates a key from a keyfile.
def _gather_image_parts(self):
    for rel in self.iter_rels():
        if rel.is_external:
            continue
        if rel.reltype != RT.IMAGE:
            continue
        if rel.target_part in self.image_parts:
            continue
        self.image_parts.append(rel.target_part)
Load the image part collection with all the image parts in package.
def count_partitions(self, topic):
    return sum(1 for p in topic.partitions if p in self.partitions)
Return count of partitions for given topic.
def update(self, branch='default'):
    log.debug('Updating hg repo from hg_pillar module (pull)')
    self.repo.pull()
    log.debug('Updating hg repo from hg_pillar module (update)')
    self.repo.update(branch, clean=True)
Ensure we are using the latest revision in the hg repository
def change_tunnel_ad_url(self):
    if self.is_open:
        self.close()
    req = requests.delete('https://api.psiturk.org/api/tunnel/',
                          auth=(self.access_key, self.secret_key))
    if req.status_code in [401, 403, 500]:
        print(req.content)
        return False
Change tunnel ad url.
def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):
    i = 0
    p = Progress()
    for path in paths:
        with open(path, 'r') as f:
            for line in f:
                i += 1
                p.print_progress(i / n)
                line = line.lower()
                if sentences:
                    for sent in sent_tokenize(line):
                        tokens = tokenizer(sent)
                        yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
                else:
                    tokens = tokenizer(line)
                    yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
Generator to feed sentences to the doc2vec model.
def device_info(device):
    status = subprocess.check_output([
        'ibstat', device, '-s']).splitlines()
    regexes = {
        "CA type: (.*)": "device_type",
        "Number of ports: (.*)": "num_ports",
        "Firmware version: (.*)": "fw_ver",
        "Hardware version: (.*)": "hw_ver",
        "Node GUID: (.*)": "node_guid",
        "System image GUID: (.*)": "sys_guid",
    }
    device = DeviceInfo()
    for line in status:
        for expression, key in regexes.items():
            matches = re.search(expression, line)
            if matches:
                setattr(device, key, matches.group(1))
    return device
Returns a DeviceInfo object with the current device settings
def _open(self, path, skip_to_end=True, offset=None):
    fh = os.fdopen(os.open(path, os.O_RDONLY | os.O_NONBLOCK))
    if offset is None:
        if skip_to_end:
            fh.seek(0, 2)
            self._offset = fh.tell()
        else:
            self._offset = 0
    else:
        fh.seek(offset)
        self._offset = fh.tell()
    self._fh = fh
    self._lastsize = fh.tell()
    self._inode = os.stat(self._path).st_ino
Open `path`, optionally seeking to the end if `skip_to_end` is True.
def address(self, is_compressed=None):
    return self._network.address.for_p2pkh(self.hash160(is_compressed=is_compressed))
Return the public address representation of this key, if available.
def reset_defaults(self):
    self.save_login.setChecked(False)
    self.save_password.setChecked(False)
    self.save_url.setChecked(False)
    set_setting(GEONODE_USER, '')
    set_setting(GEONODE_PASSWORD, '')
    set_setting(GEONODE_URL, '')
    self.login.setText('')
    self.password.setText('')
    self.url.setText('')
Reset login and password in QgsSettings.
def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
    return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
        space_id,
        '/environments/{0}'.format(environment_id) if environment_id is not None else '',
        content_type_id
    )
Returns the URI for the editor interface.
def _sample_batch_prioritized(self, segment_tree, batch_size, history, forward_steps=1):
    p_total = segment_tree.total()
    segment = p_total / batch_size
    batch = [
        self._get_sample_from_segment(segment_tree, segment, i, history, forward_steps)
        for i in range(batch_size)
    ]
    probs, idxs, tree_idxs = zip(*batch)
    return np.array(probs), np.array(idxs), np.array(tree_idxs)
Return indexes of the next sample from the prioritized distribution.
def step_use_curdir_as_working_directory(context):
    context.workdir = os.path.abspath(".")
    command_util.ensure_workdir_exists(context)
Uses the current directory as working directory
def calc_std(c0, c1=[]):
    if c1 == []:
        return numpy.std(c0, 0)
    prop = float(len(c0)) / float(len(c1))
    if prop < 1:
        p0 = int(math.ceil(1 / prop))
        p1 = 1
    else:
        p0 = 1
        p1 = int(math.ceil(prop))
    return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
Calculates the standard deviation of the data.
def create_arguments(primary, pyfunction, call_node, scope):
    args = list(call_node.args)
    args.extend(call_node.keywords)
    called = call_node.func
    if _is_method_call(primary, pyfunction) and \
            isinstance(called, ast.Attribute):
        args.insert(0, called.value)
    return Arguments(args, scope)
A factory for creating `Arguments`
def register_token_network(
    self,
    token_registry_abi: Dict,
    token_registry_address: str,
    token_address: str,
    channel_participant_deposit_limit: Optional[int],
    token_network_deposit_limit: Optional[int],
):
    with_limits = contracts_version_expects_deposit_limits(self.contracts_version)
    if with_limits:
        return self._register_token_network_with_limits(
            token_registry_abi,
            token_registry_address,
            token_address,
            channel_participant_deposit_limit,
            token_network_deposit_limit,
        )
    else:
        return self._register_token_network_without_limits(
            token_registry_abi,
            token_registry_address,
            token_address,
            channel_participant_deposit_limit,
            token_network_deposit_limit,
        )
Register token with a TokenNetworkRegistry contract.
def _write_values(kwargs, variables):
    writeto = []
    for var_name, value in kwargs.items():
        var = variables[var_name]
        var.notify_will_write()
        var.write(value)
        writeto.append(var)
    return _notify_reader_writes(writeto)
Write values of kwargs and return thus-satisfied closures.
def touch(self, pathobj):
    if not pathobj.drive or not pathobj.root:
        raise RuntimeError('Full path required')
    if pathobj.exists():
        return
    url = str(pathobj)
    text, code = self.rest_put(url, session=pathobj.session,
                               verify=pathobj.verify, cert=pathobj.cert)
    if not code == 201:
        raise RuntimeError("%s %d" % (text, code))
Create an empty file
def to_data_frame(sc, features, labels, categorical=False):
    lp_rdd = to_labeled_point(sc, features, labels, categorical)
    sql_context = SQLContext(sc)
    df = sql_context.createDataFrame(lp_rdd)
    return df
Convert numpy arrays of features and labels into Spark DataFrame
def v_type_base(ctx, stmt, no_error_report=False):
    name = stmt.arg
    stmt.i_identity = None
    if name.find(":") == -1:
        prefix = None
    else:
        [prefix, name] = name.split(':', 1)
    if prefix is None or stmt.i_module.i_prefix == prefix:
        pmodule = stmt.i_module
    else:
        pmodule = prefix_to_module(stmt.i_module, prefix, stmt.pos, ctx.errors)
        if pmodule is None:
            return
    if name in pmodule.i_identities:
        i = pmodule.i_identities[name]
        if prefix is None and not is_submodule_included(stmt, i):
            pass
        else:
            stmt.i_identity = i
            v_type_identity(ctx, stmt.i_identity)
    if stmt.i_identity is None and no_error_report == False:
        err_add(ctx.errors, stmt.pos, 'IDENTITY_NOT_FOUND', (name, pmodule.arg))
Verify that the referenced identity exists.
def repo_tools(self, branch):
    tools = []
    m_helper = Tools()
    repo = self.parentApp.repo_value['repo']
    version = self.parentApp.repo_value['versions'][branch]
    status = m_helper.repo_tools(repo, branch, version)
    if status[0]:
        r_tools = status[1]
        for tool in r_tools:
            tools.append(tool[0])
    return tools
Set the appropriate repo dir and get the tools available in it.
def RGBA(self, val):
    val = np.atleast_1d(val).astype(np.float32) / 255
    self.rgba = val
Set the color using an Nx4 array of RGBA uint8 values
def smooth_rectangle(x, y, rec_w, rec_h, gaussian_width_x, gaussian_width_y):
    gaussian_x_coord = abs(x) - rec_w / 2.0
    gaussian_y_coord = abs(y) - rec_h / 2.0
    box_x = np.less(gaussian_x_coord, 0.0)
    box_y = np.less(gaussian_y_coord, 0.0)
    sigmasq_x = gaussian_width_x * gaussian_width_x
    sigmasq_y = gaussian_width_y * gaussian_width_y
    with float_error_ignore():
        falloff_x = x * 0.0 if sigmasq_x == 0.0 else \
            np.exp(np.divide(-gaussian_x_coord * gaussian_x_coord, 2 * sigmasq_x))
        falloff_y = y * 0.0 if sigmasq_y == 0.0 else \
            np.exp(np.divide(-gaussian_y_coord * gaussian_y_coord, 2 * sigmasq_y))
    return np.minimum(np.maximum(box_x, falloff_x),
                      np.maximum(box_y, falloff_y))
Rectangle with a solid central region, then Gaussian fall-off at the edges.
def help_text(self):
    result = []
    for name in sorted(self._declarations.keys()):
        result.append(name)
        result.append('-' * len(name))
        decl = self._declarations[name]
        if decl.description:
            result.append(decl.description.strip())
        else:
            result.append('(no description found)')
        if decl.has_default:
            result.append('')
            quotes = '"' if type(decl.default_value) is str else ''
            result.append(' default_value={quotes}{val}{quotes}'.format(
                quotes=quotes, val=decl.default_value))
        result.append('')
        result.append('')
    return '\n'.join(result)
Return a string with all config keys and their descriptions.
def read_json_breakdown(cls, fname):
    if not os.path.exists(fname):
        raise RuntimeError
    with open(fname, 'r') as data_file:
        return cls.fixup_from_json(data_file.read())
Read json file to get fixture data
def url(self, name):
    scheme = 'http'
    path = self._prepend_name_prefix(name)
    query = ''
    fragment = ''
    url_tuple = (scheme, self.netloc, path, query, fragment)
    return urllib.parse.urlunsplit(url_tuple)
Return URL of resource
def default_working_dir():
    import nameset.virtualchain_hooks as virtualchain_hooks
    return os.path.expanduser('~/.{}'.format(virtualchain_hooks.get_virtual_chain_name()))
Get the default configuration directory for blockstackd
def _update_capacity(self, data):
    if 'ConsumedCapacity' in data:
        consumed = data['ConsumedCapacity']
        if not isinstance(consumed, list):
            consumed = [consumed]
        for cap in consumed:
            self.capacity += cap.get('CapacityUnits', 0)
            self.table_capacity += cap.get('Table', {}).get('CapacityUnits', 0)
            local_indexes = cap.get('LocalSecondaryIndexes', {})
            for k, v in six.iteritems(local_indexes):
                self.indexes.setdefault(k, 0)
                self.indexes[k] += v['CapacityUnits']
            global_indexes = cap.get('GlobalSecondaryIndexes', {})
            for k, v in six.iteritems(global_indexes):
                self.global_indexes.setdefault(k, 0)
                self.global_indexes[k] += v['CapacityUnits']
Update the consumed capacity metrics
def append_sample(self, v, vartype, _left=False):
    vstr = str(v).rjust(2)
    length = len(vstr)
    if vartype is dimod.SPIN:
        def f(datum):
            return _spinstr(datum.sample[v], rjust=length)
    else:
        def f(datum):
            return _binarystr(datum.sample[v], rjust=length)
    self.append(vstr, f, _left=_left)
Add a sample column
def _sub_nat(self):
    result = np.zeros(len(self), dtype=np.int64)
    result.fill(iNaT)
    return result.view('timedelta64[ns]')
Subtract pd.NaT from self
def _convert_dns_answer(cls, answer: dns.resolver.Answer) \
        -> Iterable[AddressInfo]:
    assert answer.rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA)
    if answer.rdtype == dns.rdatatype.A:
        family = socket.AF_INET
    else:
        family = socket.AF_INET6
    for record in answer:
        ip_address = record.to_text()
        if family == socket.AF_INET6:
            flow_info, control_id = cls._get_ipv6_info(ip_address)
        else:
            flow_info = control_id = None
        yield AddressInfo(ip_address, family, flow_info, control_id)
Convert the DNS answer to address info.
def copy_id(self):
    stdout, stderr, retcode = self._run_cmd(self._copy_id_str_old())
    if salt.defaults.exitcodes.EX_OK != retcode and 'Usage' in stderr:
        stdout, stderr, retcode = self._run_cmd(self._copy_id_str_new())
    return stdout, stderr, retcode
Execute ssh-copy-id to plant the id file on the target
def store_from(self, last_level_store):
    assert isinstance(last_level_store, Cache), \
        "last_level needs to be a Cache object."
    assert last_level_store.store_to is None, \
        "last_level_store must be a last level cache (.store_to is None)."
    self.last_level_store = last_level_store
Set the level to store to.
def make_path(phase) -> str:
    return "{}/{}{}{}".format(conf.instance.output_path,
                              phase.phase_path,
                              phase.phase_name,
                              phase.phase_tag)
Create the path to the folder at which the metadata and optimizer pickle should be saved
def median(timeseries, segmentlength, **kwargs):
    if scipy_version <= '1.1.9999':
        raise ValueError(
            "median average PSD estimation requires scipy >= 1.2.0",
        )
    kwargs.setdefault('average', 'median')
    return welch(timeseries, segmentlength, **kwargs)
Calculate a PSD using Welch's method with a median average
def loadd(self, ava):
    if "attributes" in ava:
        for key, val in ava["attributes"].items():
            self.attributes[key] = val
    try:
        self.tag = ava["tag"]
    except KeyError:
        if not self.tag:
            raise KeyError("ExtensionElement must have a tag")
    try:
        self.namespace = ava["namespace"]
    except KeyError:
        if not self.namespace:
            raise KeyError("ExtensionElement must belong to a namespace")
    try:
        self.text = ava["text"]
    except KeyError:
        pass
    if "children" in ava:
        for item in ava["children"]:
            self.children.append(ExtensionElement(item["tag"]).loadd(item))
    return self
expects a special set of keys
def delete_account(self, data):
    error = False
    msg = ""
    username = self.user_manager.session_username()
    result = self.database.users.find_one_and_delete(
        {"username": username, "email": data.get("delete_email", "")})
    if not result:
        error = True
        msg = _("The specified email is incorrect.")
    else:
        self.database.submissions.remove({"username": username})
        self.database.user_tasks.remove({"username": username})
        all_courses = self.course_factory.get_all_courses()
        for courseid, course in all_courses.items():
            if self.user_manager.course_is_open_to_user(course, username):
                self.user_manager.course_unregister_user(course, username)
        self.user_manager.disconnect_user()
        raise web.seeother("/index")
    return msg, error
Delete account from DB
def majority(image, mask=None, iterations=1):
    global majority_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, majority_table, False, iterations)
    if not mask is None:
        result[~mask] = image[~mask]
    return result
A pixel takes the value of the majority of its neighbors
def _get_struct_shapewithstyle(self, shape_number):
    obj = _make_object("ShapeWithStyle")
    obj.FillStyles = self._get_struct_fillstylearray(shape_number)
    obj.LineStyles = self._get_struct_linestylearray(shape_number)
    bc = BitConsumer(self._src)
    obj.NumFillBits = n_fill_bits = bc.u_get(4)
    obj.NumlineBits = n_line_bits = bc.u_get(4)
    obj.ShapeRecords = self._get_shaperecords(
        n_fill_bits, n_line_bits, shape_number)
    return obj
Get the values for the SHAPEWITHSTYLE record.
def _feature_returner(self, **kwargs):
    kwargs.setdefault('dialect', self.dialect)
    kwargs.setdefault('keep_order', self.keep_order)
    kwargs.setdefault('sort_attribute_values', self.sort_attribute_values)
    return Feature(**kwargs)
Returns a feature, adding additional database-specific defaults
def projR(gamma, p):
    return np.multiply(gamma.T, p / np.maximum(np.sum(gamma, axis=1), 1e-10)).T
Return the KL projection on the row constraints.
def _drop_indices(self):
    self._logger.info('Dropping database indices')
    self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
    self._logger.info('Finished dropping database indices')
Drops the database indices relating to n-grams.
def basic_map(proj):
    fig = plt.figure(figsize=(15, 10))
    add_metpy_logo(fig, 0, 80, size='large')
    view = fig.add_axes([0, 0, 1, 1], projection=proj)
    view.set_extent([-120, -70, 20, 50])
    view.add_feature(cfeature.STATES.with_scale('50m'))
    view.add_feature(cfeature.OCEAN)
    view.add_feature(cfeature.COASTLINE)
    view.add_feature(cfeature.BORDERS, linestyle=':')
    return fig, view
Make our basic default map for plotting
def register(self, scope, *args, **kwargs):
    self._assert_not_frozen()
    self.get_parser(scope).register(*args, **kwargs)
    deprecated_scope = self.known_scope_to_info[scope].deprecated_scope
    if deprecated_scope:
        self.get_parser(deprecated_scope).register(*args, **kwargs)
Register an option in the given scope.
def reset_weights(self):
    if self.fk.isAPF:
        lw = (rs.log_mean_exp(self.logetat, W=self.W)
              - self.logetat[self.A])
        self.wgts = rs.Weights(lw=lw)
    else:
        self.wgts = rs.Weights()
Reset weights after a resampling step.
async def status_by_state(self, state: str) -> dict:
    data = await self.raw_cdc_data()
    try:
        info = next((v for k, v in data.items() if state in k))
    except StopIteration:
        return {}
    return adjust_status(info)
Return the CDC status for the specified state.
def ranges(self):
    ranges = self._target.getRanges()
    return map(SheetAddress._from_uno, ranges)
Returns a list of addresses with source data.
def _get_cells(self, left, right, vertical):
    if vertical:
        vcells = max(sum(self._shape(l)[0] for l in left),
                     self._shape(right)[0])
        hcells = (max(self._shape(l)[1] for l in left) +
                  self._shape(right)[1])
    else:
        vcells = max([self._shape(l)[0] for l in left] +
                     [self._shape(right)[0]])
        hcells = sum([self._shape(l)[1] for l in left] +
                     [self._shape(right)[1]])
    return hcells, vcells
Calculate appropriate figure size based on left and right data.
def getFixedStarList(IDs, date):
    starList = [getFixedStar(ID, date) for ID in IDs]
    return FixedStarList(starList)
Returns a list of fixed stars.
def returner(ret):
    serv = _get_serv(ret)
    json_return = salt.utils.json.dumps(ret['return'])
    del ret['return']
    json_full_ret = salt.utils.json.dumps(ret)
    if "influxdb08" in serv.__module__:
        req = [
            {
                'name': 'returns',
                'columns': ['fun', 'id', 'jid', 'return', 'full_ret'],
                'points': [
                    [ret['fun'], ret['id'], ret['jid'], json_return, json_full_ret]
                ],
            }
        ]
    else:
        req = [
            {
                'measurement': 'returns',
                'tags': {
                    'fun': ret['fun'],
                    'id': ret['id'],
                    'jid': ret['jid']
                },
                'fields': {
                    'return': json_return,
                    'full_ret': json_full_ret
                }
            }
        ]
    try:
        serv.write_points(req)
    except Exception as ex:
        log.critical('Failed to store return with InfluxDB returner: %s', ex)
Return data to an InfluxDB data store.
def _assemble_activeform(stmt):
    subj_str = _assemble_agent_str(stmt.agent)
    if stmt.is_active:
        is_active_str = 'active'
    else:
        is_active_str = 'inactive'
    if stmt.activity == 'activity':
        stmt_str = subj_str + ' is ' + is_active_str
    elif stmt.activity == 'kinase':
        stmt_str = subj_str + ' is kinase-' + is_active_str
    elif stmt.activity == 'phosphatase':
        stmt_str = subj_str + ' is phosphatase-' + is_active_str
    elif stmt.activity == 'catalytic':
        stmt_str = subj_str + ' is catalytically ' + is_active_str
    elif stmt.activity == 'transcription':
        stmt_str = subj_str + ' is transcriptionally ' + is_active_str
    elif stmt.activity == 'gtpbound':
        stmt_str = subj_str + ' is GTP-bound ' + is_active_str
    return _make_sentence(stmt_str)
Assemble ActiveForm statements into text.
def print_bytes(byte_str):
    if isinstance(byte_str, str):
        print(byte_str)
    else:
        print(str(byte_str, encoding='utf8'))
Prints a string or converts bytes to a string and then prints.
def all_macro_systems(network, state, do_blackbox=False, do_coarse_grain=False,
                      time_scales=None):
    if time_scales is None:
        time_scales = [1]

    def blackboxes(system):
        if not do_blackbox:
            return [None]
        return all_blackboxes(system)

    def coarse_grains(blackbox, system):
        if not do_coarse_grain:
            return [None]
        if blackbox is None:
            return all_coarse_grains(system)
        return all_coarse_grains_for_blackbox(blackbox)

    for system in utils.powerset(network.node_indices):
        for time_scale in time_scales:
            for blackbox in blackboxes(system):
                for coarse_grain in coarse_grains(blackbox, system):
                    try:
                        yield MacroSubsystem(
                            network, state, system,
                            time_scale=time_scale,
                            blackbox=blackbox,
                            coarse_grain=coarse_grain)
                    except (StateUnreachableError, ConditionallyDependentError):
                        continue
Generator over all possible macro-systems for the network.
def pitch(self):
    x, y, z, w = self.x, self.y, self.z, self.w
    return math.atan2(2*x*w - 2*y*z, 1 - 2*x*x - 2*z*z)
Calculates the Pitch of the Quaternion.
def entries_published(queryset):
    now = timezone.now()
    return queryset.filter(
        models.Q(start_publication__lte=now) |
        models.Q(start_publication=None),
        models.Q(end_publication__gt=now) |
        models.Q(end_publication=None),
        status=PUBLISHED,
        sites=Site.objects.get_current())
Return only the entries published.
def dtype_repr(dtype):
    dtype = np.dtype(dtype)
    if dtype == np.dtype(int):
        return "'int'"
    elif dtype == np.dtype(float):
        return "'float'"
    elif dtype == np.dtype(complex):
        return "'complex'"
    elif dtype.shape:
        return "('{}', {})".format(dtype.base, dtype.shape)
    else:
        return "'{}'".format(dtype)
Stringify ``dtype`` for ``repr`` with default for int and float.
def precision(precision, id_, hwid, type_):
    if id_ and (hwid or type_):
        raise click.BadOptionUsage(
            "If --id is given --hwid and --type are not allowed."
        )
    if id_:
        try:
            sensor = W1ThermSensor.get_available_sensors()[id_ - 1]
        except IndexError:
            raise click.BadOptionUsage(
                "No sensor with id {0} available. "
                "Use the ls command to show all available sensors.".format(id_)
            )
    else:
        sensor = W1ThermSensor(type_, hwid)
    sensor.set_precision(precision, persist=True)
Change the precision for the sensor and persist it in the sensor's EEPROM
def update(self):
    if self.device_time_check():
        if not self.in_process:
            outlets, switches, fans = self.get_devices()
            self.outlets = helpers.resolve_updates(self.outlets, outlets)
            self.switches = helpers.resolve_updates(
                self.switches, switches)
            self.fans = helpers.resolve_updates(self.fans, fans)
            self.last_update_ts = time.time()
Fetch updated information about devices
def fix_job_def(job_def):
    if six.PY2 and isinstance(job_def.get('func'), six.text_type):
        job_def['func'] = str(job_def.get('func'))
    if isinstance(job_def.get('start_date'), six.string_types):
        job_def['start_date'] = dateutil.parser.parse(job_def.get('start_date'))
    if isinstance(job_def.get('end_date'), six.string_types):
        job_def['end_date'] = dateutil.parser.parse(job_def.get('end_date'))
    if isinstance(job_def.get('run_date'), six.string_types):
        job_def['run_date'] = dateutil.parser.parse(job_def.get('run_date'))
    if isinstance(job_def.get('trigger'), dict):
        trigger = job_def.pop('trigger')
        job_def['trigger'] = trigger.pop('type', 'date')
        job_def.update(trigger)
Replaces datetime strings with datetime objects.
def retry(self):
    logger.info('Job {0} retrying all failed tasks'.format(self.name))
    self.initialize_snapshot()
    failed_task_names = []
    for task_name, log in self.run_log['tasks'].items():
        if log.get('success', True) == False:
            failed_task_names.append(task_name)
    if len(failed_task_names) == 0:
        raise DagobahError('no failed tasks to retry')
    self._set_status('running')
    self.run_log['last_retry_time'] = datetime.utcnow()
    logger.debug('Job {0} seeding run logs'.format(self.name))
    for task_name in failed_task_names:
        self._put_task_in_run_log(task_name)
        self.tasks[task_name].start()
    self._commit_run_log()
Restarts failed tasks of a job.
def digest_auth(realm, auth_func):
    def digest_auth_decorator(func):
        def func_replacement(self, *args, **kwargs):
            if self.get_authenticated_user(auth_func, realm):
                return func(self, *args, **kwargs)
        return func_replacement
    return digest_auth_decorator
A decorator used to protect methods with HTTP Digest authentication.
def from_json(s):
    d = json.loads(s)
    sbp = SBP.from_json_dict(d)
    return sbp
Given a JSON-encoded message, build an object.