Columns: code (string, lengths 51–2.34k), docstring (string, lengths 11–171)
def spin_sz(self): return conversions.secondary_spin(self.mass1, self.mass2, self.spin1z, self.spin2z)
Returns the z-component of the spin of the secondary mass.
def find_model(sender, model_name):
    MC = get_mc()
    model = MC.get((MC.c.model_name == model_name) & (MC.c.uuid != ''))
    if model:
        model_inst = model.get_instance()
        orm.set_model(model_name, model_inst.table_name, appname=__name__, model_path='')
        return orm.__models__.get(model_name)
Register new model to ORM
def phi_from_spinx_spiny(spinx, spiny):
    phi = numpy.arctan2(spiny, spinx)
    return phi % (2 * numpy.pi)
Returns the angle between the x-component axis and the in-plane spin.
def available():
    proc = popen_multiple(
        COMMANDS,
        ['-version'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        creationflags=PROC_FLAGS,
    )
    proc.wait()
    return proc.returncode == 0
Detect if the FFmpeg backend can be used on this system.
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel):
    class _Many2ManyRelatedManager(Many2ManyRelatedManager):
        pass
    _Many2ManyRelatedManager.formodel = formodel
    _Many2ManyRelatedManager.name_relmodel = name_relmodel
    _Many2ManyRelatedManager.name_formodel = name_formodel
    return _Many2ManyRelatedManager
formodel is the model which the manager is for.
def serial_number():
    sdata = {}
    if os.getuid() == 0:
        try:
            sdata['Serial'] = open('/sys/class/dmi/id/product_serial').read().strip()
        except:
            for line in os.popen('/usr/sbin/dmidecode -s system-serial-number'):
                sdata['Serial'] = line.strip()
    return sdata
Get the serial number. Requires root access
def biopax_process_pc_pathsbetween():
    if request.method == 'OPTIONS':
        return {}
    response = request.body.read().decode('utf-8')
    body = json.loads(response)
    genes = body.get('genes')
    bp = biopax.process_pc_pathsbetween(genes)
    return _stmts_from_proc(bp)
Process PathwayCommons paths between genes, return INDRA Statements.
def _update_nonce_explicit(self):
    ne = self.nonce_explicit + 1
    self.nonce_explicit = ne % 2**(self.nonce_explicit_len * 8)
Increment the explicit nonce while avoiding any overflow.
def armed(HEARTBEAT):
    from . import mavutil
    if HEARTBEAT.type == mavutil.mavlink.MAV_TYPE_GCS:
        self = mavutil.mavfile_global
        if self.motors_armed():
            return 1
        return 0
    if HEARTBEAT.base_mode & mavutil.mavlink.MAV_MODE_FLAG_SAFETY_ARMED:
        return 1
    return 0
return 1 if armed, 0 if not
def remove_property(xml_root, partial_name):
    if xml_root.tag in ("testsuites", "testcases", "requirements"):
        properties = xml_root.find("properties")
        remove_properties = []
        for prop in properties:
            prop_name = prop.get("name", "")
            if partial_name in prop_name:
                remove_properties.append(prop)
        for rem_prop in remove_properties:
            properties.remove(rem_prop)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
Removes properties if they exist.
def trigger(self):
    self.call_back(*self.args, **self.kwargs)
    if self.__timer is not None:
        self.__timer.cancel()
Calls the call_back function and interrupts the timer to start a new countdown.
def find(self, node, interval, start, end):
    data = []
    if len(interval) != 2:
        raise SyntaxError('Interval malformed %s. Always specify start and end position for interval.' % str(interval))
    left = (start, node[0])
    right = (node[0], end)
    if self.overlap(left, interval):
        data.extend(node[-1])
        if node[1] != -1:
            data.extend(self.find(node[1], interval, left[0], left[1]))
    if self.overlap(right, interval):
        data.extend(node[-1])
        if node[2] != -1:
            data.extend(self.find(node[2], interval, right[0], right[1]))
    return list(set(data))
recursively finds ids within a range
def flatten_all_but_last(a):
    ret = tf.reshape(a, [-1, tf.shape(a)[-1]])
    if not tf.executing_eagerly():
        ret.set_shape([None] + a.get_shape().as_list()[-1:])
    return ret
Flatten all dimensions of a except the last.
def calculate_descriptors(self, mol):
    self.ligand_atoms = {index: {"name": x.name} for index, x in enumerate(self.topology_data.universe.ligand_noH.atoms)}
    contribs = self.calculate_logP(mol)
    self.calculate_Gasteiger_charges(mol)
    fcharges = self.calculate_formal_charge(mol)
    for atom in self.ligand_atoms.keys():
        self.ligand_atoms[atom]["logP"] = contribs[atom][0]
        self.ligand_atoms[atom]["MR"] = contribs[atom][1]
        self.ligand_atoms[atom]["Gasteiger_ch"] = mol.GetAtomWithIdx(atom).GetProp("_GasteigerCharge")
        self.ligand_atoms[atom]["Formal charges"] = fcharges[atom]
    self.rot_bonds = self.get_rotatable_bonds(mol)
Calculates descriptors such as logP, charges and MR and saves them in a dictionary.
def contains(self, other): return (self.left <= other.left and self.right >= other.right and self.top >= other.top and self.bottom <= other.bottom)
Return true if the given shape is inside this rectangle.
def updateNetwork(self, dhcp='dhcp', ipaddress=None, netmask=None, gateway=None, dns=None):
    return self.__post('/api/updateNetwork', data={
        'dhcp': dhcp,
        'ipaddress': ipaddress,
        'netmask': netmask,
        'gateway': gateway,
        'dns': json.dumps(dns)
    })
Change the current network settings.
def apply_defaults(self, row, tables_dict):
    "apply defaults to missing cols for a row that's being inserted"
    return [
        emergency_cast(colx, field_default(colx, self.name, tables_dict) if v is Missing else v)
        for colx, v in zip(self.fields, row)
    ]
apply defaults to missing cols for a row that's being inserted
def getProperty(self, key, strip=True):
    self.__checkTransport()
    import collections
    MAP_PROPS = collections.OrderedDict([
        (re.compile('display.width'), self.__getDisplayWidth),
        (re.compile('display.height'), self.__getDisplayHeight),
        (re.compile('display.density'), self.__getDisplayDensity),
        (re.compile('display.orientation'), self.__getDisplayOrientation),
        (re.compile('.*'), self.__getProp),
    ])
    for kre in MAP_PROPS.keys():
        if kre.match(key):
            return MAP_PROPS[kre](key=key, strip=strip)
    raise ValueError("key='%s' does not match any map entry" % key)
Gets the property value for key
async def update_group_memory(self, memory_id, mode, name, slaves,
                              codectype=0x0040, bitrate=0x0003):
    act = self.service.action("X_UpdateGroupMemory")
    res = await act.async_call(MemoryID=memory_id, GroupMode=mode, GroupName=name,
                               SlaveList=slaves, CodecType=codectype, CodecBitrate=bitrate)
    return res
Update existing memory? Can be used to create new ones, too?
def remove(self, resource):
    if isinstance(resource, Resource):
        self._resources.remove(resource)
Removes a resource from the context
def next_address_avoid_collision(self, start_addr):
    i = 1
    while self.is_address_in_use(next_addr(start_addr, i)):
        i += 1
    return next_addr(start_addr, i)
Finds the next address that does not collide with any other address.
def finish(self):
    log.debug("Disconnecting from JLigier.")
    self.client.socket.shutdown(socket.SHUT_RDWR)
    self.client._disconnect()
Clean up the JLigier controlhost connection
def add_graph(patterns, G):
    if not patterns:
        patterns.append([G])
        return
    for i, graphs in enumerate(patterns):
        if networkx.is_isomorphic(graphs[0], G, node_match=type_match, edge_match=type_match):
            patterns[i].append(G)
            return
    patterns.append([G])
Add a graph to a set of unique patterns.
def bincount(dig, weight, minlength):
    if numpy.isscalar(weight):
        return numpy.bincount(dig, minlength=minlength) * weight
    else:
        return numpy.bincount(dig, weight, minlength)
bincount supporting scalar and vector weight
def should_checkpoint(self):
    result = self.last_result or {}
    if result.get(DONE) and self.checkpoint_at_end:
        return True
    if self.checkpoint_freq:
        return result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0
    else:
        return False
Whether this trial is due for checkpointing.
def json_dump_hook(cfg, text: bool = False):
    data = cfg.config.dump()
    if not text:
        json.dump(data, cfg.fd)
    else:
        return json.dumps(data)
Dumps all the data into a JSON file.
def _index_item(self, uri, num, batch_num):
    data = RdfDataset(get_all_item_data(uri, self.namespace), uri).base_class.es_json()
    self.batch_data[batch_num].append(data)
    self.count += 1
Queries the triplestore for an item and sends it to Elasticsearch.
def show(self, ax:plt.Axes=None, figsize:tuple=(3,3), title:Optional[str]=None,
         hide_axis:bool=True, cmap:str=None, y:Any=None, **kwargs):
    "Show image on `ax` with `title`, using `cmap` if single-channel, overlaid with optional `y`"
    cmap = ifnone(cmap, defaults.cmap)
    ax = show_image(self, ax=ax, hide_axis=hide_axis, cmap=cmap, figsize=figsize)
    if y is not None:
        y.show(ax=ax, **kwargs)
    if title is not None:
        ax.set_title(title)
Show image on `ax` with `title`, using `cmap` if single-channel, overlaid with optional `y`
def document_root_path(cls, project, database): return google.api_core.path_template.expand( "projects/{project}/databases/{database}/documents", project=project, database=database, )
Return a fully-qualified document_root string.
def _get_id2gos(self):
    kws = {}
    if self.args.ev_inc is not None:
        kws['ev_include'] = set(self.args.ev_inc.split(','))
    if self.args.ev_exc is not None:
        kws['ev_exclude'] = set(self.args.ev_exc.split(','))
    return self.objanno.get_id2gos(**kws)
Return annotations as id2gos
def hicexplorer_create_plot(self, pKeyList, pTitle, pId):
    keys = OrderedDict()
    for i, key_ in enumerate(pKeyList):
        keys[key_] = {'color': self.colors[i]}
    data = {}
    for data_ in self.mod_data:
        data['{}'.format(self.mod_data[data_]['File'][0])] = {}
        for key_ in pKeyList:
            data['{}'.format(self.mod_data[data_]['File'][0])][key_] = self.mod_data[data_][key_][0]
    config = {
        'id': 'hicexplorer_' + pId + '_plot',
        'title': pTitle,
        'ylab': 'Number of Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(data, keys, config)
Create the graphics containing information about the read quality.
def wifi_status(self): return self._info_json.get(CONST.STATUS, {}).get(CONST.WIFI_LINK)
Get the wifi status.
def addDocEntity(self, name, type, ExternalID, SystemID, content):
    ret = libxml2mod.xmlAddDocEntity(self._o, name, type, ExternalID, SystemID, content)
    if ret is None:
        raise treeError('xmlAddDocEntity() failed')
    __tmp = xmlEntity(_obj=ret)
    return __tmp
Register a new entity for this document.
def prompt(self):
    name = self.term.prompt_input('Enter page: /')
    if name:
        submission_pattern = re.compile(r'(^|/)comments/(?P<id>.+?)($|/)')
        match = submission_pattern.search(name)
        if match:
            url = 'https://www.reddit.com/comments/{0}'.format(match.group('id'))
            self.selected_page = self.open_submission_page(url)
        else:
            self.selected_page = self.open_subreddit_page(name)
Open a prompt to navigate to a different subreddit or comment.
def _call_brew(cmd, failhard=True):
    user = __salt__['file.get_user'](_homebrew_bin())
    runas = user if user != __opts__['user'] else None
    cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)
    result = __salt__['cmd.run_all'](cmd,
                                     runas=runas,
                                     output_loglevel='trace',
                                     python_shell=False)
    if failhard and result['retcode'] != 0:
        raise CommandExecutionError('Brew command failed', info={'result': result})
    return result
Calls the brew command as the user account that owns brew.
def synchronous(function, event):
    try:
        function(event)
    except Exception as error:
        logger = get_function_logger(function)
        logger.exception(error)
Runs the function synchronously taking care of exceptions.
def _validate_user(self, user):
    if self._show_all_users:
        return
    if user.id in self._allowed_user_ids:
        return
    user_member_group_ids = set([g['id'] for g in user._raw['groups']])
    if user_member_group_ids & self._allowed_member_ids:
        return
    raise ValidationError(
        self.record,
        'User `{}` is not a valid selection for field `{}`'.format(user, self.name)
    )
Validate a User instance against allowed user IDs or membership in a group
def _clearQuantity(self, offbids, gen):
    gOffbids = [offer for offer in offbids if offer.generator == gen]
    valid = [ob for ob in gOffbids if not ob.withheld]
    valid.sort(key=lambda ob: ob.price, reverse=[False, True][gen.is_load])
    acceptedQty = 0.0
    for ob in valid:
        accepted = (ob.totalQuantity - acceptedQty) / ob.quantity
        if accepted > 1.0:
            accepted = 1.0
        elif accepted < 1.0e-05:
            accepted = 0.0
        ob.clearedQuantity = accepted * ob.quantity
        ob.accepted = (accepted > 0.0)
        acceptedQty += ob.quantity
Computes the cleared bid quantity from total dispatched quantity.
def verts_in_common(self, segments):
    verts_by_segm = self.verts_by_segm
    return sorted(reduce(lambda s0, s1: s0.intersection(s1),
                         [set(verts_by_segm[segm]) for segm in segments]))
returns array of all vertex indices common to each segment in segments
def getJobIDsForResultsFile(self, resultsFile):
    jobIDs = []
    for line in self._runParasol(['-extended', 'list', 'jobs'])[1]:
        fields = line.strip().split()
        if len(fields) == 0 or fields[-1] != resultsFile:
            continue
        jobID = fields[0]
        jobIDs.append(int(jobID))
    return set(jobIDs)
Get all queued and running jobs for a results file.
def centroid(data, method=median):
    "returns the central vector of a list of vectors"
    out = []
    for i in range(len(data[0])):
        out.append(method([x[i] for x in data]))
    return tuple(out)
returns the central vector of a list of vectors
def funding_info(self, key, value): return { 'agency': value.get('a'), 'grant_number': value.get('c'), 'project_number': value.get('f'), }
Populate the ``funding_info`` key.
def setFigForm():
    fig_width_pt = 245.26 * 2
    inches_per_pt = 1.0 / 72.27
    golden_mean = (math.sqrt(5.) - 1.0) / 2.0
    fig_width = fig_width_pt * inches_per_pt
    fig_height = fig_width * golden_mean
    fig_size = [1.5 * fig_width, fig_height]
    params = {'backend': 'ps',
              'axes.labelsize': 12,
              'text.fontsize': 12,
              'legend.fontsize': 7,
              'xtick.labelsize': 11,
              'ytick.labelsize': 11,
              'text.usetex': True,
              'font.family': 'serif',
              'font.serif': 'Times',
              'image.aspect': 'auto',
              'figure.subplot.left': 0.1,
              'figure.subplot.bottom': 0.1,
              'figure.subplot.hspace': 0.25,
              'figure.figsize': fig_size}
    rcParams.update(params)
set the rcparams to EmulateApJ columnwidth=245.26 pts
def default_config_file(self):
    import os.path as p
    import pyemma
    return p.join(pyemma.__path__[0], Config.DEFAULT_CONFIG_FILE_NAME)
default config file living in PyEMMA package
def _cnn_tranch_filtering(in_file, vrn_files, tensor_type, data):
    out_file = "%s-filter.vcf.gz" % utils.splitext_plus(in_file)[0]
    if not utils.file_uptodate(out_file, in_file):
        runner = broad.runner_from_config(data["config"])
        gatk_type = runner.gatk_type()
        assert gatk_type == "gatk4", "CNN filtering requires GATK4"
        if "train_hapmap" not in vrn_files:
            raise ValueError("CNN filtering requires HapMap training inputs: %s" % vrn_files)
        with file_transaction(data, out_file) as tx_out_file:
            params = ["-T", "FilterVariantTranches", "--variant", in_file,
                      "--output", tx_out_file,
                      "--snp-truth-vcf", vrn_files["train_hapmap"],
                      "--indel-truth-vcf", vrn_files["train_indels"]]
            if tensor_type == "reference":
                params += ["--info-key", "CNN_1D", "--tranche", "99"]
            else:
                assert tensor_type == "read_tensor"
                params += ["--info-key", "CNN_2D", "--tranche", "99"]
            runner.run_gatk(params)
    return vcfutils.bgzip_and_index(out_file, data["config"])
Filter CNN scored VCFs in tranches using standard SNP and Indel truth sets.
def _wrap_universe(self, func):
    @wraps(func)
    def wrapper(graph, *args, **kwargs):
        if self.universe is None:
            raise MissingUniverseError(
                'Can not run universe function [{}] - No universe is set'.format(func.__name__))
        return func(self.universe, graph, *args, **kwargs)
    return wrapper
Takes a function that needs a universe graph as its first argument and returns a wrapped one.
def optional_else(self, node, last):
    if node.orelse:
        min_first_max_last(node, node.orelse[-1])
        if 'else' in self.operators:
            position = (node.orelse[0].first_line, node.orelse[0].first_col)
            _, efirst = self.operators['else'].find_previous(position)
            if efirst and efirst > last:
                elast, _ = self.operators[':'].find_previous(position)
                node.op_pos.append(NodeWithPosition(elast, efirst))
Create op_pos for optional else
def put_file(self, in_path, out_path):
    vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
    if not os.path.exists(in_path):
        raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
    try:
        shutil.copyfile(in_path, out_path)
    except shutil.Error:
        traceback.print_exc()
        raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
    except IOError:
        traceback.print_exc()
        raise errors.AnsibleError("failed to transfer file to %s" % out_path)
transfer a file from local to local
def create_attributes(klass, attributes, previous_object=None):
    if previous_object is not None:
        return {'name': attributes.get('name', previous_object.name)}
    return {
        'name': attributes.get('name', ''),
        'defaultLocale': attributes['default_locale']
    }
Attributes for space creation.
def _get_xml(self, metric):
    self._opener = urllib2.build_opener()
    self._opener.addheaders = [('User-agent', self.user_agent)]
    if metric:
        url = self.base_url + '?w={0}&u=c'.format(self.woeid)
    else:
        url = self.base_url + '?w={0}'.format(self.woeid)
    return etree.parse(self._opener.open(url)).getroot()[0]
Returns the channel element of the RSS feed
def run(self):
    new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
    cc = cache_cli(self.opts)
    cc.get_cached()
    cc.put_cache([new_mins])
    log.debug('ConCache CacheWorker update finished')
Gather currently connected minions and update the cache
def validate_pai_trial_conifg(experiment_config):
    if experiment_config.get('trainingServicePlatform') == 'pai':
        if experiment_config.get('trial').get('shmMB') and \
                experiment_config['trial']['shmMB'] > experiment_config['trial']['memoryMB']:
            print_error('shmMB should be no more than memoryMB!')
            exit(1)
validate the trial config in pai platform
def list_accounts(self):
    account = self.client['Account']
    mask = 'cdnAccounts[%s]' % ', '.join(['id',
                                          'createDate',
                                          'cdnAccountName',
                                          'cdnSolutionName',
                                          'cdnAccountNote',
                                          'status'])
    return account.getObject(mask=mask).get('cdnAccounts', [])
Lists CDN accounts for the active user.
def parse_sitelist(sitelist):
    sites = []
    for site in sitelist["Locations"]["Location"]:
        try:
            ident = site["id"]
            name = site["name"]
        except KeyError:
            ident = site["@id"]
            name = site["@name"]
        if "latitude" in site:
            lat = float(site["latitude"])
            lon = float(site["longitude"])
        else:
            lat = lon = None
        s = Site(ident, name, lat, lon)
        sites.append(s)
    return sites
Return list of Site instances from retrieved sitelist data
async def copy_to(self, dest, container, buffering=True):
    if self.eof:
        await dest.write(u'' if self.isunicode else b'', True)
    elif self.errored:
        await dest.error(container)
    else:
        try:
            while not self.eof:
                await self.prepareRead(container)
                data = self.readonce()
                try:
                    await dest.write(data, container, self.eof, buffering=buffering)
                except IOError:
                    break
        except:
            async def _cleanup():
                try:
                    await dest.error(container)
                except IOError:
                    pass
            container.subroutine(_cleanup(), False)
            raise
        finally:
            self.close(container.scheduler)
Coroutine method to copy content from this stream to another stream.
def upload_large_items(self):
    for local_file, parent in self.large_items:
        if local_file.need_to_send:
            self.process_large_file(local_file, parent)
Upload files that were too large.
def make_input_fn_from_generator(gen):
    first_ex = six.next(gen)
    flattened = tf.contrib.framework.nest.flatten(first_ex)
    types = [t.dtype for t in flattened]
    shapes = [[None] * len(t.shape) for t in flattened]
    first_ex_list = [first_ex]

    def py_func():
        if first_ex_list:
            example = first_ex_list.pop()
        else:
            example = six.next(gen)
        return tf.contrib.framework.nest.flatten(example)

    def input_fn():
        flat_example = tf.py_func(py_func, [], types)
        _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
        example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
        return example

    return input_fn
Use py_func to yield elements from the given generator.
def restore_descriptor(self, dataframe):
    fields = []
    primary_key = None
    if dataframe.index.name:
        field_type = self.restore_type(dataframe.index.dtype)
        field = {
            'name': dataframe.index.name,
            'type': field_type,
            'constraints': {'required': True},
        }
        fields.append(field)
        primary_key = dataframe.index.name
    for column, dtype in dataframe.dtypes.iteritems():
        sample = dataframe[column].iloc[0] if len(dataframe) else None
        field_type = self.restore_type(dtype, sample=sample)
        field = {'name': column, 'type': field_type}
        fields.append(field)
    descriptor = {}
    descriptor['fields'] = fields
    if primary_key:
        descriptor['primaryKey'] = primary_key
    return descriptor
Restore descriptor from Pandas
def as_urlpatterns(self):
    urls = []
    for action in self.actions:
        view_class = self.view_for_action(action)
        view_pattern = self.pattern_for_view(view_class, action)
        name = self.url_name_for_action(action)
        urls.append(url(view_pattern, view_class.as_view(), name=name))
    return urls
Creates the appropriate URLs for this object.
def entropy_difference(d1, d2):
    d1, d2 = flatten(d1), flatten(d2)
    return abs(entropy(d1, base=2.0) - entropy(d2, base=2.0))
Return the difference in entropy between two distributions.
def ensure_dir_exists(path):
    import os
    f_dir = os.path.dirname(path)
    if not os.path.exists(f_dir):
        os.makedirs(f_dir)
    return f_dir
Given a file, ensure that the path to the file exists
def cut_blockquote(html_message):
    quote = html_message.xpath(
        '(.//blockquote)'
        '[not(@class="gmail_quote") and not(ancestor::blockquote)]'
        '[last()]')
    if quote:
        quote = quote[0]
        quote.getparent().remove(quote)
        return True
Cuts the last non-nested blockquote with wrapping elements.
def to_array(self):
    dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
    arr = np.empty_like(self.columns[0], dt)
    for label in self.labels:
        arr[label] = self[label]
    return arr
Convert the table to a structured NumPy array.
def add_extras(self, dict, level):
    my_copy = copy.deepcopy(dict)
    if 'level' not in my_copy:
        my_copy['level'] = level
    if 'timestamp' not in my_copy:
        my_copy['timestamp'] = self._get_time()
    if 'logger' not in my_copy:
        my_copy['logger'] = self.name
    return my_copy
Adds the log level to the dict object
def extract_descriptor(self, obj):
    descriptor = []

    def flatten(current):
        if isinstance(current, dict):
            for key in current:
                flatten(current[key])
        elif isinstance(current, list):
            for val in current:
                flatten(val)
        elif isinstance(current, (int, bool, float, str)):
            descriptor.append(str(current))

    flatten(obj.descriptor)
    return descriptor
Extract data from the descriptor.
def __skullbomb_radius(self, position):
    sb_row, sb_col = position
    left = max(sb_row - 1, 0)
    right = min(sb_row + 1, 7)
    top = max(sb_col - 1, 0)
    bottom = min(sb_col + 1, 7)
    for explosion_row in xrange(left, right + 1):
        for explosion_col in xrange(top, bottom + 1):
            yield (explosion_row, explosion_col)
Generate all valid positions in the square around position.
def record_schemas(
        fn, wrapper, location, request_schema=None, response_schema=None):
    has_acceptable = hasattr(fn, '_acceptable_metadata')
    if request_schema is not None:
        wrapper._request_schema = request_schema
        wrapper._request_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._request_schema = request_schema
            fn._acceptable_metadata._request_schema_location = location
    if response_schema is not None:
        wrapper._response_schema = response_schema
        wrapper._response_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._response_schema = response_schema
            fn._acceptable_metadata._response_schema_location = location
Support extracting the schema from the decorated function.
def _validate_angles(self, angles):
    dataType = np.float64
    tempAngles = np.asarray(angles, dtype=dataType)
    tempAngles = tempAngles.reshape((3,))
    if np.shape(tempAngles) == (self.dimension,):
        if np.sum(tempAngles) < 360.0 or np.sum(tempAngles) > -360.0:
            if np.all(tempAngles != 180.0) and np.all(tempAngles != 0.0):
                pass
            else:
                raise ValueError('Angles cannot be 180.0 or 0.0')
        else:
            raise ValueError('Angles sum: {} is either greater than '
                             '360.0 or less than -360.0'
                             .format(np.sum(tempAngles)))
        for subset in it.permutations(tempAngles, r=self.dimension):
            if not subset[0] < np.sum(tempAngles) - subset[0]:
                raise ValueError('Each angle provided must be less '
                                 'than the sum of the other angles. '
                                 '{} is greater.'.format(subset[0]))
    else:
        raise ValueError('Incorrect array size. When converted to a '
                         'Numpy array, the shape is: {}, expected {}.'
                         .format(np.shape(tempAngles), (3,)))
    self.angles = tempAngles
Ensure that the angles between the lattice_vectors are correct
def strip_doc_string(proto):
    assert isinstance(proto, google.protobuf.message.Message)
    for descriptor in proto.DESCRIPTOR.fields:
        if descriptor.name == 'doc_string':
            proto.ClearField(descriptor.name)
        elif descriptor.type == descriptor.TYPE_MESSAGE:
            if descriptor.label == descriptor.LABEL_REPEATED:
                for x in getattr(proto, descriptor.name):
                    strip_doc_string(x)
            elif proto.HasField(descriptor.name):
                strip_doc_string(getattr(proto, descriptor.name))
Empties `doc_string` field on any nested protobuf messages
def to_df(self):
    return pd.DataFrame({k: v for k, v in self.items() if k != 'names'},
                        index=self['names'])
Convert dict structure into Pandas DataFrame.
def constant_outfile_iterator(outfiles, infiles, arggroups):
    assert len(infiles) == 1
    assert len(arggroups) == 1
    return ((outfile, infiles[0], arggroups[0]) for outfile in outfiles)
Iterate over all output files.
def user(self, username=None):
    if username is None:
        username = self.__getUsername()
    parsedUsername = urlparse.quote(username)
    url = self.root + "/%s" % parsedUsername
    return User(url=url,
                securityHandler=self._securityHandler,
                proxy_url=self._proxy_url,
                proxy_port=self._proxy_port,
                initialize=False)
A user resource that represents a registered user in the portal.
def available_backends(self, hub=None, group=None, project=None, access_token=None, user_id=None):
    if access_token:
        self.req.credential.set_token(access_token)
    if user_id:
        self.req.credential.set_user_id(user_id)
    if not self.check_credentials():
        raise CredentialsError('credentials invalid')
    else:
        url = get_backend_url(self.config, hub, group, project)
        ret = self.req.get(url)
        if (ret is not None) and (isinstance(ret, dict)):
            return []
        return [backend for backend in ret if backend.get('status') == 'on']
Get the backends available to use in the QX Platform
def _prod(group_idx, a, size, fill_value, dtype=None):
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != 1:
        ret[group_idx] = 1
    np.multiply.at(ret, group_idx, a)
    return ret
Same as aggregate_numpy.py
def serve_forever(self, poll_interval=0.5):
    self.serial_port.timeout = poll_interval
    while not self._shutdown_request:
        try:
            self.serve_once()
        except (CRCError, struct.error) as e:
            log.error('Can\'t handle request: {0}'.format(e))
        except (SerialTimeoutException, ValueError):
            pass
Wait for incoming requests.
def statsd_metric(name, count, elapsed):
    with statsd.pipeline() as pipe:
        pipe.incr(name, count)
        pipe.timing(name, int(round(1000 * elapsed)))
Metric that records to statsd & graphite
def draw(self, img, pixmapper, bounds):
    if self._img is None:
        self._img = self.draw_legend()
    w = self._img.shape[1]
    h = self._img.shape[0]
    px = 5
    py = 5
    img[py:py + h, px:px + w] = self._img
draw legend on the image
def any_auth(self, form, auth_list, fun, arg, tgt=None, tgt_type='glob'):
    salt.utils.versions.warn_until(
        'Neon',
        'The \'any_auth\' function has been deprecated. Support for this '
        'function will be removed in Salt {version}.'
    )
    if form == 'publish':
        return self.auth_check(
            auth_list,
            fun,
            arg,
            tgt,
            tgt_type)
    return self.spec_check(
        auth_list,
        fun,
        arg,
        form)
Read in the form and determine which auth check routine to execute
def add_stats_table(self):
    totals = {sample: sum(counts.values()) for sample, counts in self.data.items()}
    percentages = {sample: {k: (v / totals[sample]) * 100 for k, v in counts.items()}
                   for sample, counts in self.data.items()}
    headers = {
        'species_a': {
            'title': '% Species a',
            'description': 'Percentage of reads mapping to species a',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'YlGn'
        }
    }
    self.general_stats_addcols(percentages, headers)
Adds stats to general table.
def getKeySequenceCounter(self):
    print '%s call getKeySequenceCounter' % self.port
    keySequence = ''
    keySequence = self.__sendCommand(WPANCTL_CMD + 'getprop -v Network:KeyIndex')[0]
    return keySequence
get current Thread Network key sequence
def sc_pan(self, viewer, event, msg=True):
    if not self.canpan:
        return True
    rev = self.settings.get('pan_reverse', False)
    direction = event.direction
    if rev:
        direction = math.fmod(direction + 180.0, 360.0)
    pan_accel = self.settings.get('scroll_pan_acceleration', 1.0)
    scr_pan_adj_factor = 1.4142135623730951
    amount = (event.amount * scr_pan_adj_factor * pan_accel) / 360.0
    self.pan_omni(viewer, direction, amount)
    return True
Interactively pan the image by scrolling motion.
def harvest(self):
    if self.perform_initialization() is not None:
        self.process_items()
        self.finalize()
    return self.job
Start the harvesting process
def legal_node_coords():
    nodes = set()
    for tile_id in legal_tile_ids():
        for node in nodes_touching_tile(tile_id):
            nodes.add(node)
    logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
    return nodes
Return all legal node coordinates on the grid
def matrix_from_basis_coefficients(expansion: value.LinearDict[str],
                                   basis: Dict[str, np.ndarray]) -> np.ndarray:
    some_element = next(iter(basis.values()))
    result = np.zeros_like(some_element, dtype=np.complex128)
    for name, coefficient in expansion.items():
        result += coefficient * basis[name]
    return result
Computes linear combination of basis vectors with given coefficients.
def plot_multi(func:Callable[[int,int,plt.Axes],None], r:int=1, c:int=1, figsize:Tuple=(12,6)):
    "Call `func` for every combination of `r,c` on a subplot"
    axes = plt.subplots(r, c, figsize=figsize)[1]
    for i in range(r):
        for j in range(c):
            func(i, j, axes[i, j])
Call `func` for every combination of `r,c` on a subplot
def model_actions(self, model, **kwargs):
    check(model)
    ctype = ContentType.objects.get_for_model(model)
    return self.public(
        (Q(target_content_type=ctype) |
         Q(action_object_content_type=ctype) |
         Q(actor_content_type=ctype)),
        **kwargs
    )
Stream of most recent actions by any particular model
def DFReader_is_text_log(filename):
    f = open(filename)
    ret = (f.read(8000).find('FMT, ') != -1)
    f.close()
    return ret
return True if a file appears to be a valid text log
def load_module(filename):
    basename = os.path.basename(filename)
    path = os.path.dirname(filename)
    sys.path.append(path)
    return __import__(os.path.splitext(basename)[0])
Loads a module by filename
def find_by_user(user: str) -> List['ApiKey']: return [ApiKey.from_db(key) for key in db.get_keys(qb.from_dict({'user': user}))]
List API keys for a user.
def fullConn(self, preCellsTags, postCellsTags, connParam):
    from .. import sim
    if sim.cfg.verbose:
        print('Generating set of all-to-all connections (rule: %s) ...' % (connParam['label']))
    paramsStrFunc = [param for param in [p + 'Func' for p in self.connStringFuncParams] if param in connParam]
    for paramStrFunc in paramsStrFunc:
        connParam[paramStrFunc[:-4] + 'List'] = {
            (preGid, postGid): connParam[paramStrFunc](**{
                k: v if isinstance(v, Number) else v(preCellTags, postCellTags)
                for k, v in connParam[paramStrFunc + 'Vars'].items()})
            for preGid, preCellTags in preCellsTags.items()
            for postGid, postCellTags in postCellsTags.items()}
    for postCellGid in postCellsTags:
        if postCellGid in self.gid2lid:
            for preCellGid, preCellTags in preCellsTags.items():
                self._addCellConn(connParam, preCellGid, postCellGid)
Generates connections between all pre and post-syn cells
def values(self, *args: str, **kwargs: str) -> "ValuesQuery":
    fields_for_select = {}
    for field in args:
        if field in fields_for_select:
            raise FieldError("Duplicate key {}".format(field))
        fields_for_select[field] = field
    for return_as, field in kwargs.items():
        if return_as in fields_for_select:
            raise FieldError("Duplicate key {}".format(return_as))
        fields_for_select[return_as] = field
    return ValuesQuery(
        db=self._db,
        model=self.model,
        q_objects=self._q_objects,
        fields_for_select=fields_for_select,
        distinct=self._distinct,
        limit=self._limit,
        offset=self._offset,
        orderings=self._orderings,
        annotations=self._annotations,
        custom_filters=self._custom_filters,
    )
Make QuerySet return dicts instead of objects.
def seek(self, rev):
    if not self:
        return
    if type(rev) is not int:
        raise TypeError("rev must be int")
    past = self._past
    future = self._future
    if future:
        appender = past.append
        popper = future.pop
        future_start = future[-1][0]
        while future_start <= rev:
            appender(popper())
            if future:
                future_start = future[-1][0]
            else:
                break
    if past:
        popper = past.pop
        appender = future.append
        past_end = past[-1][0]
        while past_end > rev:
            appender(popper())
            if past:
                past_end = past[-1][0]
            else:
                break
Arrange the caches to help look up the given revision.
def delete(self, **kwargs):
    url_str = self.base_url + '/%s' % kwargs['alarm_id']
    resp = self.client.delete(url_str)
    return resp
Delete a specific alarm.
async def remove(self, *instances, using_db=None) -> None:
    db = using_db if using_db else self.model._meta.db
    if not instances:
        raise OperationalError("remove() called on no instances")
    through_table = Table(self.field.through)
    if len(instances) == 1:
        condition = (getattr(through_table, self.field.forward_key) == instances[0].id) & (
            getattr(through_table, self.field.backward_key) == self.instance.id
        )
    else:
        condition = (getattr(through_table, self.field.backward_key) == self.instance.id) & (
            getattr(through_table, self.field.forward_key).isin([i.id for i in instances])
        )
    query = db.query_class.from_(through_table).where(condition).delete()
    await db.execute_query(str(query))
Removes one or more of ``instances`` from the relation.
def _move_data_entries(destination_eggdir, dist_data):
    dist_data = os.path.join(destination_eggdir, dist_data)
    dist_data_scripts = os.path.join(dist_data, 'scripts')
    if os.path.exists(dist_data_scripts):
        egg_info_scripts = os.path.join(
            destination_eggdir, 'EGG-INFO', 'scripts')
        os.mkdir(egg_info_scripts)
        for entry in os.listdir(dist_data_scripts):
            if entry.endswith('.pyc'):
                os.unlink(os.path.join(dist_data_scripts, entry))
            else:
                os.rename(
                    os.path.join(dist_data_scripts, entry),
                    os.path.join(egg_info_scripts, entry),
                )
        os.rmdir(dist_data_scripts)
    for subdir in filter(os.path.exists, (
            os.path.join(dist_data, d)
            for d in ('data', 'headers', 'purelib', 'platlib')
    )):
        unpack(subdir, destination_eggdir)
    if os.path.exists(dist_data):
        os.rmdir(dist_data)
Move data entries to their correct location.
def url_for_token(self, token):
    book_url = self.get_config_value("pages", token)
    book, _, url_tail = book_url.partition(':')
    book_base = settings.HELP_TOKENS_BOOKS[book]
    url = book_base
    lang = getattr(settings, "HELP_TOKENS_LANGUAGE_CODE", None)
    if lang is not None:
        lang = self.get_config_value("locales", lang)
        url += "/" + lang
    version = getattr(settings, "HELP_TOKENS_VERSION", None)
    if version is not None:
        url += "/" + version
    url += "/" + url_tail
    return url
Find the full URL for a help token.
def __getFilenameSuffix(self, filename):
    if filename and isinstance(filename, string_types):
        if self.__isValidTGZ(filename):
            return ".tar.gz"
        elif filename.endswith(".zip"):
            return ".zip"
Gets the filename suffix
def hash_to_exponent(self, h):
    ctr = Crypto.Util.Counter.new(128, initial_value=0)
    cipher = Crypto.Cipher.AES.new(h, Crypto.Cipher.AES.MODE_CTR, counter=ctr)
    buf = cipher.encrypt(b'\0' * self.order_len_bin)
    return self._buf_to_exponent(buf)
Converts a 32 byte hash to an exponent
def ast_from_class(self, klass, modname=None):
    if modname is None:
        try:
            modname = klass.__module__
        except AttributeError as exc:
            raise exceptions.AstroidBuildingError(
                "Unable to get module for class {class_name}.",
                cls=klass,
                class_repr=safe_repr(klass),
                modname=modname,
            ) from exc
    modastroid = self.ast_from_module_name(modname)
    return modastroid.getattr(klass.__name__)[0]
get astroid for the given class
def alphabetize_attributes(self): self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
Orders attributes names alphabetically, except for the class attribute, which is kept last.