def _item_type(item):
    tag = item['tag']
    style = item.get('style', None)
    if tag == 'p':
        if style is None or 'paragraph' in style:
            return 'paragraph'
        else:
            return style
    elif tag == 'span':
        if style in (None, 'normal-text'):
            return 'text'
        elif style == 'url':
            return 'link'
        else:
            return style
    elif tag == 'h':
        assert style is not None
        return style
    elif tag in ('list', 'list-item', 'line-break'):
        if style == '_numbered_list':
            return 'numbered-list'
        else:
            return tag
    elif tag == 's':
        return 'spaces'
    raise Exception("The tag '{0}' with style '{1}' hasn't "
                    "been implemented.".format(tag, style))
Indicate to the ODF reader the type of the block or text.
def rlmb_long_stochastic_discrete_simulation_deterministic_starts():
    hparams = rlmb_base_stochastic_discrete()
    hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long"
    hparams.ppo_epochs_num = 1000
    hparams.simulation_random_starts = False
    return hparams
Long setting with stochastic discrete model & deterministic sim starts.
def count_missense_per_gene(lines):
    counts = {}
    for x in lines:
        x = x.split("\t")
        gene = x[0]
        consequence = x[3]
        if gene not in counts:
            counts[gene] = 0
        if consequence != "missense_variant":
            continue
        counts[gene] += 1
    return counts
count the number of missense variants in each gene.
def _isvalid(self, datatype):
    if datatype in self.meta:
        return bool(Dap._meta_valid[datatype].match(self.meta[datatype]))
    else:
        return datatype in Dap._optional_meta
Checks if the given datatype is valid in meta
def egg_name(self):
    filename = "%s-%s-py%s" % (
        to_filename(self.project_name),
        to_filename(self.version),
        self.py_version or PY_MAJOR
    )
    if self.platform:
        filename += '-' + self.platform
    return filename
Return what this distribution's standard .egg filename should be
def _visit_functiondef(self, cls, node, parent):
    self._global_names.append({})
    node, doc = self._get_doc(node)
    newnode = cls(node.name, doc, node.lineno, node.col_offset, parent)
    if node.decorator_list:
        decorators = self.visit_decorators(node, newnode)
    else:
        decorators = None
    if PY3 and node.returns:
        returns = self.visit(node.returns, newnode)
    else:
        returns = None
    type_comment_args = type_comment_returns = None
    type_comment_annotation = self.check_function_type_comment(node)
    if type_comment_annotation:
        type_comment_returns, type_comment_args = type_comment_annotation
    newnode.postinit(
        args=self.visit(node.args, newnode),
        body=[self.visit(child, newnode) for child in node.body],
        decorators=decorators,
        returns=returns,
        type_comment_returns=type_comment_returns,
        type_comment_args=type_comment_args,
    )
    self._global_names.pop()
    return newnode
Visit a FunctionDef node and turn it into an astroid FunctionDef node.
def start_review(self):
    if self.set_status:
        self.github_repo.create_status(
            state="pending",
            description="Static analysis in progress.",
            context="inline-plz",
            sha=self.last_sha,
        )
Mark our review as started.
def unique(transactions):
    seen = set()
    return [x for x in transactions if not (x in seen or seen.add(x))]
Remove any duplicate entries.
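A quick usage sketch of unique with hypothetical demo data: order is preserved and the first occurrence of each entry wins (seen.add returns None, so the or expression stays falsy for unseen items).

    transactions = [3, 1, 3, 2, 1]
    assert unique(transactions) == [3, 1, 2]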
def GetConsistentValueOrRaise(self, error_format, context=None):
    if self.has_error:
        full_context = dict(self._context)
        if context:
            full_context.update(context)
        raise ValueError(error_format.format(**full_context))
    return self.value
Gets consistent value or raises ValueError with formatted contexts.
def info(message, *args, **kwargs):
    if 'end' in kwargs:
        end = kwargs['end']
    else:
        end = '\n'
    if len(args) == 0:
        sys.stdout.write(message)
    else:
        sys.stdout.write(message % args)
    sys.stdout.write(end)
    sys.stdout.flush()
write a message to stdout
def output(id, url):
    try:
        experiment = ExperimentClient().get(normalize_job_name(id))
    except FloydException:
        experiment = ExperimentClient().get(id)
    output_dir_url = "%s/%s/files" % (floyd.floyd_web_host, experiment.name)
    if url:
        floyd_logger.info(output_dir_url)
    else:
        floyd_logger.info("Opening output path in your browser ...")
        webbrowser.open(output_dir_url)
View the files from a job.
def do_cat(self, path):
    path = path[0]
    tmp_file_path = self.TMP_PATH + 'tmp'
    if not os.path.exists(self.TMP_PATH):
        os.makedirs(self.TMP_PATH)
    f = self.n.downloadFile(self.current_path + path, tmp_file_path)
    f = open(tmp_file_path, 'r')
    self.stdout.write(f.read())
    self.stdout.write("\n")
display the contents of a file
def emit(self, item, defaults=None, stencil=None, to_log=False, item_formatter=None):
    item_text = self.format_item(item, defaults, stencil)
    if item_formatter:
        item_text = item_formatter(item_text)
    if item is None and os.isatty(sys.stdout.fileno()):
        item_text = ''.join((config.output_header_ecma48, item_text, "\x1B[0m"))
    if to_log:
        if callable(to_log):
            to_log(item_text)
        else:
            self.LOG.info(item_text)
    elif self.options.nul:
        sys.stdout.write(item_text + '\0')
        sys.stdout.flush()
    else:
        print(item_text)
    return item_text.count('\n') + 1
Print an item to stdout, or the log on INFO level.
def load_json(file):
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, file)) as jfile:
        data = json.load(jfile)
    return data
Load JSON file at app start
def find_agent(self, desc):
    agent_id = (desc.doc_id if IDocument.providedBy(desc) else desc)
    self.log("I'm trying to find the agent with id: %s", agent_id)
    result = first(x for x in self._agents
                   if x._descriptor.doc_id == agent_id)
    return defer.succeed(result)
Gives the medium class of the agent if the agency hosts it.
def best(self):
    b = (-1e999999, None)
    for k, c in iteritems(self.counts):
        b = max(b, (c, k))
    return b[1]
Returns the element with the highest probability.
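Assuming self.counts is a plain mapping from elements to counts (an assumption, since the enclosing class isn't shown), the selection is equivalent to taking max over (count, key) pairs:

    counts = {'a': 3, 'b': 5, 'c': 1}  # hypothetical stand-in for self.counts
    best_element = max(counts.items(), key=lambda kv: kv[1])[0]
    assert best_element == 'b'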
def get(self, idx, default=''):
    if isinstance(idx, int) and (idx >= len(self) or idx < -1 * len(self)):
        return default
    return super().__getitem__(idx)
Returns the element at idx, or default if idx is beyond the length of the list
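A minimal self-contained sketch of how this method behaves when mixed into a list subclass (the enclosing class isn't shown, so SafeList here is hypothetical):

    class SafeList(list):
        def get(self, idx, default=''):
            if isinstance(idx, int) and (idx >= len(self) or idx < -1 * len(self)):
                return default
            return super().__getitem__(idx)

    items = SafeList(['a', 'b'])
    assert items.get(1) == 'b'
    assert items.get(5, default='missing') == 'missing'  # out of range -> default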
def _get_assessment_part_collection(self, assessment_part_id):
    collection = JSONClientValidated('assessment_authoring',
                                     collection='AssessmentPart',
                                     runtime=self._runtime)
    assessment_part_map = collection.find_one(
        {'_id': ObjectId(assessment_part_id.get_identifier())})
    if 'itemIds' not in assessment_part_map:
        raise errors.NotFound('no Items are assigned to this AssessmentPart')
    return assessment_part_map, collection
Returns a Mongo Collection and AssessmentPart given an AssessmentPart Id
def update_commands(self, commands_str):
    commands = dict(parse_qsl(commands_str, keep_blank_values=True))
    _if = commands.get("if", self._if)
    if _if:
        self._if = Condition(_if)
    self._set_int(commands, "max_length")
    self._set_int(commands, "min_length")
    self.color = self._check_color(commands.get("color"))
    self.not_zero = "not_zero" in commands or self.not_zero
    self.show = "show" in commands or self.show
    self.soft = "soft" in commands or self.soft
update with commands from the block
def relpath_for(self, path):
    if self.parent_dir in (".", ""):
        return path
    if path == self.parent_dir:
        return ""
    dirname = os.path.dirname(path) or "."
    basename = os.path.basename(path)
    cached = self.relpath_cache.get(dirname, empty)
    if cached is empty:
        cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)
    return os.path.join(cached, basename)
Find the relative path to path from the parent_dir.
def launch_browser(attempt_launch_browser=True):
    _DISPLAY_VARIABLES = ['DISPLAY', 'WAYLAND_DISPLAY', 'MIR_SOCKET']
    _WEBBROWSER_NAMES_BLACKLIST = ['www-browser', 'lynx', 'links', 'elinks', 'w3m']
    import webbrowser
    launch_browser = attempt_launch_browser
    if launch_browser:
        if ('linux' in sys.platform and
                not any(os.getenv(var) for var in _DISPLAY_VARIABLES)):
            launch_browser = False
        try:
            browser = webbrowser.get()
            if (hasattr(browser, 'name') and
                    browser.name in _WEBBROWSER_NAMES_BLACKLIST):
                launch_browser = False
        except webbrowser.Error:
            launch_browser = False
    return launch_browser
Decide if we should launch a browser
def send_exit_with_code(cls, sock, code):
    encoded_exit_status = cls.encode_int(code)
    cls.send_exit(sock, payload=encoded_exit_status)
Send an Exit chunk over the specified socket, containing the specified return code.
def kinetic_law_reaction_parameters(self):
    for parameter in self._root.iterfind(
            './{}/{}/{}'.format(self._reader._sbml_tag('kineticLaw'),
                                self._reader._sbml_tag('listOfParameters'),
                                self._reader._sbml_tag('parameter'))):
        param_id = parameter.get('id')
        param_name = parameter.get('name')
        param_value = Decimal(parameter.get('value'))
        param_units = parameter.get('units')
        yield param_id, param_name, param_value, param_units
Iterator over the values of kinetic law reaction parameters
def _get_aggregated_info(self):
    agg_results = {}
    for key in self.aggregated_info['occurrences']:
        agg_results[key] = {
            'occurrences': self.aggregated_info['occurrences'].get(key),
            'coverage': (float(self.aggregated_info['occurrences'].get(key)) /
                         float(self.get_metadata('items_count'))) * 100
        }
    return agg_results
Computes occurrence counts and coverage percentages from self.aggregated_info.
def determine_metadata(self, request, view):
    metadata = super(DynamicMetadata, self).determine_metadata(request, view)
    metadata['features'] = getattr(view, 'features', [])
    if hasattr(view, 'get_serializer'):
        serializer = view.get_serializer(dynamic=False)
        if hasattr(serializer, 'get_name'):
            metadata['resource_name'] = serializer.get_name()
        if hasattr(serializer, 'get_plural_name'):
            metadata['resource_name_plural'] = serializer.get_plural_name()
        metadata['properties'] = self.get_serializer_info(serializer)
    return metadata
Adds `properties` and `features` to the metadata response.
def fix_pickle(self):
    from coconut import __coconut__
    for var in self.vars:
        if not var.startswith("__") and var in dir(__coconut__):
            self.vars[var] = getattr(__coconut__, var)
Fix pickling of Coconut header objects.
def _tcpdump_callback(self, line, kill_switch):
    line = line.lower()
    if ("listening" in line) or ("reading" in line):
        self.started = True
    if ("no suitable device" in line):
        self.error = True
        self.kill_switch()
    if "by kernel" in line:
        self.stopped = True
Callback function to handle tcpdump output and track whether it started, stopped, or errored.
def hdate(self, date):
    if date is None and isinstance(self.gdate, datetime.date):
        date = self.hdate
    if not isinstance(date, HebrewDate):
        raise TypeError('date: {} is not of type HebrewDate'.format(date))
    if not 0 < date.month < 15:
        raise ValueError(
            'month ({}) legal values are 1-14'.format(date.month))
    if not 0 < date.day < 31:
        raise ValueError('day ({}) legal values are 1-31'.format(date.day))
    self._last_updated = "hdate"
    self._hdate = date
Set the dates of the HDate object based on a given Hebrew date.
def _list_selection_changed(self):
    items = self.list_layers_in_map_report.selectedItems()
    self.remove_layer.setEnabled(len(items) >= 1)
    if len(items) == 1 and self.list_layers_in_map_report.count() >= 2:
        index = self.list_layers_in_map_report.indexFromItem(items[0])
        index = index.row()
        if index == 0:
            self.move_up.setEnabled(False)
            self.move_down.setEnabled(True)
        elif index == self.list_layers_in_map_report.count() - 1:
            self.move_up.setEnabled(True)
            self.move_down.setEnabled(False)
        else:
            self.move_up.setEnabled(True)
            self.move_down.setEnabled(True)
    else:
        self.move_up.setEnabled(False)
        self.move_down.setEnabled(False)
Selection has changed in the list.
def tweet(ctx, message):
    if not valid_tweet(message):
        click.echo("Message is too long for twitter.")
        click.echo("Message:" + message)
        ctx.exit(2)
    if not ctx.obj['DRYRUN']:
        ctx.obj['TWEEPY_API'].update_status(message)
    else:
        click.echo("Tweet not sent due to dry-run mode.")
Sends a tweet directly to your timeline
def name_from_type(type_):
    if isinstance(type_, (DictType, ListType, TupleType, SetType, IteratorType)):
        return repr(type_)
    else:
        if type_.__name__ != 'NoneType':
            module = type_.__module__
            if module in BUILTIN_MODULES or module == '<unknown>':
                return type_.__name__
            else:
                name = getattr(type_, '__qualname__', None) or type_.__name__
                delim = '.' if '.' not in name else ':'
                return '%s%s%s' % (module, delim, name)
        else:
            return 'None'
Helper function to get PEP-484 compatible string representation of our internal types.
def aug_sysargv(cmdstr):
    import shlex
    argv = shlex.split(cmdstr)
    sys.argv.extend(argv)
DEBUG FUNC: modify sys.argv to look like you ran a command.
def are_all_nodes_discovered(self):
    undiscovered = self.find_all(lambda e: not e.discovered)
    return len(list(undiscovered)) == 0
Reports whether all nodes have been discovered (i.e. no node info is still unknown).
def _build_relations_config(self, yamlconfig):
    config = {}
    for element in yamlconfig:
        if isinstance(element, str):
            config[element] = {'relation_name': element, 'schemas': []}
        elif isinstance(element, dict):
            if 'relation_name' not in element or 'schemas' not in element:
                self.log.warning("Unknown element format for relation element %s", element)
                continue
            if not isinstance(element['schemas'], list):
                self.log.warning("Expected a list of schemas for %s", element)
                continue
            name = element['relation_name']
            config[name] = {'relation_name': name, 'schemas': element['schemas']}
        else:
            self.log.warning('Unhandled relations config type: {}'.format(element))
    return config
Builds a dictionary from relations configuration while maintaining compatibility
def section_term_branch_orders(neurites, neurite_type=NeuriteType.all):
    return map_sections(sectionfunc.branch_order, neurites,
                        neurite_type=neurite_type, iterator_type=Tree.ileaf)
Termination section branch orders in a collection of neurites
def _parse_state_file(state_file_path='terraform.tfstate'):
    ret = {}
    with salt.utils.files.fopen(state_file_path, 'r') as fh_:
        tfstate = salt.utils.json.load(fh_)
    modules = tfstate.get('modules')
    if not modules:
        log.error('Malformed tfstate file. No modules found')
        return ret
    for module in modules:
        resources = module.get('resources', [])
        for resource_name, resource in salt.ext.six.iteritems(resources):
            roster_entry = None
            if resource['type'] == 'salt_host':
                roster_entry = _handle_salt_host_resource(resource)
            if not roster_entry:
                continue
            minion_id = roster_entry.get(MINION_ID, resource.get('id'))
            if not minion_id:
                continue
            if MINION_ID in roster_entry:
                del roster_entry[MINION_ID]
            _add_ssh_key(roster_entry)
            ret[minion_id] = roster_entry
    return ret
Parses the terraform state file passing different resource types to the right handler
def _covar_mstep_spherical(*args):
    cv = _covar_mstep_diag(*args)
    return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
Perform the covariance M step for spherical cases.
def thishost():
    global _thishost
    if _thishost is None:
        try:
            _thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2])
        except socket.gaierror:
            _thishost = tuple(socket.gethostbyname_ex('localhost')[2])
    return _thishost
Return the IP addresses of the current host.
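The result is cached in the module-level _thishost, so repeated calls don't re-resolve DNS. A one-shot equivalent (without the cache or the localhost fallback) looks like:

    import socket

    # gethostbyname_ex returns (hostname, aliases, ipaddrlist); [2] is the IP list.
    addresses = tuple(socket.gethostbyname_ex(socket.gethostname())[2])
    print(addresses)  # e.g. ('192.168.1.10',)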
def create(cls, service=Service(), private=False):
    response = service.send(SRequest('POST', cls.path, data={'private': private}))
    return cls.from_response(response, service=service)
create a bin instance on the server
def all_unique(keys, axis=semantics.axis_default):
    index = as_index(keys, axis)
    return index.groups == index.size
Returns true if all keys are unique
def show_firewall(self, firewall, **_params):
    return self.get(self.firewall_path % (firewall), params=_params)
Fetches information of a certain firewall.
def _connectionEstablished(self, transport):
    self.transport = transport
    self.transport.writeOpen()
    self.heartbeater.schedule()
Store a reference to our transport and write an open frame.
def to_bucket(self, timestamp, steps=0):
    dt = datetime.utcfromtimestamp(timestamp)
    if steps != 0:
        if self._step == 'daily':
            dt = dt + timedelta(days=steps)
        elif self._step == 'weekly':
            dt = dt + timedelta(weeks=steps)
        elif self._step == 'monthly':
            dt = dt + MonthDelta(steps)
        elif self._step == 'yearly':
            year = int(dt.strftime(self.FORMATS[self._step]))
            year += steps
            dt = datetime(year=year, month=1, day=1)
    return int(dt.strftime(self.FORMATS[self._step]))
Calculate the bucket from a timestamp.
def whoami(ctx, opts):
    click.echo("Retrieving your authentication status from the API ... ", nl=False)
    context_msg = "Failed to retrieve your authentication status!"
    with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
        with maybe_spinner(opts):
            is_auth, username, email, name = get_user_brief()
    click.secho("OK", fg="green")
    click.echo("You are authenticated as:")
    if not is_auth:
        click.secho("Nobody (i.e. anonymous user)", fg="yellow")
    else:
        click.secho(
            "%(name)s (slug: %(username)s, email: %(email)s)"
            % {
                "name": click.style(name, fg="cyan"),
                "username": click.style(username, fg="magenta"),
                "email": click.style(email, fg="green"),
            }
        )
Retrieve your current authentication status.
def append_value(dictionary, key, item):
    items = dictionary.get(key, [])
    items.append(item)
    dictionary[key] = items
Append the item to the list of values for that key.
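Equivalent to dictionary.setdefault(key, []).append(item); a quick demonstration:

    d = {}
    append_value(d, 'fruits', 'apple')
    append_value(d, 'fruits', 'pear')
    assert d == {'fruits': ['apple', 'pear']}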
def endpoint_update(**kwargs):
    client = get_client()
    endpoint_id = kwargs.pop("endpoint_id")
    get_res = client.get_endpoint(endpoint_id)
    if get_res["host_endpoint_id"]:
        endpoint_type = "shared"
    elif get_res["is_globus_connect"]:
        endpoint_type = "personal"
    elif get_res["s3_url"]:
        endpoint_type = "s3"
    else:
        endpoint_type = "server"
    validate_endpoint_create_and_update_params(
        endpoint_type, get_res["subscription_id"], kwargs
    )
    ep_doc = assemble_generic_doc("endpoint", **kwargs)
    res = client.update_endpoint(endpoint_id, ep_doc)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
Executor for `globus endpoint update`
def visit_ifexp(self, node, parent):
    newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
    newnode.postinit(
        self.visit(node.test, newnode),
        self.visit(node.body, newnode),
        self.visit(node.orelse, newnode),
    )
    return newnode
Visit an IfExp node by returning a fresh instance of it.
def to_dataframe(self, **kwargs):
    return pandas.io.parsers.read_csv(self.path, sep=self.d, **kwargs)
Load up the CSV file as a pandas dataframe
def start(self):
    self._lc = LoopingCall(self._download)
    self._lc.start(30, now=True)
Start the background process.
def RGB(self, val):
    val = np.atleast_1d(val).astype(np.float32) / 255.
    self.rgba = val
Set the color using an Nx3 array of RGB uint8 values
def _hilink_decrypt(self, encrypted_firmware):
    cipher = DES.new(self.DES_KEY, DES.MODE_ECB)
    p1 = encrypted_firmware[0:3]
    p2 = encrypted_firmware[3:]
    p2 += b"\x00" * (8 - (len(p2) % 8))
    d1 = p1 + cipher.decrypt(p2)
    d1 += b"\x00" * (8 - (len(d1) % 8))
    return cipher.decrypt(d1)
This does the actual decryption.
def fetchref(self, ref):
    log.debug('[%s] Fetching ref: %s', self.name, ref)
    fetch_info = self.repo.remotes.origin.fetch(ref).pop()
    return fetch_info.ref
Fetch a particular git ref.
def ansi(color, text):
    code = COLOR_CODES[color]
    return '\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM)
Wrap text in an ansi escape sequence
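COLOR_CODES and RESET_TERM are module constants that aren't shown; assuming the usual ECMA-48 SGR values, a self-contained sketch:

    COLOR_CODES = {'red': 31, 'green': 32, 'yellow': 33}  # assumed SGR color codes
    RESET_TERM = '\033[0;m'                               # assumed reset sequence

    def ansi(color, text):
        code = COLOR_CODES[color]
        return '\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM)

    print(ansi('green', 'ok'))  # prints 'ok' in bold green on ANSI terminals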
def _detect_term_type():
    if os.name == 'nt':
        if os.environ.get('TERM') == 'xterm':
            return 'mintty'
        else:
            return 'nt'
    if platform.system().upper().startswith('CYGWIN'):
        return 'cygwin'
    return 'posix'
Detect the type of the terminal.
def _readcsv(self, path_to_csv):
    return np.genfromtxt(path_to_csv, dtype=None, delimiter=',', names=True)
Read a CSV file into a structured NumPy array.
def update_alarm(self, entity, alarm, criteria=None, disabled=False,
                 label=None, name=None, metadata=None):
    return entity.update_alarm(alarm, criteria=criteria, disabled=disabled,
                               label=label, name=name, metadata=metadata)
Updates an existing alarm on the given entity.
def _setup_tls_files(self, files):
    for file_type in TLSFileType:
        if file_type.value in files:
            file_path = files[file_type.value]
            setattr(self, file_type.value, TLSFile(file_path, file_type=file_type))
Initializes TLSFile objects with the paths given to this bundle.
def seekset_ng(func):
    @functools.wraps(func)
    def seekcur(file, *args, seekset=os.SEEK_SET, **kw):
        file.seek(seekset, os.SEEK_SET)
        return_ = func(file, *args, seekset=seekset, **kw)
        return return_
    return seekcur
Read file from start then set back to original.
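A usage sketch with an in-memory file, showing that the wrapped function always starts reading from the given offset (0, i.e. os.SEEK_SET, by default); read_all here is hypothetical:

    import io

    @seekset_ng
    def read_all(file, *, seekset=0):
        return file.read()

    buf = io.BytesIO(b'hello world')
    buf.read()                                   # exhaust the stream
    assert read_all(buf) == b'hello world'       # rewound to the start
    assert read_all(buf, seekset=6) == b'world'  # or to any offset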
def _parse_topic(client, command, actor, args):
    channel, _, topic = args.partition(" :")
    channel = client.server.get_channel(channel)
    channel.topic = topic or None
    if actor:
        actor = User(actor)
    client.dispatch_event("TOPIC", actor, channel, topic)
Parse a TOPIC and update channel state, then dispatch a TOPIC event.
def _add_nested(self, rec, name, value):
    (typedef, target_term) = value.split('!')[0].rstrip().split(' ')
    getattr(rec, name)[typedef].append(target_term)
Adds a term's nested attributes.
def steady_connection(self):
    return connect(
        self._creator, self._maxusage, self._setsession,
        self._failures, self._ping, self._closeable,
        *self._args, **self._kwargs)
Get a steady, non-persistent DB-API 2 connection.
def read(self, timeout=1.0):
    # Check for a missing port before touching it; the original set
    # self.ser.timeout first, which would raise AttributeError when
    # self.ser is None.
    if self.ser is None:
        return ''
    self.ser.timeout = timeout
    return self.ser.readline()
read from modem port, return null string on timeout.
def __get_all_child_accounts_as_array(self, account: Account) -> List[Account]:
    result = []
    result.append(account)
    for child in account.children:
        sub_accounts = self.__get_all_child_accounts_as_array(child)
        result += sub_accounts
    return result
Returns the whole tree of child accounts in a list
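A hedged sketch of the pre-order recursion with a minimal stand-in Account (the real class comes from the surrounding codebase and has more attributes):

    from typing import List

    class Account:  # hypothetical stand-in with just the traversed attributes
        def __init__(self, name, children=None):
            self.name = name
            self.children = children or []

    def get_all_child_accounts(account: Account) -> List[Account]:
        result = [account]
        for child in account.children:
            result += get_all_child_accounts(child)
        return result

    root = Account('Assets', [Account('Cash'),
                              Account('Bank', [Account('Checking')])])
    assert [a.name for a in get_all_child_accounts(root)] == \
        ['Assets', 'Cash', 'Bank', 'Checking']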
def unhumanize_class(my_classes):
    result = []
    interval = my_classes[-1] - my_classes[-2]
    min_value = 0
    for max_value in my_classes:
        result.append((format_decimal(interval, min_value),
                       format_decimal(interval, max_value)))
        min_value = max_value
    return result
Return class as interval without formatting.
def install_scripts(distributions):
    try:
        if "__PEX_UNVENDORED__" in __import__("os").environ:
            from setuptools.command import easy_install
        else:
            from pex.third_party.setuptools.command import easy_install
        if "__PEX_UNVENDORED__" in __import__("os").environ:
            import pkg_resources
        else:
            import pex.third_party.pkg_resources as pkg_resources
    except ImportError:
        raise RuntimeError("'wheel install_scripts' needs setuptools.")
    for dist in distributions:
        pkg_resources_dist = pkg_resources.get_distribution(dist)
        install = get_install_command(dist)
        command = easy_install.easy_install(install.distribution)
        command.args = ['wheel']
        command.finalize_options()
        command.install_egg_scripts(pkg_resources_dist)
Regenerate the entry_points console_scripts for the named distribution.
def _add_device(self, scs_id, ha_id, name):
    if scs_id in self._devices:
        return
    self._devices[scs_id] = {'name': name, 'ha_id': ha_id}
Add device to the list of known ones
def _refresh_mine_cache(wrapped):
    @functools.wraps(wrapped)
    def wrapper(*args, **kwargs):
        returned = wrapped(*args, **__utils__['args.clean_kwargs'](**kwargs))
        if _check_update_mine():
            __salt__['mine.send']('docker.ps', verbose=True, all=True, host=True)
        return returned
    return wrapper
Decorator to trigger a refresh of salt mine data.
def _verify(self, path_prefix=None):
    for field, spec in self.doc_spec.iteritems():
        path = self._append_path(path_prefix, field)
        if isinstance(spec, dict):
            self._verify_field_spec(spec, path)
        else:
            raise SchemaFormatException("Invalid field definition for {}", path)
Verifies that this schema's doc spec is valid and makes sense.
def all_files(self):
    return set([entry.decode() for entry, _ in self.git.open_index().items()])
Return a set of all the files under git control
def _save_notebook(self, os_path, nb):
    with self.atomic_writing(os_path, encoding='utf-8') as f:
        if ftdetect(os_path) == 'notebook':
            nbformat.write(nb, f, version=nbformat.NO_CONVERT)
        elif ftdetect(os_path) == 'markdown':
            nbjson = nbformat.writes(nb, version=nbformat.NO_CONVERT)
            markdown = convert(nbjson, informat='notebook',
                               outformat='markdown',
                               strip_outputs=self.strip_outputs)
            f.write(markdown)
Save a notebook to an os_path.
def _feature_file(self, parallel=None, index=None):
    if index is None:
        index = (0 if parallel is None or "SGE_TASK_ID" not in os.environ
                 else int(os.environ["SGE_TASK_ID"]))
    return os.path.join(self.feature_directory, "Features_%02d.hdf5" % index)
Returns the name of an intermediate file for storing features.
def _virtualenv_sys(venv_path):
    "obtain version and path info from a virtualenv."
    executable = os.path.join(venv_path, env_bin_dir, 'python')
    p = subprocess.Popen([executable, '-c', 'import sys;'
                          'print (sys.version[:3]);'
                          'print ("\\n".join(sys.path));'],
                         env={}, stdout=subprocess.PIPE)
    stdout, err = p.communicate()
    assert not p.returncode and stdout
    lines = stdout.decode('utf-8').splitlines()
    return lines[0], list(filter(bool, lines[1:]))
obtain version and path info from a virtualenv.
def read(filename):
    return codecs.open(os.path.join(__DIR__, filename), 'r').read()
Read `filename` in the root dir of the project and return its contents as a string.
def _CheckConnectionEncoding(cursor):
    cur_character_set = _ReadVariable("character_set_connection", cursor)
    if cur_character_set != CHARACTER_SET:
        raise EncodingEnforcementError(
            "Require MySQL character_set_connection of {}, got {}.".format(
                CHARACTER_SET, cur_character_set))
Enforces a sane UTF-8 encoding for the database connection.
def copy(self):
    return Character(self.name, self.race, self.ch_class, self.stats,
                     self.skills, self.story, self.inventory)
make an identical copy of the character
def parse_qaml(self):
    logging.info('Parsing GenomeQAML outputs')
    nesteddictionary = dict()
    dictionary = pandas.read_csv(self.qaml_report).to_dict()
    for header in dictionary:
        for sample, value in dictionary[header].items():
            try:
                nesteddictionary[sample].update({header: value})
            except KeyError:
                nesteddictionary[sample] = dict()
                nesteddictionary[sample].update({header: value})
    for sample in self.metadata:
        setattr(sample, self.analysistype, GenObject())
        sample[self.analysistype].prediction = str()
        for line in nesteddictionary:
            name = nesteddictionary[line]['Sample']
            if name == sample.name:
                sample[self.analysistype].prediction = \
                    nesteddictionary[line]['Predicted_Class']
Parse the GenomeQAML report, and populate metadata objects
def atlasdb_set_zonefile_tried_storage(zonefile_hash, tried_storage, con=None, path=None):
    with AtlasDBOpen(con=con, path=path) as dbcon:
        if tried_storage:
            tried_storage = 1
        else:
            tried_storage = 0
        sql = "UPDATE zonefiles SET tried_storage = ? WHERE zonefile_hash = ?;"
        args = (tried_storage, zonefile_hash)
        cur = dbcon.cursor()
        res = atlasdb_query_execute(cur, sql, args)
        dbcon.commit()
        return True
Make a note that we tried to get the zonefile from storage
def stdin_readable():
    if not WINDOWS:
        try:
            return bool(select([sys.stdin], [], [], 0)[0])
        except Exception:
            logger.log_exc()
    try:
        return not sys.stdin.isatty()
    except Exception:
        logger.log_exc()
    return False
Determine whether stdin has any data to read.
def data(self):
    data = np.empty((self.nRows, self.nCols), dtype=np.float)
    data[:] = np.nan
    for colNum, colData in enumerate(self.colData):
        validIs = np.where([np.isreal(v) for v in colData])[0]
        validData = np.ones(len(colData)) * np.nan
        validData[validIs] = np.array(colData)[validIs]
        data[:len(colData), colNum] = validData
    return data
return all of colData as a 2D numpy array.
def hardware_version(self):
    hardware_string = self.hardware_string
    if not isinstance(hardware_string, bytes):
        hardware_string = self.hardware_string.encode('utf-8')
    if len(hardware_string) > 10:
        self._logger.warn("Truncating hardware string that was longer than 10 bytes: %s",
                          self.hardware_string)
    if len(hardware_string) < 10:
        hardware_string += b'\0' * (10 - len(hardware_string))
    return [hardware_string]
Get a hardware identification string.
def write_astrom_data(self, astrom_data):
    self.write_headers(astrom_data.observations, astrom_data.sys_header)
    self._write_source_data(astrom_data.sources)
Writes a full AstromData structure at once.
def _init_attr_config(attr_cfg):
    attr_cfg.name = ''
    attr_cfg.writable = AttrWriteType.READ
    attr_cfg.data_format = AttrDataFormat.SCALAR
    attr_cfg.data_type = 0
    attr_cfg.max_dim_x = 0
    attr_cfg.max_dim_y = 0
    attr_cfg.description = ''
    attr_cfg.label = ''
    attr_cfg.unit = ''
    attr_cfg.standard_unit = ''
    attr_cfg.display_unit = ''
    attr_cfg.format = ''
    attr_cfg.min_value = ''
    attr_cfg.max_value = ''
    attr_cfg.writable_attr_name = ''
    attr_cfg.extensions = []
Helper function to initialize attribute config objects
def update_role(u_name, newprivilege):
    entry = TabMember.update(
        role=newprivilege
    ).where(TabMember.user_name == u_name)
    try:
        entry.execute()
        return True
    except:
        return False
Update the role of the user.
def _find_classes_param(self):
    for attr in ["classes_"]:
        try:
            return getattr(self.estimator, attr)
        except AttributeError:
            continue
    raise YellowbrickTypeError(
        "could not find classes_ param on {}".format(
            self.estimator.__class__.__name__
        )
    )
Searches the wrapped model for the classes_ parameter.
def check_manual_seed(seed):
    seed = seed or random.randint(1, 10000)
    random.seed(seed)
    torch.manual_seed(seed)
    print('Using manual seed: {seed}'.format(seed=seed))
If manual seed is not specified, choose a random one and communicate it to the user.
def read(self, source_path):
    with pelican_open(source_path) as text:
        parts = text.split('----', 1)
        if len(parts) == 2:
            headerlines = parts[0].splitlines()
            headerpairs = map(lambda l: l.split(':', 1), headerlines)
            headerdict = {pair[0]: pair[1].strip()
                          for pair in headerpairs
                          if len(pair) == 2}
            metadata = self._parse_metadata(headerdict)
            content = textile(parts[1])
        else:
            metadata = {}
            content = textile(text)
    return content, metadata
Parse content and metadata of textile files.
def seek_to_packet(self, index):
    pointer_position = self.packet_positions[index]
    self.blob_file.seek(pointer_position, 0)
Move file pointer to the packet with given index.
def calc_path_and_create_folders(folder, import_path):
    file_path = abspath(path_join(
        folder,
        import_path[:import_path.rfind(".")].replace(".", folder_seperator) + ".py"))
    mkdir_p(dirname(file_path))
    return file_path
calculate the path and create the needed folders
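folder_seperator and mkdir_p come from the surrounding module and aren't shown; assuming they are os.sep and an os.makedirs(..., exist_ok=True) helper, the path computation alone works like this:

    import os

    folder_seperator = os.sep  # assumed equivalent of the module constant

    def path_for(folder, import_path):
        # 'pkg.mod.func' -> 'pkg.mod' -> 'pkg/mod' -> '<folder>/pkg/mod.py'
        stem = import_path[:import_path.rfind(".")].replace(".", folder_seperator)
        return os.path.abspath(os.path.join(folder, stem + ".py"))

    assert path_for("src", "pkg.mod.func").endswith(
        os.path.join("src", "pkg", "mod.py"))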
def load(cls, filename, name=None):
    if not os.path.exists(filename):
        return {}
    name = name or filename
    if name not in cls._conffiles:
        with open(filename) as fdesc:
            content = yaml.load(fdesc, YAMLLoader)
        if content is None:
            content = {}
        cls._conffiles[name] = content
    return cls._conffiles[name]
Load yaml configuration from filename.
def spawn_gdb(pid, address=DFLT_ADDRESS, gdb='gdb', verbose=False,
              ctx=None, proc_iut=None):
    parent, child = socket.socketpair()
    proc = Popen([gdb, '--interpreter=mi', '-nx'], bufsize=0,
                 stdin=child, stdout=child, stderr=STDOUT)
    child.close()
    connections = {}
    gdb = GdbSocket(ctx, address, proc, proc_iut, parent, verbose, connections)
    gdb.mi_command('-target-attach %d' % pid)
    gdb.cli_command('python import pdb_clone.bootstrappdb_gdb')
    asyncore.loop(map=connections)
    proc.wait()
    return gdb.error
Spawn gdb and attach to a process.
def from_yaml(cls, file_path=None):
    try:
        import yaml
    except ImportError:
        yaml = None
    if not yaml:
        import sys
        sys.exit('PyYAML is not installed, but is required in order to parse YAML files.'
                 '\nTo install, run:\n$ pip install PyYAML\nor visit'
                 ' http://pyyaml.org/wiki/PyYAML for instructions.')
    with io.open(file_path, encoding=text_type('utf-8')) as stream:
        users_yaml = yaml.safe_load(stream)
        if isinstance(users_yaml, dict):
            return cls.construct_user_list(raw_users=users_yaml.get('users'))
        else:
            raise ValueError('No YAML object could be decoded')
Create collection from a YAML file.
def log_context(trace_level, stream):
    original_trace_level = DebugInfoHolder.DEBUG_TRACE_LEVEL
    original_stream = DebugInfoHolder.DEBUG_STREAM
    DebugInfoHolder.DEBUG_TRACE_LEVEL = trace_level
    DebugInfoHolder.DEBUG_STREAM = stream
    try:
        yield
    finally:
        DebugInfoHolder.DEBUG_TRACE_LEVEL = original_trace_level
        DebugInfoHolder.DEBUG_STREAM = original_stream
To be used to temporarily change the logging settings.
def bitdepthof(pixel):
    maxd = 0
    for c in re.findall(r'[a-z]\d*', pixel):
        if c[0] != 'x':
            maxd = max(maxd, int(c[1:]))
    return maxd
Return the bitdepth for a Plan9 pixel format string.
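For example, a Plan 9 pixel format like 'r8g8b8' has bitdepth 8, while channels marked 'x' (padding) are ignored:

    assert bitdepthof('r8g8b8') == 8
    assert bitdepthof('x8r8g8b8') == 8  # leading padding channel ignored
    assert bitdepthof('k4') == 4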
def cleanup(arctic_lib, symbol, version_ids, versions_coll,
            shas_to_delete=None, pointers_cfgs=None):
    pointers_cfgs = set(pointers_cfgs) if pointers_cfgs else set()
    collection = arctic_lib.get_top_level_collection()
    version_ids = list(version_ids)
    all_symbol_pointers_cfgs = _get_symbol_pointer_cfgs(symbol, versions_coll)
    all_symbol_pointers_cfgs.update(pointers_cfgs)
    if all_symbol_pointers_cfgs == {FwPointersCfg.DISABLED} or not all_symbol_pointers_cfgs:
        _cleanup_parent_pointers(collection, symbol, version_ids)
        return
    if FwPointersCfg.DISABLED not in all_symbol_pointers_cfgs:
        _cleanup_fw_pointers(collection, symbol, version_ids, versions_coll,
                             shas_to_delete=shas_to_delete, do_clean=True)
        return
    _cleanup_mixed(symbol, collection, version_ids, versions_coll)
Helper method for cleaning up chunks from a version store
def _check_trim(data):
    trim = data["algorithm"].get("trim_reads")
    if trim:
        if trim == "fastp" and data["algorithm"].get("align_split_size") is not False:
            raise ValueError("In sample %s, `trim_reads: fastp` currently requires "
                             "`align_split_size: false`" % (dd.get_sample_name(data)))
Check for valid values for trim_reads.
def complete_media(self, text, line, begidx, endidx):
    choices = {'actor': query_actors,
               'director': TabCompleteExample.static_list_directors,
               'movie_file': (self.path_complete,)}
    completer = argparse_completer.AutoCompleter(TabCompleteExample.media_parser,
                                                 self, arg_choices=choices)
    tokens, _ = self.tokens_for_completion(line, begidx, endidx)
    results = completer.complete_command(tokens, text, line, begidx, endidx)
    return results
Adds tab completion to media
def _output_work(self, work, root):
    output_filename = os.path.join(self._output_dir, work)
    tree = etree.ElementTree(root)
    tree.write(output_filename, encoding='utf-8', pretty_print=True)
Saves the TEI XML document `root` at the path `work`.
def schedule_ping_frequency(self):
    "Send a ping message to slack every 20 seconds"
    ping = crontab('* * * * * */20', func=self.send_ping, start=False)
    ping.start()
Send a ping message to slack every 20 seconds
def getDefaultItems(self):
    plugins = [
        InspectorRegItem(DEFAULT_INSPECTOR,
                         'argos.inspector.qtplugins.table.TableInspector'),
        InspectorRegItem('Qt/Text',
                         'argos.inspector.qtplugins.text.TextInspector'),
        InspectorRegItem('PyQtGraph/1D Line Plot',
                         'argos.inspector.pgplugins.lineplot1d.PgLinePlot1d'),
        InspectorRegItem('PyQtGraph/2D Image Plot',
                         'argos.inspector.pgplugins.imageplot2d.PgImagePlot2d'),
    ]
    if DEBUGGING:
        plugins.append(InspectorRegItem('Debug Inspector',
                                        'argos.inspector.debug.DebugInspector'))
    return plugins
Returns a list with the default plugins in the inspector registry.
def after_init_app(self, app: FlaskUnchained):
    from flask_wtf.csrf import generate_csrf

    @app.after_request
    def set_csrf_cookie(response):
        if response:
            response.set_cookie('csrf_token', generate_csrf())
        return response
Configure an after request hook to set the ``csrf_token`` in the cookie.