code: string (lengths 51 to 2.34k)
docstring: string (lengths 11 to 171)
def __unregister_services(self): with self.__registration_lock: registered_services = self.__registered_services.copy() for registration in registered_services: try: registration.unregister() except BundleException: pass if self.__registered_services: _logger.warning("Not all services have been unregistered...") with self.__registration_lock: self.__registered_services.clear()
Unregisters all bundle services
def _get_pillar_cfg(pillar_key, pillarenv=None, saltenv=None): pillar_cfg = __salt__['pillar.get'](pillar_key, pillarenv=pillarenv, saltenv=saltenv) return pillar_cfg
Retrieve the pillar data from the right environment.
def validate(self): for header in self._requiredHeaders: if not self.headers.get(header, False): raise errors.ParseError('Missing Registration Header: ' + header) for notice in self.notifications: for header in self._requiredNotificationHeaders: if not notice.get(header, False): raise errors.ParseError('Missing Notification Header: ' + header)
Validate required registration headers and required notification headers
def run(items, run_parallel): to_process = [] extras = [] for batch, cur_items in _group_by_batches(items).items(): if _ready_for_het_analysis(cur_items): to_process.append((batch, cur_items)) else: for data in cur_items: extras.append([data]) processed = run_parallel("heterogeneity_estimate", ([xs, b, xs[0]["config"]] for b, xs in to_process)) return _group_by_sample_and_batch(extras + processed)
Top level entry point for calculating heterogeneity, handles organization and job distribution.
def unpack_flags(value, flags): try: return [flags[value]] except KeyError: return [flags[k] for k in sorted(flags.keys()) if k & value > 0]
Multiple flags might be packed in the same field.
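For instance, with a hypothetical flag table (not part of the original code), an exact key hit returns that single flag, while a packed value is expanded bitwise:

    flags = {1: "READ", 2: "WRITE", 4: "EXEC"}   # hypothetical flag table
    unpack_flags(2, flags)    # exact match       -> ["WRITE"]
    unpack_flags(5, flags)    # 5 = 1 | 4, packed -> ["READ", "EXEC"]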
def rebin(a, newshape): slices = [slice(0, old, float(old)/new) for old, new in zip(a.shape, newshape)] coordinates = numpy.mgrid[slices] indices = coordinates.astype('i') return a[tuple(indices)]
Rebin an array to a new shape.
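A minimal downsampling sketch, assuming numpy is available as in the function body:

    import numpy
    a = numpy.arange(16).reshape(4, 4)
    rebin(a, (2, 2))   # picks every other row/column -> [[0, 2], [8, 10]]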
def _doc_parms(cls): axis_descr = "{%s}" % ', '.join("{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS)) name = (cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else 'scalar') name2 = cls.__name__ return axis_descr, name, name2
Return a tuple of the doc parms.
def writeBoolean(self, n): t = TYPE_BOOL_TRUE if n is False: t = TYPE_BOOL_FALSE self.stream.write(t)
Writes a Boolean to the stream.
def strip_files(files, argv_max=(256 * 1024)): tostrip = [(fn, flipwritable(fn)) for fn in files] while tostrip: cmd = list(STRIPCMD) flips = [] pathlen = reduce(operator.add, [len(s) + 1 for s in cmd]) while pathlen < argv_max: if not tostrip: break added, flip = tostrip.pop() pathlen += len(added) + 1 cmd.append(added) flips.append((added, flip)) else: cmd.pop() tostrip.append(flips.pop()) os.spawnv(os.P_WAIT, cmd[0], cmd) for args in flips: flipwritable(*args)
Strip a list of files
def _create_http_client(): global _http_client defaults = {'user_agent': USER_AGENT} auth_username, auth_password = _credentials if auth_username and auth_password: defaults['auth_username'] = auth_username defaults['auth_password'] = auth_password _http_client = httpclient.AsyncHTTPClient( force_instance=True, defaults=defaults, max_clients=_max_clients)
Create the HTTP client with authentication credentials if required.
async def generate_access_token(self, user): payload = await self._get_payload(user) secret = self._get_secret(True) algorithm = self._get_algorithm() return jwt.encode(payload, secret, algorithm=algorithm).decode("utf-8")
Generate an access token for a given user.
def visit_Import(self, node): for alias in node.names: current_module = MODULES for path in alias.name.split('.'): if path not in current_module: raise PythranSyntaxError( "Module '{0}' unknown.".format(alias.name), node) else: current_module = current_module[path]
Check if imported module exists in MODULES.
def _setup_states(state_definitions, prev=()): states = list(prev) for state_def in state_definitions: if len(state_def) != 2: raise TypeError( "The 'state' attribute of a workflow should be " "a two-tuple of strings; got %r instead." % (state_def,) ) name, title = state_def state = State(name, title) if any(st.name == name for st in states): states = [state if st.name == name else st for st in states] else: states.append(state) return StateList(states)
Create a StateList object from a 'states' Workflow attribute.
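A rough usage sketch, assuming the State and StateList classes from the surrounding workflow module (state names below are made up):

    states = _setup_states([("draft", "Draft"), ("done", "Done")])
    # redefining an existing name replaces it in place instead of appending
    states = _setup_states([("done", "Finished")], prev=states)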
def _fail(self, message, text, i): raise ValueError("{}:\n{}".format(message, text[i : i + 79]))
Raise a ValueError with the given message and the text starting at position i.
def _collapse_by_bam_variantcaller(samples): by_bam = collections.OrderedDict() for data in (x[0] for x in samples): work_bam = utils.get_in(data, ("combine", "work_bam", "out"), data.get("align_bam")) variantcaller = get_variantcaller(data) if isinstance(work_bam, list): work_bam = tuple(work_bam) key = (multi.get_batch_for_key(data), work_bam, variantcaller) try: by_bam[key].append(data) except KeyError: by_bam[key] = [data] out = [] for grouped_data in by_bam.values(): cur = grouped_data[0] cur.pop("region", None) region_bams = cur.pop("region_bams", None) if region_bams and len(region_bams[0]) > 1: cur.pop("work_bam", None) out.append([cur]) return out
Collapse regions to a single representative by BAM input, variant caller and batch.
def gen_template_files(path): " Generate relative template paths. " path = path.rstrip(op.sep) for root, _, files in walk(path): for f in filter(lambda x: x not in (TPLNAME, CFGNAME), files): yield op.relpath(op.join(root, f), path)
Generate relative template paths.
def delete_typeattr(typeattr,**kwargs): tmpltype = get_templatetype(typeattr.type_id, user_id=kwargs.get('user_id')) ta = db.DBSession.query(TypeAttr).filter(TypeAttr.type_id == typeattr.type_id, TypeAttr.attr_id == typeattr.attr_id).one() tmpltype.typeattrs.remove(ta) db.DBSession.flush() return 'OK'
Remove a typeattr from an existing type
def checksum(self, path, hashtype='sha1'): return self._handler.checksum(hashtype, posix_path(path))
Returns the checksum of the given path.
def _terminate_procs(procs): logging.warn("Stopping all remaining processes") for proc, g in procs.values(): logging.debug("[%s] SIGTERM", proc.pid) try: proc.terminate() except OSError as e: if e.errno != errno.ESRCH: raise sys.exit(1)
Terminate all processes in the process dictionary
def check_exists(path, type='file'): if type == 'file': if not os.path.isfile(path): raise RuntimeError('The file `%s` does not exist.' % path) else: if not os.path.isdir(path): raise RuntimeError('The folder `%s` does not exist.' % path) return True
Check if a file or a folder exists
def run(self): t0 = time.time() haveQ = self._isReactiveMarket() self._withholdOffbids() self._offbidToCase() success = self._runOPF() if success: gteeOfferPrice, gteeBidPrice = self._nodalPrices(haveQ) self._runAuction(gteeOfferPrice, gteeBidPrice, haveQ) logger.info("SmartMarket cleared in %.3fs" % (time.time() - t0)) else: for offbid in self.offers + self.bids: offbid.clearedQuantity = 0.0 offbid.clearedPrice = 0.0 offbid.accepted = False offbid.generator.p = 0.0 logger.error("Non-convergent market OPF. Blackout!") return self.offers, self.bids
Computes cleared offers and bids.
def to_unit_memory(number): kb = 1024 number /= kb if number < 100: return '{} Kb'.format(round(number, 2)) number /= kb if number < 300: return '{} Mb'.format(round(number, 2)) number /= kb return '{} Gb'.format(round(number, 2))
Creates a string representation of memory size given `number`.
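For example, assuming the input is a raw byte count (which the initial division by 1024 suggests):

    to_unit_memory(51200)        # 50 KiB  -> '50.0 Kb'
    to_unit_memory(134217728)    # 128 MiB -> '128.0 Mb'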
def get(self, request, pzone_pk): try: pzone = PZone.objects.get(pk=pzone_pk) except PZone.DoesNotExist: raise Http404("Cannot find given pzone.") filters = {"pzone": pzone} if "from" in request.GET: parsed = dateparse.parse_datetime(request.GET["from"]) if parsed is not None: filters["when__gte"] = parsed if "to" in request.GET: parsed = dateparse.parse_datetime(request.GET["to"]) if parsed is not None: filters["when__lt"] = parsed operations = PZoneOperation.objects.filter(**filters) return Response(self.serialize_operations(operations), content_type="application/json")
Get all the operations for a given pzone.
def check_purge_status(self, purge_id): content = self._fetch("/purge?id=%s" % purge_id) return map(lambda x: FastlyPurgeStatus(self, x), content)
Get the status and times of a recently completed purge.
def render_robots_meta_tag(context): request = context['request'] robots_indexing = None robots_following = None if request.get_host() in settings.META_TAGGER_ROBOTS_DOMAIN_WHITELIST: if context.get('object'): try: robots_indexing = context['object'].get_robots_indexing() robots_following = context['object'].get_robots_following() except AttributeError: pass elif context.get('meta_tagger'): robots_indexing = context['meta_tagger'].get('robots_indexing', robots_indexing) robots_following = context['meta_tagger'].get('robots_following', robots_following) if robots_indexing is None: try: robots_indexing = request.current_page.metatagpageextension.robots_indexing except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist): robots_indexing = True if robots_following is None: try: robots_following = request.current_page.metatagpageextension.robots_following except (AttributeError, NoReverseMatch, MetaTagPageExtension.DoesNotExist): robots_following = True return mark_safe('<meta name="robots" content="{robots_indexing}, {robots_following}">'.format( robots_indexing='index' if robots_indexing else 'noindex', robots_following='follow' if robots_following else 'nofollow' ))
Returns the robots meta tag.
def tally_role_columns(self): totals = self.report["totals"] roles = self.report["roles"] totals["dependencies"] = sum(roles[item]["total_dependencies"] for item in roles) totals["defaults"] = sum(roles[item]["total_defaults"] for item in roles) totals["facts"] = sum(roles[item]["total_facts"] for item in roles) totals["files"] = sum(roles[item]["total_files"] for item in roles) totals["lines"] = sum(roles[item]["total_lines"] for item in roles)
Sum up all of the stat columns.
def create_genome_size_dict(genome): size_file = get_genome_size_file(genome) size_lines = open(size_file).readlines() size_dict = {} for line in size_lines: genome, length = line.split() size_dict[genome] = int(length) return size_dict
Create a genome size dict from the genome size file.
def send_prefix(pymux, variables): process = pymux.arrangement.get_active_pane().process for k in pymux.key_bindings_manager.prefix: vt100_data = prompt_toolkit_key_to_vt100_key(k) process.write_input(vt100_data)
Send prefix to active pane.
def getfile(object): if ismodule(object): if hasattr(object, '__file__'): return object.__file__ raise TypeError, 'arg is a built-in module' if isclass(object): object = sys.modules.get(object.__module__) if hasattr(object, '__file__'): return object.__file__ raise TypeError, 'arg is a built-in class' if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): return object.co_filename raise TypeError, 'arg is not a module, class, method, ' \ 'function, traceback, frame, or code object'
Work out which source or compiled file an object was defined in.
def _create_merge_filelist(bam_files, base_file, config): bam_file_list = "%s.list" % os.path.splitext(base_file)[0] samtools = config_utils.get_program("samtools", config) with open(bam_file_list, "w") as out_handle: for f in sorted(bam_files): do.run('{} quickcheck -v {}'.format(samtools, f), "Ensure integrity of input merge BAM files") out_handle.write("%s\n" % f) return bam_file_list
Create list of input files for merge, ensuring all files are valid.
def on_if(self, node): block = node.body if not self.run(node.test): block = node.orelse for tnode in block: self.run(tnode)
Regular if-then-else statement.
def to_representation(self, instance): ret = OrderedDict() readable_fields = [ field for field in self.fields.values() if not field.write_only ] for field in readable_fields: try: field_representation = self._get_field_representation(field, instance) ret[field.field_name] = field_representation except SkipField: continue return ret
Object instance -> Dict of primitive datatypes.
def deserialize_uri(value): if isinstance(value, BNode): return value if isinstance(value, URIRef): return value if not value: return None if not isinstance(value, basestring): raise ValueError("Cannot create URI from {0} of type {1}".format(value, value.__class__)) if value.startswith("_:"): return BNode(value[2:]) return URIRef(value)
Deserialize a representation of a BNode or URIRef.
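A quick illustration, assuming rdflib-style BNode and URIRef classes as used above:

    deserialize_uri("_:b0")                  # -> BNode('b0')
    deserialize_uri("http://example.org/x")  # -> URIRef('http://example.org/x')
    deserialize_uri("")                      # -> None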
def update_token_tempfile(token): with open(tmp, 'w') as f: f.write(json.dumps(token, indent=4))
Example of a function for updating the token tempfile.
def create_manifest(self): config_path = os.path.join(self.expt.control_path, DEFAULT_CONFIG_FNAME) self.manifest = [] if os.path.isfile(config_path): self.manifest.append(config_path) for model in self.expt.models: config_files = model.config_files + model.optional_config_files self.manifest.extend(os.path.join(model.control_path, f) for f in config_files) for mf in self.expt.manifest: self.manifest.append(mf.path)
Construct the list of files to be tracked by the runlog.
def rooms_favorite(self, room_id=None, room_name=None, favorite=True): if room_id is not None: return self.__call_api_post('rooms.favorite', roomId=room_id, favorite=favorite) elif room_name is not None: return self.__call_api_post('rooms.favorite', roomName=room_name, favorite=favorite) else: raise RocketMissingParamException('roomId or roomName required')
Favorite or unfavorite room.
def special_login_handler(self, delay_factor=1): delay_factor = self.select_delay_factor(delay_factor) self.write_channel(self.RETURN) time.sleep(1 * delay_factor)
Adding a delay after login.
def Exception(obj, eng, callbacks, exc_info): exception_repr = ''.join(traceback.format_exception(*exc_info)) msg = "Error:\n%s" % (exception_repr) eng.log.error(msg) if obj: obj.extra_data['_error_msg'] = exception_repr obj.save( status=obj.known_statuses.ERROR, callback_pos=eng.state.callback_pos, id_workflow=eng.uuid ) eng.save(WorkflowStatus.ERROR) db.session.commit() super(InvenioTransitionAction, InvenioTransitionAction).Exception( obj, eng, callbacks, exc_info )
Handle general exceptions in workflow, saving states.
def __prepare_gprest_call(self, requestURL, params=None, headers=None, restType='GET', body=None): if self.__serviceAccount.is_iam_enabled(): auth = None iam_api_key_header = { self.__AUTHORIZATION_HEADER_KEY: str('API-KEY '+self.__serviceAccount.get_api_key()) } if not headers is None: headers.update(iam_api_key_header) else: headers = iam_api_key_header elif self.__auth == self.BASIC_AUTH: auth = (self.__serviceAccount.get_user_id(), self.__serviceAccount.get_password()) elif self.__auth == self.HMAC_AUTH: auth = None fakeRequest = requests.PreparedRequest() fakeRequest.prepare_url(requestURL, params=params) preparedUrl = fakeRequest.url hmacHeaders = self.__get_gaas_hmac_headers(method=restType, url=preparedUrl, body=body) if not headers is None: headers.update(hmacHeaders) else: headers = hmacHeaders return auth, headers
Returns Authorization type and GP headers
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): self.fileExtension = extension with open(path, 'r') as hmetFile: for line in hmetFile: sline = line.strip().split() try: dateTime = datetime(int(sline[0]), int(sline[1]), int(sline[2]), int(sline[3])) hmetRecord = HmetRecord(hmetDateTime=dateTime, barometricPress=sline[4], relHumidity=sline[5], totalSkyCover=sline[6], windSpeed=sline[7], dryBulbTemp=sline[8], directRad=sline[9], globalRad=sline[10]) hmetRecord.hmetFile = self except: pass
Read HMET WES from File Method
def frameify(self, state, data): data = state.recv_buf + data while data: line, sep, rest = data.partition('\n') if sep != '\n': break data = rest if self.carriage_return and line[-1] == '\r': line = line[:-1] try: yield line except FrameSwitch: break state.recv_buf = data
Split data into a sequence of lines.
def reset(self): self.start_point = self.end_point = None self.is_emitting_point = False self.rubber_band.reset(QgsWkbTypes.PolygonGeometry)
Clear the rubber band for the analysis extents.
def _get_arg_tokens(cli): arg = cli.input_processor.arg return [ (Token.Prompt.Arg, '(arg: '), (Token.Prompt.Arg.Text, str(arg)), (Token.Prompt.Arg, ') '), ]
Tokens for the arg-prompt.
def tt_comp(self, sampled_topics): samples = sampled_topics.shape[0] tt = np.zeros((self.V, self.K, samples)) for s in range(samples): tt[:, :, s] = \ samplers_lda.tt_comp(self.tokens, sampled_topics[s, :], self.N, self.V, self.K, self.beta) return tt
Compute term-topic matrix from sampled_topics.
def __CheckValid(self, value): "check for validity of value" val = self.__val self.is_valid = True try: val = set_float(value) if self.__min is not None and (val < self.__min): self.is_valid = False val = self.__min if self.__max is not None and (val > self.__max): self.is_valid = False val = self.__max except: self.is_valid = False self.__bound_val = self.__val = val fgcol, bgcol = self.fgcol_valid, self.bgcol_valid if not self.is_valid: fgcol, bgcol = self.fgcol_invalid, self.bgcol_invalid self.SetForegroundColour(fgcol) self.SetBackgroundColour(bgcol) self.Refresh()
check for validity of value
def list_elasticache(region, filter_by_kwargs): conn = boto.elasticache.connect_to_region(region) req = conn.describe_cache_clusters() data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"] if filter_by_kwargs: clusters = [x['CacheClusterId'] for x in data if x[filter_by_kwargs.keys()[0]] == filter_by_kwargs.values()[0]] else: clusters = [x['CacheClusterId'] for x in data] return clusters
List all ElastiCache Clusters.
def _merge_fastqc(samples): fastqc_list = collections.defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
merge all fastqc samples into one by module
def timestamp(num_params, p_levels, k_choices, N): string = "_v%s_l%s_gs%s_k%s_N%s_%s.txt" % (num_params, p_levels, k_choices, N, dt.strftime(dt.now(), "%d%m%y%H%M%S")) return string
Returns a uniform timestamp with parameter values for file identification
def configure_namespacebrowser(self): self.sig_namespace_view.connect(lambda data: self.namespacebrowser.process_remote_view(data)) self.sig_var_properties.connect(lambda data: self.namespacebrowser.set_var_properties(data))
Configure associated namespace browser widget
def resolve_remote(self): for idx, target in enumerate(self._targets): if isinstance(target, remote): resolved = target.resolve() if isinstance(resolved, str): resolved = interpolate(resolved, env.sos_dict.dict()) self._targets[idx] = file_target(resolved).set(**target._dict) return self
If target is of remote type, resolve it
def hydrate_sources(sources_field, glob_match_error_behavior): path_globs = sources_field.path_globs.copy(glob_match_error_behavior=glob_match_error_behavior) snapshot = yield Get(Snapshot, PathGlobs, path_globs) fileset_with_spec = _eager_fileset_with_spec( sources_field.address.spec_path, sources_field.filespecs, snapshot) sources_field.validate_fn(fileset_with_spec) yield HydratedField(sources_field.arg, fileset_with_spec)
Given a SourcesField, request a Snapshot for its path_globs and create an EagerFilesetWithSpec.
def create_peptidequant_lookup(fns, pqdb, poolnames, pepseq_colnr, ms1_qcolpattern=None, isobqcolpattern=None, psmnrpattern=None, fdrcolpattern=None, pepcolpattern=None): patterns = [ms1_qcolpattern, fdrcolpattern, pepcolpattern] storefuns = [pqdb.store_precursor_quants, pqdb.store_fdr, pqdb.store_pep] create_pep_protein_quant_lookup(fns, pqdb, poolnames, pepseq_colnr, patterns, storefuns, isobqcolpattern, psmnrpattern)
Calls lower level function to create a peptide quant lookup
def sum(self, vector): return self.from_list( [x + vector.vector[i] for i, x in enumerate(self.to_list())] )
Return a Vector instance as the vector sum of two vectors.
def _hline(self): if not self._hline_string: self._hline_string = self._build_hline() return self._hline_string
Return the horizontal line string, building it on first use.
def _normalize_abmn(abmn): abmn_2d = np.atleast_2d(abmn) abmn_normalized = np.hstack(( np.sort(abmn_2d[:, 0:2], axis=1), np.sort(abmn_2d[:, 2:4], axis=1), )) return abmn_normalized
return a normalized version of abmn
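A small example: each row's first pair and second pair are sorted independently (the A/B and M/N electrodes, going by the usual naming):

    import numpy as np
    _normalize_abmn([3, 1, 8, 5])   # -> array([[1, 3, 5, 8]])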
def seed(self): for generation in self.generations: for s in generation: try: if s.rseed is not None: value = s.random(**s.parents.value) except: pass
Seed new initial values for the stochastics.
def read_texture(filename, attrs=None): filename = os.path.abspath(os.path.expanduser(filename)) try: reader = get_reader(filename) image = standard_reader_routine(reader, filename, attrs=attrs) return vtki.image_to_texture(image) except KeyError: pass return vtki.numpy_to_texture(imageio.imread(filename))
Loads a ``vtkTexture`` from an image file.
def geom_transform(geom, t_srs): s_srs = geom.GetSpatialReference() if not s_srs.IsSame(t_srs): ct = osr.CoordinateTransformation(s_srs, t_srs) geom.Transform(ct) geom.AssignSpatialReference(t_srs)
Transform a geometry in place
def pesach_dow(self): jdn = conv.hdate_to_jdn(HebrewDate(self.hdate.year, Months.Nisan, 15)) return (jdn + 1) % 7 + 1
Return the first day of week for Pesach.
def rollout(self, **kwargs): if kwargs.has_key('tau'): timesteps = int(self.timesteps / kwargs['tau']) else: timesteps = self.timesteps self.x_track = np.zeros(timesteps) self.reset_state() for t in range(timesteps): self.x_track[t] = self.x self.step(**kwargs) return self.x_track
Generate x for open loop movements.
def _remove_media(self,directory,files=None): if not self._connectToFB(): logger.error("%s - Couldn't connect to fb") return False db=self._loadDB(directory) if not files: files=db.keys() if isinstance(files,basestring): files=[files] for fn in files: print("%s - Deleting from fb [local copy intact]"%(fn)) try: pid=db[fn]['photoid'] except: logger.debug("%s - Was never in fb DB"%(fn)) continue try: self.fb.delete_object(pid) except facebook.GraphAPIError as e: print("%s - fb: delete failed with status: %s:%s"\ %(fn,e.type,e.message)) return False logger.debug('Removing %s from fb DB'%(fn)) del db[fn] self._saveDB(directory,db) return True
Removes specified files from fb
def retrieve_data(self): url = self.config.get('url') timeout = float(self.config.get('timeout', 10)) self.data = requests.get(url, verify=self.verify_ssl, timeout=timeout).content
retrieve data from an HTTP URL
def resume(jid, state_id=None): minion = salt.minion.MasterMinion(__opts__) minion.functions['state.resume'](jid, state_id)
Remove a pause from a jid, allowing it to continue
def _from_dict(cls, _dict): args = {} if 'element_pair' in _dict: args['element_pair'] = [ ElementPair._from_dict(x) for x in (_dict.get('element_pair')) ] if 'identical_text' in _dict: args['identical_text'] = _dict.get('identical_text') if 'provenance_ids' in _dict: args['provenance_ids'] = _dict.get('provenance_ids') if 'significant_elements' in _dict: args['significant_elements'] = _dict.get('significant_elements') return cls(**args)
Initialize a AlignedElement object from a json dictionary.
def upload(ctx, release, rebuild): dist_path = Path(DIST_PATH) if rebuild is False: if not dist_path.exists() or not list(dist_path.glob('*')): print("No distribution files found. Please run 'build' command first") return else: ctx.invoke(build, force=True) if release: args = ['twine', 'upload', 'dist/*'] else: repository = 'https://test.pypi.org/legacy/' args = ['twine', 'upload', '--repository-url', repository, 'dist/*'] env = os.environ.copy() p = subprocess.Popen(args, env=env) p.wait()
Uploads distribution files to pypi or pypitest.
def use_file(self, enabled=True, file_name=None, level=logging.WARNING, when='d', interval=1, backup_count=30, delay=False, utc=False, at_time=None, log_format=None, date_format=None): if enabled: if not self.__file_handler: assert file_name, 'File name is missing!' kwargs = { 'filename': file_name, 'when': when, 'interval': interval, 'backupCount': backup_count, 'encoding': 'UTF-8', 'delay': delay, 'utc': utc, } if sys.version_info[0] >= 3: kwargs['atTime'] = at_time self.__file_handler = TimedRotatingFileHandler(**kwargs) if not log_format: log_format = '%(asctime)s %(name)s[%(process)d] ' \ '%(programname)s/%(module)s/%(funcName)s[%(lineno)d] ' \ '%(levelname)s %(message)s' formatter = logging.Formatter(fmt=log_format, datefmt=date_format) self.__file_handler.setFormatter(fmt=formatter) self.__file_handler.setLevel(level=level) self.add_handler(hdlr=self.__file_handler) elif self.__file_handler: self.remove_handler(hdlr=self.__file_handler) self.__file_handler = None
Handler for logging to a file, rotating the log file at certain timed intervals.
def fetch_user(app_id, token, ticket, url_detail='https://pswdless.appspot.com/rest/detail'): return FetchUserWithValidation(app_id, token, ticket, url_detail)
Fetch the user detail from Passwordless
def schnorr_partial_combine(self, schnorr_sigs): if not HAS_SCHNORR: raise Exception("secp256k1_schnorr not enabled") assert len(schnorr_sigs) > 0 sig64 = ffi.new('char [64]') sig64sin = [] for sig in schnorr_sigs: if not isinstance(sig, bytes): raise TypeError('expected bytes, got {}'.format(type(sig))) if len(sig) != 64: raise Exception('invalid signature length') sig64sin.append(ffi.new('char []', sig)) res = lib.secp256k1_schnorr_partial_combine( self.ctx, sig64, sig64sin, len(sig64sin)) if res <= 0: raise Exception('failed to combine signatures ({})'.format(res)) return bytes(ffi.buffer(sig64, 64))
Combine multiple Schnorr partial signatures.
def push_tx(self, crypto, tx_hex): url = "%s/pushtx" % self.base_url return self.post_url(url, {'hex': tx_hex}).content
This method is untested.
async def check_response(response, valid_response_codes): if response.status == 204: return True if response.status in valid_response_codes: _js = await response.json() return _js else: raise PvApiResponseStatusError(response.status)
Check the response for correctness.
async def listTriggers(self): trigs = [] for (iden, trig) in self.cell.triggers.list(): useriden = trig['useriden'] if not (self.user.admin or useriden == self.user.iden): continue user = self.cell.auth.user(useriden) trig['username'] = '<unknown>' if user is None else user.name trigs.append((iden, trig)) return trigs
Lists all the triggers that the current user is authorized to access
def CreateCounterMetadata(metric_name, fields=None, docstring=None, units=None): return rdf_stats.MetricMetadata( varname=metric_name, metric_type=rdf_stats.MetricMetadata.MetricType.COUNTER, value_type=rdf_stats.MetricMetadata.ValueType.INT, fields_defs=FieldDefinitionProtosFromTuples(fields or []), docstring=docstring, units=units)
Helper function for creating MetricMetadata for counter metrics.
def _format_numeric_sequence(self, _sequence, separator="."): if not _sequence: return colorize(_sequence, "purple") _sequence = _sequence if _sequence is not None else self.obj minus = (2 if self._depth > 0 else 0) just_size = len(str(len(_sequence))) out = [] add_out = out.append for i, item in enumerate(_sequence): self._incr_just_size(just_size+minus) add_out(self._numeric_prefix( i, self.pretty(item, display=False), just=just_size, color="blue", separator=separator)) self._decr_just_size(just_size+minus) if not self._depth: return padd("\n".join(out) if out else str(out), padding="top") else: return "\n".join(out) if out else str(out)
Length of the highest index in chars = justification size
def _load_extensions(self): log.debug(u"loading all extensions : %s", self.extensions) self.loaded_extensions = [] for f in self.extensions: if not os.path.isabs(f): f = os.path.abspath(f) if not os.path.exists(f): raise CoreError(u"Extension file: {0} not found on disk".format(f)) self.loaded_extensions.append(imp.load_source("", f)) log.debug(self.loaded_extensions) log.debug([dir(m) for m in self.loaded_extensions])
Load all extension files into the namespace pykwalify.ext
def update_default_channels(sender, instance, created, **kwargs): if instance.default: Channel.objects.filter(default=True).exclude( channel_id=instance.channel_id ).update(default=False)
Post save hook to ensure that there is only one default
def _get_func(cls, source_ver, target_ver): matches = ( func for func in cls._upgrade_funcs if func.source == source_ver and func.target == target_ver ) try: match, = matches except ValueError: raise ValueError( f"No migration from {source_ver} to {target_ver}") return match
Return exactly one function to convert from source to target
def gen_str(src, dst): return ReilBuilder.build(ReilMnemonic.STR, src, ReilEmptyOperand(), dst)
Return a STR instruction.
def disconnect(self): try: os.kill(-self.pid, signal.SIGKILL) except OSError: pass self.read_buffer = b'' self.write_buffer = b'' self.set_enabled(False) if self.read_in_state_not_started: self.print_lines(self.read_in_state_not_started) self.read_in_state_not_started = b'' if options.abort_error and self.state is STATE_NOT_STARTED: raise asyncore.ExitNow(1) self.change_state(STATE_DEAD)
We are no longer interested in this remote process
def summary(self): return "== Model %s did not complete test %s due to error '%s'. ==" %\ (str(self.model), str(self.test), str(self.score))
Summarize the performance of a model on a test.
def peer_retrieve(key, relation_name='cluster'): cluster_rels = relation_ids(relation_name) if len(cluster_rels) > 0: cluster_rid = cluster_rels[0] return relation_get(attribute=key, rid=cluster_rid, unit=local_unit()) else: raise ValueError('Unable to detect ' 'peer relation {}'.format(relation_name))
Retrieve a named key from peer relation `relation_name`.
def _iter_valid_subtotal_dicts(self): for insertion_dict in self._insertion_dicts: if not isinstance(insertion_dict, dict): continue if insertion_dict.get("function") != "subtotal": continue if not {"anchor", "args", "name"}.issubset(insertion_dict.keys()): continue if not self._element_ids.intersection(insertion_dict["args"]): continue yield insertion_dict
Generate each insertion dict that represents a valid subtotal.
def _import_sub_module(module, name): module = __import__(module.__name__ + "." + name) for level in name.split("."): module = getattr(module, level) return module
import_sub_module will mimic the function of importlib.import_module
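A quick illustration using only the standard library:

    import os
    _import_sub_module(os, "path")   # returns the os.path module object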
def _set_pip_ssl(anaconda_dir): if anaconda_dir: cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem") if os.path.exists(cert_file): os.environ["PIP_CERT"] = cert_file
Set PIP SSL certificate to installed conda certificate to avoid SSL errors
def AppendIndexDictionaryToFile(uniqueWords, ndxFile, ipFile, useShortFileName='Y'): if useShortFileName == 'Y': f = os.path.basename(ipFile) else: f = ipFile with open(ndxFile, "a", encoding='utf-8', errors='replace') as ndx: word_keys = uniqueWords.keys() for word in sorted(word_keys): if word != '': line_nums = uniqueWords[word] ndx.write(f + ', ' + word + ', ') for line_num in line_nums: ndx.write(str(line_num)) ndx.write('\n')
Save the list of unique words to the master list
def label_for(self, name): method = getattr(self, name) if method.__doc__ and method.__doc__.strip(): return method.__doc__.strip().splitlines()[0] return humanize(name.replace(self._prefix, ''))
Get a human readable label for a method given its name
def wrap_onspace(self, text): def _truncate(line, word): return '{line}{part}{word}'.format( line=line, part=' \n'[(len(line[line.rfind('\n')+1:]) + len(word.split('\n', 1)[0]) >= self.width)], word=word ) return reduce(_truncate, text.split(' '))
When the text inside the column is longer than the width, split on spaces and continue on the next line.
def check_existance(f): if not opath.isfile(f): logging.error("Nanoget: File provided doesn't exist or the path is incorrect: {}".format(f)) sys.exit("File provided doesn't exist or the path is incorrect: {}".format(f))
Check if the file supplied as input exists.
def _rgbtomask(self, obj): dat = obj.get_image().get_data() return dat.sum(axis=2).astype(np.bool)
Convert RGB arrays from mask canvas object back to boolean mask.
def Dlmk(l,m,k,phi1,phi2,theta1,theta2): return exp(complex(0.,-m*phi1)) * dlmk(l,m,k,theta1) * \ exp(complex(0.,-k*gamma(phi1,phi2,theta1,theta2)))
Returns the value of D^l_mk as defined in Allen & Ottewill '97.
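Restated from the code above, the implemented quantity is

    D^l_{mk} = e^{-i m \phi_1} \, d^l_{mk}(\theta_1) \, e^{-i k \gamma(\phi_1, \phi_2, \theta_1, \theta_2)}

where dlmk and gamma are the helper functions referenced in the body.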
def run(self): config = self.state.document.settings.env.config processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url) process_nodes = [] for process in sorted(processes, key=itemgetter('name')): process_nodes.extend(self.make_process_node(process)) return process_nodes
Create a list of process definitions.
def random_string(length): letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ" return "".join([random.choice(letters) for _ in range(length)])
Generates a random alphanumeric string
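For instance (output is random; the value shown is illustrative only):

    random_string(6)   # e.g. 'M7KQ2B' -- the alphabet deliberately omits 0, 1, I and O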
def main(argv=None): try: colorama.init() if argv is None: argv = sys.argv[1:] _main(argv) except RuntimeError as e: print(colorama.Fore.RED + 'ERROR: ' + str(e) + colorama.Style.RESET_ALL) sys.exit(1) else: sys.exit(0)
Main entry point when the user runs the `trytravis` command.
def curl_couchdb(url, method='GET', base_url=BASE_URL, data=None): (username, password) = get_admin() if username is None: auth = None else: auth = (username, password) if method == 'PUT': req = requests.put('{}{}'.format(base_url, url), auth=auth, data=data) elif method == 'DELETE': req = requests.delete('{}{}'.format(base_url, url), auth=auth) else: req = requests.get('{}{}'.format(base_url, url), auth=auth) if req.status_code not in [200, 201]: raise HTTPError('{}: {}'.format(req.status_code, req.text)) return req
Launch a curl on CouchDB instance
def doFindAny(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND', **params): self._preFind(WHAT, SORT, SKIP, MAX, LOP) for key in params: self._addDBParam(key, params[key]) return self._doAction('-findany')
This function will perform the command -findany.
def checkout_and_create_branch(repo, name): local_branch = repo.branches[name] if name in repo.branches else None if not local_branch: if name in repo.remotes.origin.refs: msg = repo.git.checkout(name) _LOGGER.debug(msg) return local_branch = repo.create_head(name) local_branch.checkout()
Checkout branch. Create it if necessary
def _configure_app(app_): app_.url_map.strict_slashes = False app_.config.from_object(default_settings) app_.config.from_envvar('JOB_CONFIG', silent=True) db_url = app_.config.get('SQLALCHEMY_DATABASE_URI') if not db_url: raise Exception('No db_url in config') app_.wsgi_app = ProxyFix(app_.wsgi_app) global SSL_VERIFY if app_.config.get('SSL_VERIFY') in ['False', 'FALSE', '0', False, 0]: SSL_VERIFY = False else: SSL_VERIFY = True return app_
Configure the Flask WSGI app.
def _read_preference_for(self, session): if session: return session._txn_read_preference() or self.__read_preference return self.__read_preference
Read only access to the read preference of this instance or session.
def page_exists_on_disk(self, slug): r = False page_dir = os.path.join(self.dirs['source'], slug) page_file_name = os.path.join(page_dir, slug + '.md') if os.path.isdir(page_dir): if os.path.isfile(page_file_name): r = True return r
Return True if the page directory and page file both exist.
def validate_target(self, target): archs = target.split('/') for arch in archs: if not arch in self.archs: return False return True
Make sure that the specified target only contains architectures that we know about.
def check_sentence_spacing(text): err = "typography.symbols.sentence_spacing" msg = u"More than two spaces after the period; use 1 or 2." regex = "\. {3}" return existence_check( text, [regex], err, msg, max_errors=3, require_padding=False)
Use no more than two spaces after a period.
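The underlying pattern simply flags a period followed by three or more spaces:

    import re
    bool(re.search(r"\. {3}", "End.   Next."))   # True: three spaces after the period
    bool(re.search(r"\. {3}", "End.  Next."))    # False: two spaces is accepted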