def validate(self, data, expect_header_row=True, ignore_lines=0, summarize=False,
             limit=0, context=None, report_unexpected_exceptions=True):
    problems = list()
    problem_generator = self.ivalidate(data, expect_header_row, ignore_lines,
                                       summarize, context,
                                       report_unexpected_exceptions)
    for i, p in enumerate(problem_generator):
        if not limit or i < limit:
            problems.append(p)
    return problems
Validate `data` and return a list of validation problems found. Arguments --------- `data` - any source of row-oriented data, e.g., as provided by a `csv.reader`, or a list of lists of strings, or ... `expect_header_row` - does the data contain a header row (i.e., the first record is a list of field names)? Defaults to True. `ignore_lines` - ignore n lines (rows) at the beginning of the data `summarize` - only report problem codes, no other details `limit` - report at most n problems `context` - a dictionary of any additional information to be added to any problems found - useful if problems are being aggregated from multiple validators `report_unexpected_exceptions` - value check function, value predicates, record check functions, record predicates, and other user-supplied validation functions may raise unexpected exceptions. If this argument is true, any unexpected exceptions will be reported as validation problems; if False, unexpected exceptions will be handled silently.
def _emit_search_criteria(user_ids, job_ids, task_ids, labels):
    print('Delete running jobs:')
    print(' user:')
    print(' %s\n' % user_ids)
    print(' job-id:')
    print(' %s\n' % job_ids)
    if task_ids:
        print(' task-id:')
        print(' %s\n' % task_ids)
    if labels:
        print(' labels:')
        print(' %s\n' % repr(labels))
Print the search criteria used to select the jobs/tasks to delete. Pass the raw flag values as arguments.
def _retrieve_certificate(self, access_token, timeout=3):
    logger.debug("Retrieve certificate with token.")
    key_pair = crypto.PKey()
    key_pair.generate_key(crypto.TYPE_RSA, 2048)
    private_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, key_pair).decode("utf-8")
    cert_request = crypto.X509Req()
    cert_request.set_pubkey(key_pair)
    cert_request.sign(key_pair, 'md5')
    der_cert_req = crypto.dump_certificate_request(crypto.FILETYPE_ASN1, cert_request)
    encoded_cert_req = base64.b64encode(der_cert_req)
    token = {'access_token': access_token, 'token_type': 'Bearer'}
    client = OAuth2Session(token=token)
    response = client.post(
        self.certificate_url,
        data={'certificate_request': encoded_cert_req},
        verify=False,
        timeout=timeout,
    )
    if response.ok:
        content = "{} {}".format(response.text, private_key)
        with open(self.esgf_credentials, 'w') as fh:
            fh.write(content)
        logger.debug('Fetched certificate successfully.')
    else:
        msg = "Could not get certificate: {} {}".format(response.status_code, response.reason)
        raise Exception(msg)
    return True
Generates a new private key and certificate request, submits the request to be signed by the SLCS CA, and stores the signed certificate together with the private key in the ESGF credentials file. Returns True on success.
def _scan_footpaths_to_departure_stop(self, connection_dep_stop, connection_dep_time, arrival_time_target): for _, neighbor, data in self._walk_network.edges_iter(nbunch=[connection_dep_stop], data=True): d_walk = data['d_walk'] neighbor_dep_time = connection_dep_time - d_walk / self._walk_speed pt = LabelTimeSimple(departure_time=neighbor_dep_time, arrival_time_target=arrival_time_target) self._stop_profiles[neighbor].update_pareto_optimal_tuples(pt)
A helper method for scanning the footpaths. Updates self._stop_profiles accordingly
def parse_token(response):
    items = response.split("&")
    items = [item.split("=") for item in items]
    return {key: value for key, value in items}
parse the responses containing the tokens Parameters ---------- response : str The response containing the tokens Returns ------- dict The parsed tokens
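Illustrative call (a sketch only, assuming parse_token above is in scope); an OAuth-style token response is split on '&' and '=' into a dict:

parse_token('oauth_token=abc&oauth_token_secret=def')
# -> {'oauth_token': 'abc', 'oauth_token_secret': 'def'}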
def from_timestamp(microsecond_timestamp):
    return datetime.datetime.fromtimestamp(
        microsecond_timestamp // 1000000, datetime.timezone.utc
    ).replace(microsecond=(microsecond_timestamp % 1000000))
Convert a microsecond timestamp to a UTC datetime instance.
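A hedged usage sketch, assuming from_timestamp above is importable; the timestamp value is made up for illustration:

import datetime

dt = from_timestamp(1500000000123456)  # microseconds since the epoch
assert dt == datetime.datetime(2017, 7, 14, 2, 40, 0, 123456, tzinfo=datetime.timezone.utc)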
def _updateDutyCycles(self, overlaps, activeColumns):
    overlapArray = numpy.zeros(self._numColumns, dtype=realDType)
    activeArray = numpy.zeros(self._numColumns, dtype=realDType)
    overlapArray[overlaps > 0] = 1
    activeArray[activeColumns] = 1
    period = self._dutyCyclePeriod
    if (period > self._iterationNum):
        period = self._iterationNum
    self._overlapDutyCycles = self._updateDutyCyclesHelper(
        self._overlapDutyCycles,
        overlapArray,
        period
    )
    self._activeDutyCycles = self._updateDutyCyclesHelper(
        self._activeDutyCycles,
        activeArray,
        period
    )
Updates the duty cycles for each column. The OVERLAP duty cycle is a moving average of the number of inputs which overlapped with each column. The ACTIVITY duty cycle is a moving average of the frequency of activation for each column.

Parameters:
----------------------------
:param overlaps: An array containing the overlap score for each column. The overlap score for a column is defined as the number of synapses in a "connected state" (connected synapses) that are connected to input bits which are turned on.
:param activeColumns: An array containing the indices of the active columns, the sparse set of columns which survived inhibition
def merge_dicts(base, changes):
    for k, v in changes.items():
        if isinstance(v, dict):
            merge_dicts(base.setdefault(k, {}), v)
        else:
            base.setdefault(k, v)
Merge `changes` into `base` recursively, without overwriting existing values.

:param base: the dict that will be altered.
:param changes: changes to apply to base.
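A minimal sketch of the merge semantics (assumes merge_dicts above is in scope): values already present in base win, missing ones are copied over, and nested dicts are merged recursively.

base = {'a': 1, 'nested': {'x': 1}}
changes = {'a': 99, 'b': 2, 'nested': {'x': 100, 'y': 2}}
merge_dicts(base, changes)
assert base == {'a': 1, 'b': 2, 'nested': {'x': 1, 'y': 2}}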
def CALLCODE(self, gas, _ignored_, value, in_offset, in_size, out_offset, out_size): self.world.start_transaction('CALLCODE', address=self.address, data=self.read_buffer(in_offset, in_size), caller=self.address, value=value, gas=gas) raise StartTx()
Message-call into this account with alternative account's code
def does_not_contain_duplicates(self):
    try:
        if len(self.val) == len(set(self.val)):
            return self
    except TypeError:
        raise TypeError('val is not iterable')
    self._err('Expected <%s> to not contain duplicates, but did.' % self.val)
Asserts that val is iterable and does not contain any duplicate items.
def to_json(model, sort=False, **kwargs): obj = model_to_dict(model, sort=sort) obj[u"version"] = JSON_SPEC return json.dumps(obj, allow_nan=False, **kwargs)
Return the model as a JSON document. ``kwargs`` are passed on to ``json.dumps``. Parameters ---------- model : cobra.Model The cobra model to represent. sort : bool, optional Whether to sort the metabolites, reactions, and genes or maintain the order defined in the model. Returns ------- str String representation of the cobra model as a JSON document. See Also -------- save_json_model : Write directly to a file. json.dumps : Base function.
def output_image_link(self, m):
    return self.renderer.image_link(
        m.group('url'), m.group('target'), m.group('alt'))
Render an image link by passing the matched url, target and alt text to the renderer's ``image_link`` method.
def compare(left, right, compare_locs=False):
    if type(left) != type(right):
        return False
    if isinstance(left, ast.AST):
        for field in left._fields:
            if not compare(getattr(left, field), getattr(right, field)):
                return False
        if compare_locs:
            for loc in left._locs:
                if getattr(left, loc) != getattr(right, loc):
                    return False
        return True
    elif isinstance(left, list):
        if len(left) != len(right):
            return False
        for left_elt, right_elt in zip(left, right):
            if not compare(left_elt, right_elt):
                return False
        return True
    else:
        return left == right
An AST comparison function. Returns ``True`` if all fields in ``left`` are equal to fields in ``right``; if ``compare_locs`` is true, all locations should match as well.
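A sketch only: exercising compare() with the standard-library ast module rather than the parser this project targets (whose nodes also carry a `_locs` attribute). With compare_locs left False, `_locs` is never touched, so stdlib nodes serve for illustration.

import ast

assert compare(ast.parse('x + 1'), ast.parse('x + 1'))      # structurally equal
assert not compare(ast.parse('x + 1'), ast.parse('x + 2'))  # differing constant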
def invalidate_cache(self):
    if self._use_cache:
        self._cache_version += 1
        self._cache.increment('cached_httpbl_{0}_version'.format(self._api_key))
Invalidate httpBL cache
def rewind(self):
    super(FileRecordStream, self).rewind()
    self.close()
    self._file = open(self._filename, self._mode)
    self._reader = csv.reader(self._file, dialect="excel")
    self._reader.next()
    self._reader.next()
    self._reader.next()
    self._recordCount = 0
Put us back at the beginning of the file again.
def nested_formula_parser(formula, check=True):
    formula = formula.replace('[', '').replace(']', '')
    charge_splits = bracketed_charge_re.split(formula)
    if len(charge_splits) > 1:
        formula = charge_splits[0]
    else:
        formula = formula.split('+')[0].split('-')[0]
    stack = [[]]
    last = stack[0]
    tokens = formula_token_matcher_rational.findall(formula)
    if check:
        token_letters = set([j for i in tokens for j in i if j in letter_set])
        formula_letters = set(i for i in formula if i in letter_set)
        if formula_letters != token_letters:
            raise Exception('Input may not be a formula; extra letters were detected')
    for token in tokens:
        if token == "(":
            stack.append([])
            last = stack[-1]
        elif token == ")":
            temp_dict = {}
            for d in last:
                for ele, count in d.items():
                    if ele in temp_dict:
                        temp_dict[ele] = temp_dict[ele] + count
                    else:
                        temp_dict[ele] = count
            stack.pop()
            last = stack[-1]
            last.append(temp_dict)
        elif token.isalpha():
            last.append({token: 1})
        else:
            v = float(token)
            v_int = int(v)
            if v_int == v:
                v = v_int
            last[-1] = {ele: count*v for ele, count in last[-1].items()}
    ans = {}
    for d in last:
        for ele, count in d.items():
            if ele in ans:
                ans[ele] = ans[ele] + count
            else:
                ans[ele] = count
    return ans
r'''Improved formula parser which handles braces and their multipliers, as well as rational element counts. Strips charges from the end of a formula first. Accepts repeated chemical units. Performs no sanity checking that elements are actually elements. As it uses regular expressions for matching, errors are mostly just ignored. Parameters ---------- formula : str Formula string, very simply formats only. check : bool If `check` is True, a simple check will be performed to determine if a formula is not a formula and an exception will be raised if it is not, [-] Returns ------- atoms : dict dictionary of counts of individual atoms, indexed by symbol with proper capitalization, [-] Notes ----- Inspired by the approach taken by CrazyMerlyn on a reddit DailyProgrammer challenge, at https://www.reddit.com/r/dailyprogrammer/comments/6eerfk/20170531_challenge_317_intermediate_counting/ Examples -------- >>> pprint(nested_formula_parser('Pd(NH3)4.0001+2')) {'H': 12.0003, 'N': 4.0001, 'Pd': 1}
def textpath(self, i): if len(self._textpaths) == i: self._ctx.font(self.font, self.fontsize) txt = self.q[i] if len(self.q) > 1: txt += " ("+str(i+1)+"/" + str(len(self.q))+")" p = self._ctx.textpath(txt, 0, 0, width=self._w) h = self._ctx.textheight(txt, width=self._w) self._textpaths.append((p, h)) return self._textpaths[i]
Returns a cached textpath of the given text in queue.
def get_identities(self): ret=[] l=self.xpath_ctxt.xpathEval("d:identity") if l is not None: for i in l: ret.append(DiscoIdentity(self,i)) return ret
List the identity objects contained in `self`. :return: the list of identities. :returntype: `list` of `DiscoIdentity`
def tilemap(self, query, styles={}, bbox=[-180,-90,180,90], zoom=16, api_key=os.environ.get('MAPBOX_API_KEY', None), image=None, image_bounds=None, index="vector-user-provided", name="GBDX_Task_Output", **kwargs): try: from IPython.display import display except: print("IPython is required to produce maps.") return assert api_key is not None, "No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable." wkt = box(*bbox).wkt features = self.query(wkt, query, index=index) union = cascaded_union([shape(f['geometry']) for f in features]) lon, lat = union.centroid.coords[0] url = 'https://vector.geobigdata.io/insight-vector/api/mvt/{z}/{x}/{y}?'; url += 'q={}&index={}'.format(query, index); if styles is not None and not isinstance(styles, list): styles = [styles] map_id = "map_{}".format(str(int(time.time()))) map_data = VectorTileLayer(url, source_name=name, styles=styles, **kwargs) image_layer = self._build_image_layer(image, image_bounds) template = BaseTemplate(map_id, **{ "lat": lat, "lon": lon, "zoom": zoom, "datasource": json.dumps(map_data.datasource), "layers": json.dumps(map_data.layers), "image_layer": image_layer, "mbkey": api_key, "token": self.gbdx_connection.access_token }) template.inject()
Renders a mapbox gl map from a vector service query
def min(self, constraints, X: BitVec, M=10000):
    assert isinstance(X, BitVec)
    return self.optimize(constraints, X, 'minimize', M)
Iteratively finds the minimum value for a symbol within given constraints. :param constraints: constraints that the expression must fulfil :param X: a symbol or expression :param M: maximum number of iterations allowed
def create_graph_from_data(self, data, **kwargs): self.arguments['{VERBOSE}'] = str(self.verbose).upper() results = self._run_ccdr(data, verbose=self.verbose) return nx.relabel_nodes(nx.DiGraph(results), {idx: i for idx, i in enumerate(data.columns)})
Apply causal discovery on observational data using CCDr. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the CCDR algorithm.
def SetWindowText(self, text: str) -> bool:
    handle = self.NativeWindowHandle
    if handle:
        return SetWindowText(handle, text)
    return False
Call native SetWindowText if control has a valid native handle.
def _poll_read(self, expected, timeout_s=5.0): start = time.time() response = bytearray(expected) index = 0 while time.time() - start <= timeout_s: ret, data = ftdi.read_data(self._ctx, expected - index) if ret < 0: raise RuntimeError('ftdi_read_data failed with error code {0}.'.format(ret)) response[index:index+ret] = data[:ret] index += ret if index >= expected: return str(response) time.sleep(0.01) raise RuntimeError('Timeout while polling ftdi_read_data for {0} bytes!'.format(expected))
Helper function to continuously poll reads on the FTDI device until an expected number of bytes are returned. Will throw a timeout error if no data is received within the specified number of timeout seconds. Returns the read data as a string if successful, otherwise raises an exception.
def update_sg(self, context, sg, rule_id, action): db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE) if not db_sg: return None with context.session.begin(): job_body = dict(action="%s sg rule %s" % (action, rule_id), resource_id=rule_id, tenant_id=db_sg['tenant_id']) job_body = dict(job=job_body) job = job_api.create_job(context.elevated(), job_body) rpc_client = QuarkSGAsyncProducerClient() try: rpc_client.populate_subtasks(context, sg, job['id']) except om_exc.MessagingTimeout: LOG.error("Failed to create subtasks. Rabbit running?") return None return {"job_id": job['id']}
Begins the async update process.
def _doSave(self, obj, isInsert, conn, pipeline=None): if pipeline is None: pipeline = conn newDict = obj.asDict(forStorage=True) key = self._get_key_for_id(obj._id) if isInsert is True: for thisField in self.fields: fieldValue = newDict.get(thisField, thisField.getDefaultValue()) pipeline.hset(key, thisField, fieldValue) if fieldValue == IR_NULL_STR: obj._origData[thisField] = irNull else: obj._origData[thisField] = object.__getattribute__(obj, str(thisField)) self._add_id_to_keys(obj._id, pipeline) for indexedField in self.indexedFields: self._add_id_to_index(indexedField, obj._id, obj._origData[indexedField], pipeline) else: updatedFields = obj.getUpdatedFields() for thisField, fieldValue in updatedFields.items(): (oldValue, newValue) = fieldValue oldValueForStorage = thisField.toStorage(oldValue) newValueForStorage = thisField.toStorage(newValue) pipeline.hset(key, thisField, newValueForStorage) if thisField in self.indexedFields: self._rem_id_from_index(thisField, obj._id, oldValueForStorage, pipeline) self._add_id_to_index(thisField, obj._id, newValueForStorage, pipeline) obj._origData[thisField] = newValue
_doSave - Internal function to save a single object. Don't call this directly. Use "save" instead. If a pipeline is provided, the operations (setting values, updating indexes, etc) will be queued into that pipeline. Otherwise, everything will be executed right away. @param obj - Object to save @param isInsert - Bool, if insert or update. Either way, obj._id is expected to be set. @param conn - Redis connection @param pipeline - Optional pipeline, if present the items will be queued onto it. Otherwise, go directly to conn.
def parse_definite_clause(s):
    "Return the antecedents and the consequent of a definite clause."
    assert is_definite_clause(s)
    if is_symbol(s.op):
        return [], s
    else:
        antecedent, consequent = s.args
        return conjuncts(antecedent), consequent
Return the antecedents and the consequent of a definite clause.
def data_cosine(N=1024, A=0.1, sampling=1024., freq=200):
    t = arange(0, float(N)/sampling, 1./sampling)
    x = cos(2.*pi*t*freq) + A * randn(t.size)
    return x
r"""Return a noisy cosine at a given frequency. :param N: the final data size :param A: the strength of the noise :param float sampling: sampling frequency of the input :attr:`data`. :param float freq: the frequency :math:`f_0` of the cosine. .. math:: x[t] = cos(2\pi t * f_0) + A w[t] where w[t] is a white noise of variance 1. .. doctest:: >>> from spectrum import data_cosine >>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100)
def tiles_to_affine_shape(tiles): if not tiles: raise TypeError("no tiles provided") pixel_size = tiles[0].pixel_x_size left, bottom, right, top = ( min([t.left for t in tiles]), min([t.bottom for t in tiles]), max([t.right for t in tiles]), max([t.top for t in tiles]), ) return ( Affine(pixel_size, 0, left, 0, -pixel_size, top), Shape( width=int(round((right - left) / pixel_size, 0)), height=int(round((top - bottom) / pixel_size, 0)), ) )
Return Affine and shape of combined tiles. Parameters ---------- tiles : iterable an iterable containing BufferedTiles Returns ------- Affine, Shape
def gain(abf): Ys=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,'freq')) Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[1]+.01)]) swhlab.plot.new(abf,title="gain function",xlabel="command current (pA)", ylabel="average inst. freq. (Hz)") pylab.plot(Xs,Ys,'.-',ms=20,alpha=.5,color='b') pylab.axhline(0,alpha=.5,lw=2,color='r',ls="--") pylab.margins(.1,.1)
easy way to plot a gain function.
def update(self, name=None, description=None, privacy_policy=None, subscription_policy=None, is_managed=None): with db.session.begin_nested(): if name is not None: self.name = name if description is not None: self.description = description if ( privacy_policy is not None and PrivacyPolicy.validate(privacy_policy) ): self.privacy_policy = privacy_policy if ( subscription_policy is not None and SubscriptionPolicy.validate(subscription_policy) ): self.subscription_policy = subscription_policy if is_managed is not None: self.is_managed = is_managed db.session.merge(self) return self
Update group. :param name: Name of group. :param description: Description of group. :param privacy_policy: PrivacyPolicy :param subscription_policy: SubscriptionPolicy :returns: Updated group
def select(course=False, tid=None, auto=False): if course: update(course=True) course = None try: course = Course.get_selected() except NoCourseSelected: pass ret = {} if not tid: ret = Menu.launch("Select a course", Course.select().execute(), course) else: ret["item"] = Course.get(Course.tid == tid) if "item" in ret: ret["item"].set_select() update() if ret["item"].path == "": select_a_path(auto=auto) skip() return else: print("You can select the course with `tmc select --course`") return else: selected = None try: selected = Exercise.get_selected() except NoExerciseSelected: pass ret = {} if not tid: ret = Menu.launch("Select an exercise", Course.get_selected().exercises, selected) else: ret["item"] = Exercise.byid(tid) if "item" in ret: ret["item"].set_select() print("Selected {}".format(ret["item"]))
Select a course or an exercise.
def buildTagMap(default, *args):
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            for k, v in portion.items():
                built[k] = v
        elif isList(portion):
            for k in portion:
                built[k] = default
        else:
            built[portion] = default
    return built
Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.
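Illustrative calls, assuming buildTagMap and its isList helper are in scope (this is a BeautifulSoup 3 internal):

nestable = buildTagMap([], ['ul', 'ol'], {'li': ['ul', 'ol']})
# -> {'ul': [], 'ol': [], 'li': ['ul', 'ol']}
self_closing = buildTagMap(None, ['br', 'hr', 'img'])
# -> {'br': None, 'hr': None, 'img': None}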
def get_data_sharing_consent(username, enterprise_customer_uuid, course_id=None, program_uuid=None): EnterpriseCustomer = apps.get_model('enterprise', 'EnterpriseCustomer') try: if course_id: return get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid) return get_program_data_sharing_consent(username, program_uuid, enterprise_customer_uuid) except EnterpriseCustomer.DoesNotExist: return None
Get the data sharing consent object associated with a certain user, enterprise customer, and other scope. :param username: The user that grants consent :param enterprise_customer_uuid: The consent requester :param course_id (optional): A course ID to which consent may be related :param program_uuid (optional): A program to which consent may be related :return: The data sharing consent object, or None if the enterprise customer for the given UUID does not exist.
def _update_cache_stats(self, key, result):
    if result is None:
        self._CACHE_STATS['access_stats'].setdefault(key, {'hit': 0, 'miss': 0, 'expired': 0})
    else:
        self._CACHE_STATS['access_stats'][key][result] += 1
Update the cache stats. If no cache-result is specified, we initialize the key. Otherwise, we increment the correct cache-result. Note the behavior for expired: a client can be expired while the key still exists.
def _DropCommonSuffixes(filename):
    for suffix in itertools.chain(
            ('%s.%s' % (test_suffix.lstrip('_'), ext)
             for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
            ('%s.%s' % (suffix, ext)
             for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
        if (filename.endswith(suffix) and len(filename) > len(suffix) and
                filename[-len(suffix) - 1] in ('-', '_')):
            return filename[:-len(suffix) - 1]
    return os.path.splitext(filename)[0]
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
def style_string(string, ansi_style, colormode, nested=False): ansi_start_code, ansi_end_code = ansi_style if PY2: if isinstance(string, str): string = string.decode(DEFAULT_ENCODING) string = UNICODE(string).replace(ansi.NEST_PLACEHOLDER, ansi_start_code) return '{start_code}{string}{end_code}{nest_ph}'.format( start_code=ansi_start_code, string=string, end_code=ansi_end_code, nest_ph=ansi.NEST_PLACEHOLDER if nested else '')
Style the given string according to the given ANSI style string. :param str string: the string to style :param tuple ansi_style: the styling string returned by ``translate_style`` :param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code`` :returns: a string containing proper ANSI sequence
def _build_path(self, *args):
    return '/'.join(chain((self.endpoint,), map(str, args)))
Build path with endpoint and args :param args: Tokens in the endpoint URL :type args: :py:class:`unicode`
def remove(args): osf = _setup_osf(args) if osf.username is None or osf.password is None: sys.exit('To remove a file you need to provide a username and' ' password.') project = osf.project(args.project) storage, remote_path = split_storage(args.target) store = project.storage(storage) for f in store.files: if norm_remote_path(f.path) == remote_path: f.remove()
Remove a file from the project's storage. The first part of the remote path is interpreted as the name of the storage provider. If there is no match the default (osfstorage) is used.
def get_md5sum(src_file):
    with open(src_file, 'r') as src_data:
        src_content = src_data.read()
    if sys.version_info[0] == 3:
        src_content = src_content.encode('utf-8')
    src_md5 = hashlib.md5(src_content).hexdigest()
    return src_md5
Returns md5sum of file
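A quick, hedged check of get_md5sum against hashlib directly (assumes the helper above is importable; the file name and contents are arbitrary):

import hashlib
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('hello world\n')
assert get_md5sum(tmp.name) == hashlib.md5(b'hello world\n').hexdigest()
os.remove(tmp.name)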
def image_urls(self):
    all_image_urls = self.finder_image_urls[:]
    for image_url in self.extender_image_urls:
        if image_url not in all_image_urls:
            all_image_urls.append(image_url)
    return all_image_urls
Combine finder_image_urls and extender_image_urls, removing duplicates while keeping order.
def write_shas_to_shastore(sha_dict):
    if sys.version_info[0] < 3:
        fn_open = open
    else:
        fn_open = io.open
    with fn_open(".shastore", "w") as fh:
        fh.write("---\n")
        fh.write('sake version: {}\n'.format(constants.VERSION))
        if sha_dict:
            fh.write(yaml.dump(sha_dict))
        fh.write("...")
Writes a sha1 dictionary stored in memory to the .shastore file
def predict_proba(self, a, b, nb_runs=6, nb_jobs=None, gpu=None, idx=0, verbose=None, ttest_threshold=0.01, nb_max_runs=16, train_epochs=1000, test_epochs=1000): Nb_jobs, verbose, gpu = SETTINGS.get_default(('nb_jobs', nb_jobs), ('verbose', verbose), ('gpu', gpu)) x = np.stack([a.ravel(), b.ravel()], 1) ttest_criterion = TTestCriterion( max_iter=nb_max_runs, runs_per_iter=nb_runs, threshold=ttest_threshold) AB = [] BA = [] while ttest_criterion.loop(AB, BA): if nb_jobs != 1: result_pair = Parallel(n_jobs=nb_jobs)(delayed(GNN_instance)( x, idx=idx, device='cuda:{}'.format(run % gpu) if gpu else 'cpu', verbose=verbose, train_epochs=train_epochs, test_epochs=test_epochs) for run in range(ttest_criterion.iter, ttest_criterion.iter + nb_runs)) else: result_pair = [GNN_instance(x, idx=idx, device='cuda:0' if gpu else 'cpu', verbose=verbose, train_epochs=train_epochs, test_epochs=test_epochs) for run in range(ttest_criterion.iter, ttest_criterion.iter + nb_runs)] AB.extend([runpair[0] for runpair in result_pair]) BA.extend([runpair[1] for runpair in result_pair]) if verbose: print("P-value after {} runs : {}".format(ttest_criterion.iter, ttest_criterion.p_value)) score_AB = np.mean(AB) score_BA = np.mean(BA) return (score_BA - score_AB) / (score_BA + score_AB)
Run GNN multiple times to estimate the causal direction.

Args:
    a (np.ndarray): Variable 1
    b (np.ndarray): Variable 2
    nb_runs (int): number of runs to execute per batch (before testing for significance with t-test).
    nb_jobs (int): number of runs to execute in parallel. (Initialized with ``cdt.SETTINGS.NB_JOBS``)
    gpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``)
    idx (int): (optional) index of the pair, for printing purposes
    verbose (bool): verbosity (Initialized with ``cdt.SETTINGS.verbose``)
    ttest_threshold (float): threshold to stop the bootstraps before ``nb_max_runs`` if the difference is significant
    nb_max_runs (int): Max number of bootstraps
    train_epochs (int): Number of epochs during which the model is going to be trained
    test_epochs (int): Number of epochs during which the model is going to be tested

Returns:
    float: Causal score of the pair (Value : 1 if a->b and -1 if b->a)
def get_config_value(name, fallback=None):
    cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)
    return cli_config.get('servicefabric', name, fallback)
Gets a config by name. In the case where the config name is not found, will use fallback value.
def get_day_start_ut_span(self): cur = self.conn.cursor() first_day_start_ut, last_day_start_ut = \ cur.execute("SELECT min(day_start_ut), max(day_start_ut) FROM days;").fetchone() return first_day_start_ut, last_day_start_ut
Return the first and last day_start_ut Returns ------- first_day_start_ut: int last_day_start_ut: int
def convert_transpose(params, w_name, scope_name, inputs, layers, weights, names): print('Converting transpose ...') if params['perm'][0] != 0: if inputs[0] in layers: print('!!! Cannot permute batch dimension. Result may be wrong !!!') layers[scope_name] = layers[inputs[0]] else: print('Skip weight matrix transpose, result may be wrong.') else: if names: tf_name = 'PERM' + random_string(4) else: tf_name = w_name + str(random.random()) permute = keras.layers.Permute(params['perm'][1:], name=tf_name) layers[scope_name] = permute(layers[inputs[0]])
Convert transpose layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def NaiveBayesLearner(dataset): targetvals = dataset.values[dataset.target] target_dist = CountingProbDist(targetvals) attr_dists = dict(((gv, attr), CountingProbDist(dataset.values[attr])) for gv in targetvals for attr in dataset.inputs) for example in dataset.examples: targetval = example[dataset.target] target_dist.add(targetval) for attr in dataset.inputs: attr_dists[targetval, attr].add(example[attr]) def predict(example): def class_probability(targetval): return (target_dist[targetval] * product(attr_dists[targetval, attr][example[attr]] for attr in dataset.inputs)) return argmax(targetvals, class_probability) return predict
Just count how many times each value of each input attribute occurs, conditional on the target value. Count the different target values too.
def is_excluded_dir(self, path):
    if self.is_excluded(path):
        return True
    return matches_masks(path.name, ALWAYS_EXCLUDED_DIRS)
Determines whether or not the specified directory is excluded by the project's configuration. :param path: the path to check :type path: pathlib.Path :rtype: bool
def calc_global_bbox(self, view_matrix, bbox_min, bbox_max): if self.matrix is not None: view_matrix = matrix44.multiply(self.matrix, view_matrix) if self.mesh: bbox_min, bbox_max = self.mesh.calc_global_bbox(view_matrix, bbox_min, bbox_max) for child in self.children: bbox_min, bbox_max = child.calc_global_bbox(view_matrix, bbox_min, bbox_max) return bbox_min, bbox_max
Recursive calculation of scene bbox
def n_dir(self):
    self.assert_is_dir_and_exists()
    n = 0
    for _ in self.select_dir(recursive=True):
        n += 1
    return n
Count how many folders are in this directory, including folders in subfolders.
def instantiate(repo, name=None, filename=None): default_transformers = repo.options.get('transformer', {}) transformers = {} if name is not None: if name in default_transformers: transformers = { name : default_transformers[name] } else: transformers = { name : { 'files': [], } } else: transformers = default_transformers input_matching_files = None if filename is not None: input_matching_files = repo.find_matching_files([filename]) for t in transformers: for k in transformers[t]: if "files" not in k: continue if k == "files" and input_matching_files is not None: transformers[t][k] = input_matching_files else: if transformers[t][k] is None or len(transformers[t][k]) == 0: transformers[t][k] = [] else: matching_files = repo.find_matching_files(transformers[t][k]) transformers[t][k] = matching_files return transformers
Instantiate the generator and filename specification
async def rt_unsubscribe(self): if self._subscription_id is None: _LOGGER.error("Not subscribed.") return await self._tibber_control.sub_manager.unsubscribe(self._subscription_id)
Unsubscribe from the Tibber realtime (rt) subscription.
def walk_dn(start_dir, depth=10):
    start_depth = len(os.path.split(start_dir))
    end_depth = start_depth + depth
    for root, subdirs, files in os.walk(start_dir):
        yield root, subdirs, files
        if len(os.path.split(root)) >= end_depth:
            break
Walk down a directory tree. Same as os.walk but allows for a depth limit via depth argument
def record_drop_duplicate_fields(record): out = {} position = 0 tags = sorted(record.keys()) for tag in tags: fields = record[tag] out[tag] = [] current_fields = set() for full_field in fields: field = (tuple(full_field[0]),) + full_field[1:4] if field not in current_fields: current_fields.add(field) position += 1 out[tag].append(full_field[:4] + (position,)) return out
Return a record where all the duplicate fields have been removed. Fields are considered identical considering also the order of their subfields.
def _writePrediction(self, result): self.__predictionCache.append(result) if self._isBestModel: self.__flushPredictionCache()
Writes the results of one iteration of a model. The results are written to this ModelRunner's in-memory cache unless this model is the "best model" for the job. If this model is the "best model", the predictions are written out to a permanent store via a prediction output stream instance Parameters: ----------------------------------------------------------------------- result: A opf_utils.ModelResult object, which contains the input and output for this iteration
def _update(self, conf_dict, base_name=None): for name in conf_dict: if name.startswith('_'): continue value = conf_dict[name] if value is Namespace: continue if base_name: name = base_name + '.' + name if isinstance(value, Namespace): for name, value in value.iteritems(name): self.set(name, value) elif callable(value): value = value() if value is not None: self.set(name, value) else: self.set(name, value)
Updates the current configuration with the values in `conf_dict`. :param dict conf_dict: Dictionary of key value settings. :param str base_name: Base namespace for setting keys.
def amz_cano_querystring(qs): safe_qs_amz_chars = '&=+' safe_qs_unresvd = '-_.~' if PY2: qs = qs.encode('utf-8') safe_qs_amz_chars = safe_qs_amz_chars.encode() safe_qs_unresvd = safe_qs_unresvd.encode() qs = unquote(qs) space = b' ' if PY2 else ' ' qs = qs.split(space)[0] qs = quote(qs, safe=safe_qs_amz_chars) qs_items = {} for name, vals in parse_qs(qs, keep_blank_values=True).items(): name = quote(name, safe=safe_qs_unresvd) vals = [quote(val, safe=safe_qs_unresvd) for val in vals] qs_items[name] = vals qs_strings = [] for name, vals in qs_items.items(): for val in vals: qs_strings.append('='.join([name, val])) qs = '&'.join(sorted(qs_strings)) if PY2: qs = unicode(qs) return qs
Parse and format querystring as per AWS4 auth requirements. Perform percent quoting as needed. qs -- querystring
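Illustrative behaviour (assumes the helper is importable as a module-level function): parameters are percent-quoted with the unreserved character set and sorted, with repeated names kept; anything after the first space is discarded.

assert amz_cano_querystring('b=2&a=1&a=3') == 'a=1&a=3&b=2'
assert amz_cano_querystring('a=1 HTTP/1.1') == 'a=1'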
def map_pin(self, abstract_pin_id, physical_pin_id):
    if physical_pin_id:
        self._pin_mapping[abstract_pin_id] = physical_pin_id
    else:
        self._pin_mapping.pop(abstract_pin_id, None)
Maps a pin number to a physical device pin. To make it easy to change drivers without having to refactor a lot of code, this library does not use the names set by the driver to identify a pin. This function will map a number, that will be used by other functions, to a physical pin represented by the driver's pin id. That way, if you need to use another pin or change the underlying driver completely, you only need to redo the mapping.

If you're developing a driver, keep in mind that your driver will not know about this. The other functions will translate the mapped pin to your id before calling your function.

@arg abstract_pin_id the id that will identify this pin in the other function calls. You can choose what you want.
@arg physical_pin_id the id returned in the driver. See `AbstractDriver.available_pins`. Setting it to None removes the mapping.
def make_naive(value, timezone):
    value = value.astimezone(timezone)
    if hasattr(timezone, 'normalize'):
        value = timezone.normalize(value)
    return value.replace(tzinfo=None)
Makes an aware datetime.datetime naive in a given time zone.
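A minimal sketch (assumes make_naive above is in scope): an aware UTC datetime converted into a fixed UTC-5 zone and then stripped of its tzinfo.

import datetime

aware = datetime.datetime(2020, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
utc_minus_5 = datetime.timezone(datetime.timedelta(hours=-5))
assert make_naive(aware, utc_minus_5) == datetime.datetime(2020, 1, 1, 7, 0)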
def _zipped(self, docs_base): with pushd(docs_base): with tempfile.NamedTemporaryFile(prefix='pythonhosted-', delete=False) as ziphandle: pass zip_name = shutil.make_archive(ziphandle.name, 'zip') notify.info("Uploading {:.1f} MiB from '{}' to '{}'..." .format(os.path.getsize(zip_name) / 1024.0, zip_name, self.target)) with io.open(zip_name, 'rb') as zipread: try: yield zipread finally: os.remove(ziphandle.name) os.remove(ziphandle.name + '.zip')
Provide a zipped stream of the docs tree.
def format_uuid(uuid, max_length=10):
    if max_length <= 3:
        raise ValueError("max length must be larger than 3")
    if len(uuid) > max_length:
        uuid = "{}...".format(uuid[0:max_length-3])
    return uuid
Format a UUID string :param str uuid: UUID to format :param int max_length: Maximum length of result string (> 3) :return: Formatted UUID :rtype: str :raises ValueError: If *max_length* is not larger than 3 This function formats a UUID so it is not longer than *max_length* characters. The resulting string is returned. It does so by replacing characters at the end of the *uuid* with three dots, if necessary. The idea is that the start of the *uuid* is the most important part to be able to identify the related entity. The default *max_length* is 10, which will result in a string containing the first 7 characters of the *uuid* passed in. Most of the time, such a string is still unique within a collection of UUIDs.
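Worked examples (assuming format_uuid is importable as shown):

assert format_uuid('0123456789abcdef') == '0123456...'             # truncated to 10 characters
assert format_uuid('0123456789abcdef', max_length=20) == '0123456789abcdef'
assert format_uuid('abc') == 'abc'                                  # already short enough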
def list(self, ignore_patterns): for prefix, root in self.locations: storage = self.storages[root] for path in utils.get_files(storage, ignore_patterns): yield path, storage
List all files in all locations.
def set_rate(self, rate): self._rate = self._player_interface_property('Rate', dbus.Double(rate)) return self._rate
Set the playback rate of the video as a multiple of the default playback speed Examples: >>> player.set_rate(2) # Will play twice as fast as normal speed >>> player.set_rate(0.5) # Will play half speed
def normalize(lx):
    lx = numpy.asarray(lx)
    base = lx.max()
    x = numpy.exp(lx - base)
    result = x / x.sum()
    conventional = (numpy.exp(lx) / numpy.exp(lx).sum())
    assert similar(result, conventional)
    return result
Accepts log-values as input, exponentiates them, normalizes and returns the result. Handles underflow by rescaling so that the largest value is exactly 1.0.
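A worked example, assuming normalize and numpy are available as above: log-space weights proportional to 1, 2 and 3 come back as the probabilities 1/6, 2/6 and 3/6.

import numpy

probs = normalize(numpy.log([1.0, 2.0, 3.0]))
assert numpy.allclose(probs, [1/6., 2/6., 3/6.])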
def __from_xml(self,value): n=value.children vns=get_node_ns(value) while n: if n.type!='element': n=n.next continue ns=get_node_ns(n) if (ns and vns and ns.getContent()!=vns.getContent()): n=n.next continue if n.name=='POBOX': self.pobox=unicode(n.getContent(),"utf-8","replace") elif n.name in ('EXTADR', 'EXTADD'): self.extadr=unicode(n.getContent(),"utf-8","replace") elif n.name=='STREET': self.street=unicode(n.getContent(),"utf-8","replace") elif n.name=='LOCALITY': self.locality=unicode(n.getContent(),"utf-8","replace") elif n.name=='REGION': self.region=unicode(n.getContent(),"utf-8","replace") elif n.name=='PCODE': self.pcode=unicode(n.getContent(),"utf-8","replace") elif n.name=='CTRY': self.ctry=unicode(n.getContent(),"utf-8","replace") elif n.name in ("HOME","WORK","POSTAL","PARCEL","DOM","INTL", "PREF"): self.type.append(n.name.lower()) n=n.next if self.type==[]: self.type=["intl","postal","parcel","work"] elif "dom" in self.type and "intl" in self.type: raise ValueError("Both 'dom' and 'intl' specified in vcard ADR")
Initialize a `VCardAdr` object from an XML element.

:Parameters:
    - `value`: field value as an XML node
:Types:
    - `value`: `libxml2.xmlNode`
def strptime(s, fmt, tzinfo=None):
    res = time.strptime(s, fmt)
    return datetime.datetime(tzinfo=tzinfo, *res[:6])
A function to replace strptime in the time module. Should behave identically to the strptime function except it returns a datetime.datetime object instead of a time.struct_time object. Also takes an optional tzinfo parameter which is a time zone info object.
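A hedged usage sketch (assumes the strptime wrapper above is in scope):

import datetime

dt = strptime('2021-03-04 05:06:07', '%Y-%m-%d %H:%M:%S', tzinfo=datetime.timezone.utc)
assert dt == datetime.datetime(2021, 3, 4, 5, 6, 7, tzinfo=datetime.timezone.utc)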
def error(code: int, *args, **kwargs) -> HedgehogCommandError:
    if code == FAILED_COMMAND and len(args) >= 1 and args[0] == "Emergency Shutdown activated":
        return EmergencyShutdown(*args, **kwargs)
    return _errors[code](*args, **kwargs)
Creates an error from the given code, and args and kwargs. :param code: The acknowledgement code :param args: Exception args :param kwargs: Exception kwargs :return: the error for the given acknowledgement code
def start_local_env(recreate_containers): assembled_spec = spec_assembler.get_assembled_specs() required_absent_assets = virtualbox.required_absent_assets(assembled_spec) if required_absent_assets: raise RuntimeError('Assets {} are specified as required but are not set. Set them with `dusty assets set`'.format(required_absent_assets)) docker_ip = virtualbox.get_docker_vm_ip() if os.path.exists(constants.COMPOSEFILE_PATH): try: stop_apps_or_services(rm_containers=recreate_containers) except CalledProcessError as e: log_to_client("WARNING: docker-compose stop failed") log_to_client(str(e)) daemon_warnings.clear_namespace('disk') df_info = virtualbox.get_docker_vm_disk_info(as_dict=True) if 'M' in df_info['free'] or 'K' in df_info['free']: warning_msg = 'VM is low on disk. Available disk: {}'.format(df_info['free']) daemon_warnings.warn('disk', warning_msg) log_to_client(warning_msg) log_to_client("Compiling together the assembled specs") active_repos = spec_assembler.get_all_repos(active_only=True, include_specs_repo=False) log_to_client("Compiling the port specs") port_spec = port_spec_compiler.get_port_spec_document(assembled_spec, docker_ip) log_to_client("Compiling the nginx config") docker_bridge_ip = virtualbox.get_docker_bridge_ip() nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec, docker_bridge_ip) log_to_client("Creating setup and script bash files") make_up_command_files(assembled_spec, port_spec) log_to_client("Compiling docker-compose config") compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec) log_to_client("Saving port forwarding to hosts file") hosts.update_hosts_file_from_port_spec(port_spec) log_to_client("Configuring NFS") nfs.configure_nfs() log_to_client("Saving updated nginx config to the VM") nginx.update_nginx_from_config(nginx_config) log_to_client("Saving Docker Compose config and starting all containers") compose.update_running_containers_from_spec(compose_config, recreate_containers=recreate_containers) log_to_client("Your local environment is now started!")
This command will use the compilers to get compose specs and will pass those specs to the systems that need them. Those systems will in turn launch the services needed to make the local environment go.
def json_struct_to_xml(json_obj, root, custom_namespace=None): if isinstance(root, (str, unicode)): if root.startswith('!'): root = etree.Element('{%s}%s' % (NS_PROTECTED, root[1:])) elif root.startswith('+'): if not custom_namespace: raise Exception("JSON fields starts with +, but no custom namespace provided") root = etree.Element('{%s}%s' % (custom_namespace, root[1:])) else: root = etree.Element(root) if root.tag in ('attachments', 'grouped_events', 'media_files'): for link in json_obj: root.append(json_link_to_xml(link)) elif isinstance(json_obj, (str, unicode)): root.text = json_obj elif isinstance(json_obj, (int, float)): root.text = unicode(json_obj) elif isinstance(json_obj, dict): if frozenset(json_obj.keys()) == frozenset(('type', 'coordinates')): root.append(geojson_to_gml(json_obj)) else: for key, val in json_obj.items(): if key == 'url' or key.endswith('_url'): el = json_link_to_xml(val, json_link_key_to_xml_rel(key)) else: el = json_struct_to_xml(val, key, custom_namespace=custom_namespace) if el is not None: root.append(el) elif isinstance(json_obj, list): tag_name = root.tag if tag_name.endswith('ies'): tag_name = tag_name[:-3] + 'y' elif tag_name.endswith('s'): tag_name = tag_name[:-1] for val in json_obj: el = json_struct_to_xml(val, tag_name, custom_namespace=custom_namespace) if el is not None: root.append(el) elif json_obj is None: return None else: raise NotImplementedError return root
Converts a Open511 JSON fragment to XML. Takes a dict deserialized from JSON, returns an lxml Element. This won't provide a conforming document if you pass in a full JSON document; it's for translating little fragments, and is mostly used internally.
def fill(self, iterations=1): used_reactions = list() for i in range(iterations): self.model.slim_optimize(error_value=None, message='gapfilling optimization failed') solution = [self.model.reactions.get_by_id(ind.rxn_id) for ind in self.indicators if ind._get_primal() > self.integer_threshold] if not self.validate(solution): raise RuntimeError('failed to validate gapfilled model, ' 'try lowering the integer_threshold') used_reactions.append(solution) self.update_costs() return used_reactions
Perform the gapfilling by iteratively solving the model, updating the costs and recording the used reactions. Parameters ---------- iterations : int The number of rounds of gapfilling to perform. For every iteration, the penalty for every used reaction increases linearly. This way, the algorithm is encouraged to search for alternative solutions which may include previously used reactions. I.e., with enough iterations pathways including 10 steps will eventually be reported even if the shortest pathway is a single reaction. Returns ------- iterable A list of lists where each element is a list reactions that were used to gapfill the model. Raises ------ RuntimeError If the model fails to be validated (i.e. the original model with the proposed reactions added, still cannot get the required flux through the objective).
def _composed_service_dict(service_spec): compose_dict = service_spec.plain_dict() _apply_env_overrides(env_overrides_for_app_or_service(service_spec.name), compose_dict) compose_dict.setdefault('volumes', []).append(_get_cp_volume_mount(service_spec.name)) compose_dict['container_name'] = "dusty_{}_1".format(service_spec.name) return compose_dict
This function returns a dictionary of the docker_compose specifications for one service. Currently, this is just the Dusty service spec with an additional volume mount to support Dusty's cp functionality.
def new_symbolic_buffer(self, nbytes, **options): label = options.get('label') avoid_collisions = False if label is None: label = 'buffer' avoid_collisions = True taint = options.get('taint', frozenset()) expr = self._constraints.new_array(name=label, index_max=nbytes, value_bits=8, taint=taint, avoid_collisions=avoid_collisions) self._input_symbols.append(expr) if options.get('cstring', False): for i in range(nbytes - 1): self._constraints.add(expr[i] != 0) return expr
Create and return a symbolic buffer of length `nbytes`. The buffer is not written into State's memory; write it to the state's memory to introduce it into the program state. :param int nbytes: Length of the new buffer :param str label: (keyword arg only) The label to assign to the buffer :param bool cstring: (keyword arg only) Whether or not to enforce that the buffer is a cstring (i.e. no NULL bytes, except for the last byte). (bool) :param taint: Taint identifier of the new buffer :type taint: tuple or frozenset :return: :class:`~manticore.core.smtlib.expression.Expression` representing the buffer.
def render_to_string(self):
    values = ''
    for key, value in self.items():
        values += '{}={};'.format(key, value)
    return values
Render to cookie strings.
def activateCells(self, activeColumns, learn=True): prevActiveCells = self.activeCells prevWinnerCells = self.winnerCells self.activeCells = [] self.winnerCells = [] segToCol = lambda segment: int(segment.cell / self.cellsPerColumn) identity = lambda x: x for columnData in groupby2(activeColumns, identity, self.activeSegments, segToCol, self.matchingSegments, segToCol): (column, activeColumns, columnActiveSegments, columnMatchingSegments) = columnData if activeColumns is not None: if columnActiveSegments is not None: cellsToAdd = self.activatePredictedColumn(column, columnActiveSegments, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn) self.activeCells += cellsToAdd self.winnerCells += cellsToAdd else: (cellsToAdd, winnerCell) = self.burstColumn(column, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn) self.activeCells += cellsToAdd self.winnerCells.append(winnerCell) else: if learn: self.punishPredictedColumn(column, columnActiveSegments, columnMatchingSegments, prevActiveCells, prevWinnerCells)
Calculate the active cells, using the current active columns and dendrite segments. Grow and reinforce synapses. :param activeColumns: (iter) A sorted list of active column indices. :param learn: (bool) If true, reinforce / punish / grow synapses. **Pseudocode:** :: for each column if column is active and has active distal dendrite segments call activatePredictedColumn if column is active and doesn't have active distal dendrite segments call burstColumn if column is inactive and has matching distal dendrite segments call punishPredictedColumn
def wait_for_job_to_start(single_master, job): i = 0 while True: try: r = requests.get("http://%s:4646/v1/job/%s" % (single_master, job)) if r.status_code == 200 and r.json()["Status"] == "running": break else: raise RuntimeError() except: Log.debug(sys.exc_info()[0]) Log.info("Waiting for %s to come up... %s" % (job, i)) time.sleep(1) if i > 20: Log.error("Failed to start Nomad Cluster!") sys.exit(-1) i = i + 1
Wait for a Nomad job to start
def pascal(n):
    errors.is_positive_integer(n)
    result = numpy.zeros((n, n))
    for i in range(0, n):
        result[i, 0] = 1
        result[0, i] = 1
    if n > 1:
        for i in range(1, n):
            for j in range(1, n):
                result[i, j] = result[i-1, j] + result[i, j-1]
    return result
Return Pascal matrix :param int n: size of the matrix .. doctest:: >>> from spectrum import pascal >>> pascal(6) array([[ 1., 1., 1., 1., 1., 1.], [ 1., 2., 3., 4., 5., 6.], [ 1., 3., 6., 10., 15., 21.], [ 1., 4., 10., 20., 35., 56.], [ 1., 5., 15., 35., 70., 126.], [ 1., 6., 21., 56., 126., 252.]]) .. todo:: use the symmetric property to improve computational time if needed
def get_object(cls, api_token, ip): floating_ip = cls(token=api_token, ip=ip) floating_ip.load() return floating_ip
Class method that will return a FloatingIP object by its IP. Args: api_token: str - token ip: str - floating ip address
def get_file_environment_variables(file_params): env = {} for param in file_params: env[param.name] = os.path.join( DATA_MOUNT_POINT, param.docker_path.rstrip('/')) if param.value else '' return env
Return a dictionary of environment variables for the user container.
def _reg_name(self, reg_id): if reg_id >= X86_REG_ENDING: logger.warning("Trying to get register name for a non-register") return None cs_reg_name = self.cpu.instruction.reg_name(reg_id) if cs_reg_name is None or cs_reg_name.lower() == '(invalid)': return None return self.cpu._regfile._alias(cs_reg_name.upper())
Translates a register ID from the disassembler object into the register name based on manticore's alias in the register file :param int reg_id: Register ID
def save(self, *args, **kwargs): rerank = kwargs.pop('rerank', True) if rerank: if not self.id: self._process_new_rank_obj() elif self.rank == self._rank_at_load: pass else: self._process_moved_rank_obj() super(RankedModel, self).save(*args, **kwargs)
Overridden method that handles that re-ranking of objects and the integrity of the ``rank`` field. :param rerank: Added parameter, if True will rerank other objects based on the change in this save. Defaults to True.
def find(self, path: Path): if getattr(self, 'settings_attr', None): self.paths = getattr(settings, self.settings_attr) path_found = None for entry in self.paths: abspath = entry / path if abspath.exists(): path_found = abspath return path_found
Find a file in the path. The file may exist in multiple paths. The last found file will be returned. :param path: The path to find :return: The absolute path to the file or None if not found
def scale_image(in_fname, out_fname, max_width, max_height): try: from PIL import Image except ImportError: import Image img = Image.open(in_fname) width_in, height_in = img.size scale_w = max_width / float(width_in) scale_h = max_height / float(height_in) if height_in * scale_w <= max_height: scale = scale_w else: scale = scale_h if scale >= 1.0 and in_fname == out_fname: return width_sc = int(round(scale * width_in)) height_sc = int(round(scale * height_in)) img.thumbnail((width_sc, height_sc), Image.ANTIALIAS) thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255)) pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2) thumb.paste(img, pos_insert) thumb.save(out_fname) if os.environ.get('SKLEARN_DOC_OPTIPNG', False): try: subprocess.call(["optipng", "-quiet", "-o", "9", out_fname]) except Exception: warnings.warn('Install optipng to reduce the size of the \ generated images')
Scales an image with the same aspect ratio centered in an image with a given max_width and max_height if in_fname == out_fname the image can only be scaled down
def _deserialize_uint(data, nbytes=32, padding=0, offset=0):
    assert isinstance(data, (bytearray, Array))
    value = ABI._readBE(data, nbytes, padding=True, offset=offset)
    value = Operators.ZEXTEND(value, (nbytes + padding) * 8)
    return value
Read a `nbytes` bytes long big endian unsigned integer from `data` starting at `offset` :param data: sliceable buffer; symbolic buffer of Eth ABI encoded data :param nbytes: number of bytes to read starting from least significant byte :rtype: int or Expression
def prune(self, var, value, removals):
    "Rule out var=value."
    self.curr_domains[var].remove(value)
    if removals is not None:
        removals.append((var, value))
Rule out var=value.
def simple_flare_find(times, mags, errs, smoothbinsize=97, flare_minsigma=4.0, flare_maxcadencediff=1, flare_mincadencepoints=3, magsarefluxes=False, savgol_polyorder=2, **savgol_kwargs): if errs is None: errs = 0.001*mags finiteind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs) ftimes = times[finiteind] fmags = mags[finiteind] ferrs = errs[finiteind] smoothed = savgol_filter(fmags, smoothbinsize, savgol_polyorder, **savgol_kwargs) subtracted = fmags - smoothed series_mad = np.median(np.abs(subtracted)) series_stdev = 1.483*series_mad if magsarefluxes: extind = np.where(subtracted > (flare_minsigma*series_stdev)) else: extind = np.where(subtracted < (-flare_minsigma*series_stdev)) if extind and extind[0]: extrema_indices = extind[0] flaregroups = [] for ind, extrema_index in enumerate(extrema_indices): pass
This finds flares in time series using the method in Walkowicz+ 2011. FIXME: finish this. Parameters ---------- times,mags,errs : np.array The input time-series to find flares in. smoothbinsize : int The number of consecutive light curve points to smooth over in the time series using a Savitsky-Golay filter. The smoothed light curve is then subtracted from the actual light curve to remove trends that potentially last `smoothbinsize` light curve points. The default value is chosen as ~6.5 hours (97 x 4 minute cadence for HATNet/HATSouth). flare_minsigma : float The minimum sigma above the median LC level to designate points as belonging to possible flares. flare_maxcadencediff : int The maximum number of light curve points apart each possible flare event measurement is allowed to be. If this is 1, then we'll look for consecutive measurements. flare_mincadencepoints : int The minimum number of light curve points (each `flare_maxcadencediff` points apart) required that are at least `flare_minsigma` above the median light curve level to call an event a flare. magsarefluxes: bool If True, indicates that mags is actually an array of fluxes. savgol_polyorder: int The polynomial order of the function used by the Savitsky-Golay filter. savgol_kwargs : extra kwargs Any remaining keyword arguments are passed directly to the `savgol_filter` function from `scipy.signal`. Returns ------- (nflares, flare_indices) : tuple Returns the total number of flares found and their time-indices (start, end) as tuples.
def _microcanonical_average_spanning_cluster(has_spanning_cluster, alpha):
    ret = dict()
    runs = has_spanning_cluster.size
    k = has_spanning_cluster.sum(dtype=np.float)
    ret['spanning_cluster'] = (
        (k + 1) / (runs + 2)
    )
    ret['spanning_cluster_ci'] = scipy.stats.beta.ppf(
        [alpha / 2, 1 - alpha / 2], k + 1, runs - k + 1
    )
    return ret
r''' Compute the average number of runs that have a spanning cluster Helper function for :func:`microcanonical_averages` Parameters ---------- has_spanning_cluster : 1-D :py:class:`numpy.ndarray` of bool Each entry is the ``has_spanning_cluster`` field of the output of :func:`sample_states`: An entry is ``True`` if there is a spanning cluster in that respective run, and ``False`` otherwise. alpha : float Significance level. Returns ------- ret : dict Spanning cluster statistics ret['spanning_cluster'] : float The average relative number (Binomial proportion) of runs that have a spanning cluster. This is the Bayesian point estimate of the posterior mean, with a uniform prior. ret['spanning_cluster_ci'] : 1-D :py:class:`numpy.ndarray` of float, size 2 The lower and upper bounds of the Binomial proportion confidence interval with uniform prior. See Also -------- sample_states : spanning cluster detection microcanonical_averages : spanning cluster statistics Notes ----- Averages and confidence intervals for Binomial proportions As Cameron [8]_ puts it, the normal approximation to the confidence interval for a Binomial proportion :math:`p` "suffers a *systematic* decline in performance (...) towards extreme values of :math:`p` near :math:`0` and :math:`1`, generating binomial [confidence intervals] with effective coverage far below the desired level." (see also References [6]_ and [7]_). A different approach to quantifying uncertainty is Bayesian inference. [5]_ For :math:`n` independent Bernoulli trails with common success probability :math:`p`, the *likelihood* to have :math:`k` successes given :math:`p` is the binomial distribution .. math:: P(k|p) = \binom{n}{k} p^k (1-p)^{n-k} \equiv B(a,b), where :math:`B(a, b)` is the *Beta distribution* with parameters :math:`a = k + 1` and :math:`b = n - k + 1`. Assuming a uniform prior :math:`P(p) = 1`, the *posterior* is [5]_ .. math:: P(p|k) = P(k|p)=B(a,b). A point estimate is the posterior mean .. math:: \bar{p} = \frac{k+1}{n+2} with the :math:`1 - \alpha` credible interval :math:`(p_l, p_u)` given by .. math:: \int_0^{p_l} dp B(a,b) = \int_{p_u}^1 dp B(a,b) = \frac{\alpha}{2}. References ---------- .. [5] Wasserman, L. All of Statistics (Springer New York, 2004), `doi:10.1007/978-0-387-21736-9 <http://dx.doi.org/10.1007/978-0-387-21736-9>`_. .. [6] DasGupta, A., Cai, T. T. & Brown, L. D. Interval Estimation for a Binomial Proportion. Statistical Science 16, 101-133 (2001). `doi:10.1214/ss/1009213286 <http://dx.doi.org/10.1214/ss/1009213286>`_. .. [7] Agresti, A. & Coull, B. A. Approximate is Better than "Exact" for Interval Estimation of Binomial Proportions. The American Statistician 52, 119-126 (1998), `doi:10.2307/2685469 <http://dx.doi.org/10.2307/2685469>`_. .. [8] Cameron, E. On the Estimation of Confidence Intervals for Binomial Population Proportions in Astronomy: The Simplicity and Superiority of the Bayesian Approach. Publications of the Astronomical Society of Australia 28, 128-139 (2011), `doi:10.1071/as10046 <http://dx.doi.org/10.1071/as10046>`_.
def BSF(cpu, dest, src):
    value = src.read()
    flag = Operators.EXTRACT(value, 0, 1) == 1
    res = 0
    for pos in range(1, src.size):
        res = Operators.ITEBV(dest.size, flag, res, pos)
        flag = Operators.OR(flag, Operators.EXTRACT(value, pos, 1) == 1)

    cpu.ZF = value == 0
    dest.write(Operators.ITEBV(dest.size, cpu.ZF, dest.read(), res))
Bit scan forward.

Searches the source operand (second operand) for the least significant
set bit (1 bit). If a least significant 1 bit is found, its bit index is
stored in the destination operand (first operand). The source operand can
be a register or a memory location; the destination operand is a
register. The bit index is an unsigned offset from bit 0 of the source
operand. If the contents of the source operand are 0, the contents of the
destination operand are undefined::

    IF SRC = 0
    THEN
        ZF = 1;
        DEST is undefined;
    ELSE
        ZF = 0;
        temp = 0;
        WHILE Bit(SRC, temp) = 0
        DO
            temp = temp + 1;
            DEST = temp;
        OD;
    FI;

:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
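A small illustrative sketch of the same semantics on plain Python integers (not the symbolic implementation above); the function name is made up for the example.

def bit_scan_forward(value):
    """Return (zf, index): index of the lowest set bit, or None if value == 0."""
    if value == 0:
        return True, None          # ZF = 1, destination undefined
    index = 0
    while (value >> index) & 1 == 0:
        index += 1
    return False, index            # ZF = 0, DEST = index

print(bit_scan_forward(0b10100))   # (False, 2)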
async def rename(self, name):
    await self._client.rename_conversation(
        hangouts_pb2.RenameConversationRequest(
            request_header=self._client.get_request_header(),
            new_name=name,
            event_request_header=self._get_event_request_header(),
        )
    )
Rename this conversation.

Hangouts only officially supports renaming group conversations, so
custom names for one-to-one conversations may or may not appear in all
first party clients.

Args:
    name (str): New name.

Raises:
    .NetworkError: If conversation cannot be renamed.
def assume_localhost(self):
    if not self.genv.host_string:
        self.genv.host_string = 'localhost'
        self.genv.hosts = ['localhost']
        self.genv.user = getpass.getuser()
Sets connection parameters to localhost, if not set already.
def _BYTES_TO_BITS():
    the_table = 256 * [None]
    bits_per_byte = list(range(7, -1, -1))
    for n in range(256):
        l = n
        bits = 8 * [None]
        for i in bits_per_byte:
            bits[i] = '01'[n & 1]
            n >>= 1
        the_table[l] = ''.join(bits)
    return the_table
Generate a table to convert a whole byte to binary. This code was taken from the Python Cookbook, 2nd edition - O'Reilly.
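For context, a quick usage sketch of the table built by the function above, alongside an equivalent using built-in string formatting; the variable name is illustrative.

BYTE_TABLE = _BYTES_TO_BITS()

print(BYTE_TABLE[5])        # '00000101'
print(format(5, '08b'))     # same result with str.format-style binary padding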
def check_readable(self, timeout):
    rlist, wlist, xlist = select.select([self._stdout], [], [], timeout)
    return bool(len(rlist))
Poll ``self.stdout`` and return True if it is readable.

:param float timeout: seconds to wait for I/O
:return: True if readable, else False
:rtype: boolean
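A standalone sketch of the same select-based readability check against a subprocess pipe (POSIX only); the helper name and the echo command are illustrative.

import select
import subprocess

def is_readable(stream, timeout=1.0):
    # select returns the subset of watched file objects that are ready to read
    rlist, _, _ = select.select([stream], [], [], timeout)
    return bool(rlist)

proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
print(is_readable(proc.stdout, timeout=1.0))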
def get_outgoing_sequence_names(self):
    return sorted([s.name for s in
                   list(self.outgoing_sequence_flows_by_id.values())])
Returns a list of the names of outgoing sequences. Some may be None.
def _autocov(s, **kwargs):
    debias = kwargs.pop('debias', True)
    axis = kwargs.get('axis', -1)
    if debias:
        s = _remove_bias(s, axis)
    kwargs['debias'] = False
    return _crosscov(s, s, **kwargs)
Returns the autocovariance of signal s at all lags.

Adheres to the definition

    sxx[k] = E{S[n]S[n+k]} = cov{S[n], S[n+k]}

where E{} is the expectation operator, and S is a zero-mean process.
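A minimal direct (non-FFT) sketch of that definition for a 1-D signal, assuming only numpy; it is not the library's implementation and uses a biased (divide-by-n) normalization.

import numpy as np

def autocov_direct(x):
    x = np.asarray(x, dtype=float)
    x = x - x.mean()              # remove the mean so E{S[n]S[n+k]} is a covariance
    n = x.size
    # lags k = 0 .. n-1, biased normalization (divide by n)
    return np.array([np.dot(x[:n - k], x[k:]) / n for k in range(n)])

rng = np.random.default_rng(0)
sig = rng.standard_normal(1000)
print(autocov_direct(sig)[:3])    # the lag-0 value approximates the variance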
def create_scalingip(context, content):
    LOG.info('create_scalingip for tenant %s and body %s',
             context.tenant_id, content)
    network_id = content.get('scaling_network_id')
    ip_address = content.get('scaling_ip_address')
    requested_ports = content.get('ports', [])

    network = _get_network(context, network_id)
    port_fixed_ips = {}
    for req_port in requested_ports:
        port = _get_port(context, req_port['port_id'])
        fixed_ip = _get_fixed_ip(context, req_port.get('fixed_ip_address'),
                                 port)
        port_fixed_ips[port.id] = {"port": port, "fixed_ip": fixed_ip}

    scip = _allocate_ip(context, network, None, ip_address, ip_types.SCALING)
    _create_flip(context, scip, port_fixed_ips)
    return v._make_scaling_ip_dict(scip)
Allocate or reallocate a scaling IP.

:param context: neutron api request context.
:param content: dictionary describing the scaling ip, with keys as
    listed in the RESOURCE_ATTRIBUTE_MAP object in
    neutron/api/v2/attributes.py. All keys will be populated.

:returns: Dictionary containing details for the new scaling IP. If
    values are declared in the fields parameter, then only those keys
    will be present.
def main():
    description = 'Validate a CSV data file.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('file',
                        metavar='FILE',
                        help='a file to be validated')
    parser.add_argument('-l', '--limit',
                        dest='limit',
                        type=int,
                        action='store',
                        default=0,
                        help='limit the number of problems reported')
    parser.add_argument('-s', '--summarize',
                        dest='summarize',
                        action='store_true',
                        default=False,
                        help='output only a summary of the different types '
                             'of problem found')
    parser.add_argument('-e', '--report-unexpected-exceptions',
                        dest='report_unexpected_exceptions',
                        action='store_true',
                        default=False,
                        help='report any unexpected exceptions as problems')
    args = parser.parse_args()

    if not os.path.isfile(args.file):
        print('%s is not a file' % args.file)
        sys.exit(1)

    with open(args.file, 'r') as f:
        data = csv.reader(f, delimiter='\t')
        validator = create_validator()
        problems = validator.validate(
            data,
            summarize=args.summarize,
            report_unexpected_exceptions=args.report_unexpected_exceptions,
            context={'file': args.file})
        write_problems(problems, sys.stdout,
                       summarize=args.summarize,
                       limit=args.limit)
        if problems:
            sys.exit(1)
        else:
            sys.exit(0)
Main function.
def functional(self):
    if self._model:
        tree, _ = parse_gpr(self.gene_reaction_rule)
        return eval_gpr(tree, {gene.id for gene in self.genes
                               if not gene.functional})
    return True
All required enzymes for reaction are functional.

Returns
-------
bool
    True if the gene-protein-reaction (GPR) rule is fulfilled for this
    reaction, or if the reaction is not associated with a model;
    otherwise False.
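A toy sketch of evaluating a gene-protein-reaction rule as a boolean expression over the set of non-functional genes. This illustrates the idea only; it is not the library's parse_gpr/eval_gpr machinery, and the rule syntax handled here is limited to and/or over gene names.

import ast

def eval_gpr_rule(rule, nonfunctional):
    """Evaluate a GPR rule such as '(g1 and g2) or g3' given non-functional gene ids."""
    tree = ast.parse(rule, mode='eval')

    def walk(node):
        if isinstance(node, ast.Expression):
            return walk(node.body)
        if isinstance(node, ast.BoolOp):
            values = [walk(v) for v in node.values]
            return all(values) if isinstance(node.op, ast.And) else any(values)
        if isinstance(node, ast.Name):
            return node.id not in nonfunctional
        raise ValueError('unsupported GPR syntax')

    return walk(tree)

print(eval_gpr_rule('(g1 and g2) or g3', {'g2'}))   # True: g3 alone suffices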
def get_canonical_headers(cls, req, include=None):
    if include is None:
        include = cls.default_include_headers
    include = [x.lower() for x in include]
    headers = req.headers.copy()
    # The Host header is always signed; derive it from the URL if absent.
    if 'host' not in headers:
        headers['host'] = urlparse(req.url).netloc.split(':')[0]
    # Normalize names and whitespace, grouping duplicate headers so their
    # values can later be joined with commas.
    cano_headers_dict = {}
    for hdr, val in headers.items():
        hdr = hdr.strip().lower()
        val = cls.amz_norm_whitespace(val).strip()
        if (hdr in include or '*' in include or
                ('x-amz-*' in include and hdr.startswith('x-amz-') and
                 not hdr == 'x-amz-client-context')):
            vals = cano_headers_dict.setdefault(hdr, [])
            vals.append(val)
    # Build the canonical string (sorted, one 'name:value\n' entry per
    # header) and the signed headers list.
    cano_headers = ''
    signed_headers_list = []
    for hdr in sorted(cano_headers_dict):
        vals = cano_headers_dict[hdr]
        val = ','.join(sorted(vals))
        cano_headers += '{}:{}\n'.format(hdr, val)
        signed_headers_list.append(hdr)
    signed_headers = ';'.join(signed_headers_list)
    return (cano_headers, signed_headers)
Generate the Canonical Headers section of the Canonical Request.

Return the Canonical Headers and the Signed Headers strs as a tuple
(canonical_headers, signed_headers).

req     -- Requests PreparedRequest object
include -- List of headers to include in the canonical and signed
           headers. It's primarily included to allow testing against
           specific examples from Amazon. If omitted or None it includes
           host, content-type and any header starting 'x-amz-' except
           for x-amz-client-context, which appears to break mobile
           analytics auth if included. Except for the
           x-amz-client-context exclusion these defaults are per the AWS
           documentation.
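An illustrative sketch of the canonicalization rules described above, operating on a plain dict of header values rather than a prepared request; the function name and example headers are assumptions for the demo, not part of the library.

import re

def canonicalize_headers(headers):
    """Return (canonical_headers, signed_headers) for a dict of header values."""
    collapsed = {}
    for name, value in headers.items():
        name = name.strip().lower()
        value = re.sub(r'\s+', ' ', value).strip()   # collapse runs of whitespace
        collapsed.setdefault(name, []).append(value)

    canonical = ''
    signed = []
    for name in sorted(collapsed):
        canonical += '{}:{}\n'.format(name, ','.join(sorted(collapsed[name])))
        signed.append(name)
    return canonical, ';'.join(signed)

print(canonicalize_headers({'Host': 'example.com',
                            'X-Amz-Date': '20240101T000000Z'}))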
def get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid):
    DataSharingConsent = apps.get_model('consent', 'DataSharingConsent')
    return DataSharingConsent.objects.proxied_get(
        username=username,
        course_id=course_id,
        enterprise_customer__uuid=enterprise_customer_uuid
    )
Get the data sharing consent object associated with a certain user of a
customer for a course.

:param username: The user that grants consent.
:param course_id: The course for which consent is granted.
:param enterprise_customer_uuid: The consent requester.
:return: The data sharing consent object
def cliques(graph, threshold=3):
    cliques = []
    for n in graph.nodes:
        c = clique(graph, n.id)
        if len(c) >= threshold:
            c.sort()
            if c not in cliques:
                cliques.append(c)
    return cliques
Returns all the cliques in the graph of at least the given size.
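For comparison, the same size-filtering idea expressed with networkx's maximal-clique enumeration. This is an external-library sketch, not the per-node clique() helper used above, and enumerating maximal cliques differs slightly from growing a clique around each node.

import networkx as nx

def cliques_at_least(g, threshold=3):
    # networkx enumerates maximal cliques; keep those with enough nodes
    return [sorted(c) for c in nx.find_cliques(g) if len(c) >= threshold]

g = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])
print(cliques_at_least(g, threshold=3))   # [[1, 2, 3]]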
def _ensure_regexp(source, n):
    markers = '(+~"\'=[%:?!*^|&-,;/\\'
    k = 0
    while True:
        k += 1
        if n - k < 0:
            return True
        char = source[n - k]
        if char in markers:
            return True
        if char != ' ' and char != '\n':
            break
    return False
Returns True if a regexp literal can start at position n, else returns
False. Checks that the '/' at this position is not a division operator,
by looking back at the previous significant character.
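A quick usage sketch against the function above; the source strings are made up for illustration.

src = 'x = 10 / 2'
print(_ensure_regexp(src, src.index('/')))   # False: previous significant char is '0'

src = 'm = ( /abc/ )'
print(_ensure_regexp(src, src.index('/')))   # True: previous significant char is '('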
def record_make_all_subfields_volatile(rec):
    for tag in rec.keys():
        for field_position, field in enumerate(rec[tag]):
            for subfield_position, subfield in enumerate(field[0]):
                if subfield[1][:9] != "VOLATILE:":
                    record_modify_subfield(rec, tag, subfield[0],
                                           "VOLATILE:" + subfield[1],
                                           subfield_position,
                                           field_position_local=field_position)
Turns all subfields to volatile