def _get_worker_id(self, conn):
    if self._worker_id is None:
        self._worker_id = conn.incr(self._key_worker())
    return self._worker_id
Get the worker ID, using a preestablished connection.
def _size_36():
    from shutil import get_terminal_size
    dim = get_terminal_size()
    if isinstance(dim, list):
        return dim[0], dim[1]
    return dim.lines, dim.columns
Returns the rows and columns of the terminal.
def _LLSGuessPayloadClass(p, **kargs):
    cls = conf.raw_layer
    if len(p) >= 3:
        typ = struct.unpack("!H", p[0:2])[0]
        clsname = _OSPF_LLSclasses.get(typ, "LLS_Generic_TLV")
        cls = globals()[clsname]
    return cls(p, **kargs)
Guess the correct LLS class for a given payload
def pack(x: Any) -> bytes:
    try:
        return msgpack.packb(x, default=encoders)
    except TypeError as exc:
        message = ('Serialization error, check the data passed to a do_ command. '
                   'Cannot serialize this object:\n') + str(exc)[16:]
        raise SerializationError(message)
Encode ``x`` into msgpack with additional encoders.
def _get_name_from_content_type(self, request):
    content_type = request.META.get('CONTENT_TYPE', None)
    if content_type:
        return util.strip_charset(content_type)
    return None
Get name from Content-Type header
def sub_sam(sam, percent, sort=True, sbuffer=False):
    mapping = sort_sam(sam, sort)
    pool = [1 for i in range(0, percent)] + [0 for i in range(0, 100 - percent)]
    c = cycle([1, 2])
    for line in mapping:
        line = line.strip().split()
        if line[0].startswith('@'):
            yield line
            continue
        if int(line[1]) <= 20:
            if random.choice(pool) == 1:
                yield line
        else:
            n = next(c)
            if n == 1:
                prev = line
            if n == 2 and random.choice(pool) == 1:
                yield prev
                yield line
Randomly subset a SAM file.
def print_permutations(self):
    index = 0
    permutations = []
    for p in self._input_permutations:
        permutations.append({'index': index, 'args': p})
        index += 1
    with open('permutations.json', 'w') as fh:
        json.dump(permutations, fh, indent=2)
    print('All permutations written to the "permutations.json" file.')
Write all valid permutations to the "permutations.json" file.
def explicit_rel_links(self, rels=('homepage', 'download')):
    for match in self._rel_re.finditer(self.content):
        found_rels = match.group(1).lower().split()
        for rel in rels:
            if rel in found_rels:
                break
        else:
            continue
        match = self._href_re.search(match.group(0))
        if not match:
            continue
        url = match.group(1) or match.group(2) or match.group(3)
        url = self.clean_link(urlparse.urljoin(self.base_url, url))
        yield Link(url, self)
Yields all links with the given relations
def _get_vispy_caller():
    records = inspect.stack()
    for record in records[5:]:
        module = record[0].f_globals['__name__']
        if module.startswith('vispy'):
            line = str(record[0].f_lineno)
            func = record[3]
            cls = record[0].f_locals.get('self', None)
            clsname = "" if cls is None else cls.__class__.__name__ + '.'
            caller = "{0}:{1}{2}({3}): ".format(module, clsname, func, line)
            return caller
    return 'unknown'
Helper to get vispy calling function from the stack
def labels(ctx):
    config = ctx.obj['agile']
    repos = config.get('repositories')
    labels = config.get('labels')
    if not isinstance(repos, list):
        raise CommandError(
            'You need to specify the "repos" list in the config'
        )
    if not isinstance(labels, dict):
        raise CommandError(
            'You need to specify the "labels" dictionary in the config'
        )
    git = GithubApi()
    for repo in repos:
        repo = git.repo(repo)
        for label, color in labels.items():
            if repo.label(label, color):
                click.echo('Created label "%s" @ %s' % (label, repo))
            else:
                click.echo('Updated label "%s" @ %s' % (label, repo))
Create or update labels in GitHub.
def dfsummary_made(self):
    try:
        empty = self.dfsummary.empty
    except AttributeError:
        empty = True
    return not empty
check if the summary table exists
def cumulative_gaps_to(self, when: datetime.datetime) -> datetime.timedelta:
    gaps = self.gaps()
    return gaps.cumulative_time_to(when)
Return the cumulative time within our gaps, up to ``when``.
def clean_cache(cached, **kwargs):
    " Generate cache key and clean cached value. "
    if isinstance(cached, basestring):
        cached = _str_to_model(cached)
    cache_key = generate_cache_key(cached, **kwargs)
    cache.delete(cache_key)
Generate cache key and clean cached value.
def libname_from_dir(dirname):
    parts = []
    for part in dirname.split('-'):
        if part[0].isdigit():
            break
        parts.append(part)
    return '-'.join(parts)
Reconstruct the library name without its version.
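For example, hypothetical calls (not from the source) behave like this:

    libname_from_dir('libfoo-1.2.3')    # -> 'libfoo'
    libname_from_dir('foo-bar-2.0rc1')  # -> 'foo-bar'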
def assertSameType(a, b):
    if not isinstance(b, type(a)):
        raise NotImplementedError("This operation is only supported for "
                                  "elements of the same type. Instead found {} and {}"
                                  .format(type(a), type(b)))
Raises an exception if @b is not an instance of type(@a)
def SendGrrMessageThroughFleetspeak(grr_id, msg):
    fs_msg = fs_common_pb2.Message(
        message_type="GrrMessage",
        destination=fs_common_pb2.Address(
            client_id=GRRIDToFleetspeakID(grr_id), service_name="GRR"))
    fs_msg.data.Pack(msg.AsPrimitiveProto())
    fleetspeak_connector.CONN.outgoing.InsertMessage(fs_msg)
Sends the given GrrMessage through Fleetspeak.
def list_websites(self):
    self.connect()
    results = self.server.list_websites(self.session_id)
    return results
Return all websites, name is not a key
def check_deleted_nodes(self):
    imported_nodes = Node.objects.filter(data__contains=['imported'])
    deleted_nodes = []
    for node in imported_nodes:
        if OldNode.objects.filter(pk=node.pk).count() == 0:
            user = node.user
            deleted_nodes.append(node)
            node.delete()
            if user.node_set.count() == 0:
                user.delete()
    if len(deleted_nodes) > 0:
        self.message('deleted %d imported nodes from local DB' % len(deleted_nodes))
    self.deleted_nodes = deleted_nodes
delete imported nodes that are not present in the old database
def initialize_snapshot(self):
    logger.debug('Initializing DAG snapshot for job {0}'.format(self.name))
    if self.snapshot is not None:
        logging.warn("Attempting to initialize DAG snapshot without "
                     "first destroying old snapshot.")
    snapshot_to_validate = deepcopy(self.graph)
    is_valid, reason = self.validate(snapshot_to_validate)
    if not is_valid:
        raise DagobahError(reason)
    self.snapshot = snapshot_to_validate
Copy the DAG and validate
def GetArtifactsDependenciesClosure(name_list, os_name=None):
    artifacts = set(REGISTRY.GetArtifacts(os_name=os_name, name_list=name_list))
    dependencies = set()
    for art in artifacts:
        dependencies.update(GetArtifactDependencies(art, recursive=True))
    if dependencies:
        artifacts.update(
            set(REGISTRY.GetArtifacts(os_name=os_name, name_list=list(dependencies))))
    return artifacts
For all the artifacts in the list, returns them and their dependencies.
def update(self, resource, rid, updates):
    if resource[-1] != '/':
        resource += '/'
    resource += str(rid)
    return self.put(resource, data=updates)
Updates the resource with id 'rid' with the given updates dictionary.
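A hypothetical usage, assuming a REST client instance named `api` (the name is illustrative, not from the source):

    api.update('/tickets', 7, {'status': 'closed'})  # issues PUT /tickets/7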
def getNumberTLD():
    total = 0
    for typeTld in TLD.keys():
        total += len(TLD[typeTld])
    return total
Count the total number of TLDs being processed.
def run(self, path):
    SEPARATOR = '=' * 40
    summary = {}
    res = True
    for _f in utils.get_files_by_path(path):
        L.info(SEPARATOR)
        status, summ = self._check_file(_f)
        res &= status
        if summ is not None:
            summary.update(summ)
    L.info(SEPARATOR)
    status = 'PASS' if res else 'FAIL'
    return {'files': summary, 'STATUS': status}
Test a bunch of files and return a summary JSON report
async def service_messages(self, msg, _context):
    msgs = self.service_manager.service_messages(msg.get('name'))
    return [x.to_dict() for x in msgs]
Get all messages for a service.
def make_serializable(data):
    if is_serializable(data):
        return data
    try:
        return data.tolist()
    except AttributeError:
        pass
    except Exception as e:
        logger.debug('{} exception ({}): {}'.format(type(e).__name__, e, data))
    if isinstance(data, dict):
        return {key: make_serializable(value) for key, value in data.items()}
    try:
        return [make_serializable(element) for element in data]
    except TypeError:
        pass
    except Exception:
        logger.debug('Could not serialize {}; converting to string'.format(data))
    return str(data)
Ensure data is serializable.
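A sketch of the intended behavior, assuming `is_serializable` tests JSON-compatibility (that helper is not shown in the source):

    import numpy as np
    payload = {'scores': np.array([0.1, 0.2]), 'name': 'run-1'}
    make_serializable(payload)
    # -> {'scores': [0.1, 0.2], 'name': 'run-1'}, now JSON-safe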
def temp(dev, target):
    click.echo("Current target temp: %s" % dev.target_temperature)
    if target:
        click.echo("Setting target temp: %s" % target)
        dev.target_temperature = target
Gets or sets the target temperature.
def load_stock_links(self):
    links = self.__get_session().query(dal.AssetClassStock).all()
    for entity in links:
        stock: Stock = Stock(entity.symbol)
        parent: AssetClass = self.model.get_class_by_id(entity.assetclassid)
        if parent:
            parent.stocks.append(stock)
            self.model.stocks.append(stock)
Read stock links into the model
def children(record, index, key='refs', stop_types=STOP_TYPES):
    result = []
    for ref in record.get(key, []):
        try:
            record = index[ref]
        except KeyError:
            pass
        else:
            if record['type'] not in stop_types:
                result.append(record)
    return result
Retrieve children records for given record
def toggle_object_status(self, objname):
    o = getattr(self.system, objname)
    o.status = not o.status
    self.system.flush()
    return o.status
Toggle boolean-valued sensor status between ``True`` and ``False``.
def simple_write(self, s, frame, node=None):
    self.start_write(frame, node)
    self.write(s)
    self.end_write(frame)
Simple shortcut for start_write + write + end_write.
def midi_event(self, event_type, channel, param1, param2=None):
    assert event_type < 0x80 and event_type >= 0
    assert channel < 16 and channel >= 0
    tc = a2b_hex('%x%x' % (event_type, channel))
    if param2 is None:
        params = a2b_hex('%02x' % param1)
    else:
        params = a2b_hex('%02x%02x' % (param1, param2))
    return self.delta_time + tc + params
Convert and return the parameters as a MIDI event in bytes.
def dataoneTypes(request):
    if is_v1_api(request):
        return d1_common.types.dataoneTypes_v1_1
    elif is_v2_api(request) or is_diag_api(request):
        return d1_common.types.dataoneTypes_v2_0
    else:
        raise d1_common.types.exceptions.ServiceFailure(
            0, 'Unknown version designator in URL. url="{}"'.format(request.path)
        )
Return the PyXB binding to use when handling a request.
def wr_xlsx(fout_xlsx, data_xlsx, **kws):
    from goatools.wr_tbl_class import WrXlsx
    items_str = kws.get("items", "items") if "items" not in kws else kws["items"]
    if data_xlsx:
        xlsxobj = WrXlsx(fout_xlsx, data_xlsx[0]._fields, **kws)
        worksheet = xlsxobj.add_worksheet()
        row_idx = xlsxobj.wr_title(worksheet)
        row_idx = xlsxobj.wr_hdrs(worksheet, row_idx)
        row_idx_data0 = row_idx
        row_idx = xlsxobj.wr_data(data_xlsx, row_idx, worksheet)
        xlsxobj.workbook.close()
        sys.stdout.write(" {N:>5} {ITEMS} WROTE: {FOUT}\n".format(
            N=row_idx - row_idx_data0, ITEMS=items_str, FOUT=fout_xlsx))
    else:
        sys.stdout.write(" 0 {ITEMS}. NOT WRITING {FOUT}\n".format(
            ITEMS=items_str, FOUT=fout_xlsx))
Write a spreadsheet into an xlsx file.
def _receive(self):
    result = self._talk.get()
    if not result:
        self._logger.error('Failed to receive')
    return result
Receive a chunk of request from client.
def _from_class(cls, class_name, module_name=None, *args, **kwargs):
    def _get_module(module_name):
        names = module_name.split(".")
        module = __import__(names[0])
        for i in range(1, len(names)):
            module = getattr(module, names[i])
        return module

    if module_name:
        module = _get_module(module_name)
        class_ = getattr(module, class_name)
    else:
        class_ = globals()[class_name]
    if not issubclass(class_, PersistentObject):
        t = class_.__name__, PersistentObject.__name__
        raise TypeError('Requested object type %s must be subtype of %s' % t)
    name = str(args[0]) if args else cls.__name__
    name = kwargs['name'] if 'name' in kwargs else name
    if hasattr(cls, 'get'):
        instance = cls.get(name)
        if instance:
            return instance
    instance = class_.__new__(class_, *args, **kwargs)
    instance.__init__(*args, **kwargs)
    return instance
Class method to create an object of a given class.
def key_pair_paths(key_name):
    public_key_path = os.path.expanduser("~/.ssh/{}.pub".format(key_name))
    private_key_path = os.path.expanduser("~/.ssh/{}.pem".format(key_name))
    return public_key_path, private_key_path
Returns public and private key paths for a given key_name.
def start_numbered_list(self):
    self._ordered = True
    self.start_container(List, stylename='_numbered_list')
    self.set_next_paragraph_style(
        'numbered-list-paragraph' if self._item_level <= 0 else 'sublist-paragraph')
Start a numbered list.
def __add_prop(self, key, admin=False):
    def getter(self):
        return self.config[key]

    def setter(self, val):
        if admin and not self.admin:
            raise RuntimeError(
                f"You can't set the {key} key without mod privileges"
            )
        self.__set_config_value(self.config.get_real_key(key), val)

    setattr(self.__class__, key, property(getter, setter))
Add a gettable and settable room config property at runtime.
def deflate(f, *args, **kwargs):
    data = f(*args, **kwargs)
    if isinstance(data, Response):
        content = data.data
    else:
        content = data
    deflater = zlib.compressobj()
    deflated_data = deflater.compress(content)
    deflated_data += deflater.flush()
    if isinstance(data, Response):
        data.data = deflated_data
        data.headers['Content-Encoding'] = 'deflate'
        data.headers['Content-Length'] = str(len(data.data))
        return data
    return deflated_data
Deflate Flask Response Decorator.
def nodes_gcp(c_obj):
    gcp_nodes = []
    try:
        gcp_nodes = c_obj.list_nodes(ex_use_disk_cache=True)
    except BaseHTTPError as e:
        abort_err("\r HTTP Error with GCP: {}".format(e))
    gcp_nodes = adj_nodes_gcp(gcp_nodes)
    return gcp_nodes
Get node objects from GCP.
def _makeJobGraphs(self, jobGraph, jobStore):
    jobsToJobGraphs = {self: jobGraph}
    for successors in (self._followOns, self._children):
        jobs = [successor._makeJobGraphs2(jobStore, jobsToJobGraphs)
                for successor in successors]
        jobGraph.stack.append(jobs)
    return jobsToJobGraphs
Creates a jobGraph for each job in the job graph, recursively.
def fields(self):
    return (self.attributes.values() +
            self.lists.values() +
            self.references.values())
Returns the list of field names of the model.
def dfbool2intervals(df, colbool):
    df.index = range(len(df))
    intervals = bools2intervals(df[colbool])
    for intervali, interval in enumerate(intervals):
        df.loc[interval[0]:interval[1], f'{colbool} interval id'] = intervali
        df.loc[interval[0]:interval[1], f'{colbool} interval start'] = interval[0]
        df.loc[interval[0]:interval[1], f'{colbool} interval stop'] = interval[1]
        df.loc[interval[0]:interval[1], f'{colbool} interval length'] = interval[1] - interval[0] + 1
        df.loc[interval[0]:interval[1], f'{colbool} interval within index'] = range(interval[1] - interval[0] + 1)
    df[f'{colbool} interval index'] = df.index
    return df
df contains bool values.
def _send_err(self, msg, errName, errMsg):
    r = message.ErrorMessage(
        errName,
        msg.serial,
        body=[errMsg],
        signature='s',
        destination=msg.sender,
    )
    self.conn.sendMessage(r)
Helper method for sending error messages
def validate(self):
    self.phase = PHASE.VALIDATE
    self.logger.info("Validating %s..." % self.namespace)
    self.instantiate_features()
    context_dict = {}
    if self.target:
        for s in self.target.formula_sections():
            context_dict["%s:root_dir" % s] = self.directory.install_directory(s)
        context_dict['config:root_dir'] = self.directory.root_dir
        context_dict['config:node'] = system.NODE
        self.target.add_additional_context(context_dict)
    for feature in self.features.run_order:
        self.run_action(feature, 'validate', run_if_error=True)
Validate the target environment
def get(self, url):
    self._query()
    return Enclosure(self._resp.get(url), url)
Get the response for the given enclosure URL
def _add_backend(self, backend):
    md_type = backend.verbose_name
    base = backend().get_model(self)
    new_md_attrs = {'_metadata': self.metadata, '__module__': __name__}
    new_md_meta = {}
    new_md_meta['verbose_name'] = '%s (%s)' % (self.verbose_name, md_type)
    new_md_meta['verbose_name_plural'] = '%s (%s)' % (self.verbose_name_plural, md_type)
    new_md_meta['unique_together'] = base._meta.unique_together
    new_md_attrs['Meta'] = type("Meta", (), new_md_meta)
    new_md_attrs['_metadata_type'] = backend.name
    model = type("%s%s" % (self.name, "".join(md_type.split())),
                 (base, self.MetadataBaseModel), new_md_attrs.copy())
    self.models[backend.name] = model
    globals()[model.__name__] = model
Builds a subclass model for the given backend
def move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum):
    for iattr, curattr in enumerate(point):
        cl_attr_sum[to_clust][iattr] += curattr
        cl_attr_sum[from_clust][iattr] -= curattr
    cl_memb_sum[to_clust] += 1
    cl_memb_sum[from_clust] -= 1
    return cl_attr_sum, cl_memb_sum
Move point between clusters, numerical attributes.
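A worked example with illustrative values (the data below is hypothetical):

    cl_attr_sum = [[10.0, 4.0], [6.0, 2.0]]  # per-cluster attribute sums
    cl_memb_sum = [3, 2]                     # per-cluster member counts
    move_point_num([1.0, 0.5], 0, 1, cl_attr_sum, cl_memb_sum)
    # cl_attr_sum -> [[11.0, 4.5], [5.0, 1.5]]; cl_memb_sum -> [4, 1]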
def run_server(conn, command, sock_path, debug, timeout):
    ret = 0
    try:
        handler = protocol.Handler(conn=conn, debug=debug)
        with serve(handler=handler, sock_path=sock_path, timeout=timeout) as env:
            if command:
                ret = server.run_process(command=command, environ=env)
            else:
                signal.pause()
    except KeyboardInterrupt:
        log.info('server stopped')
    return ret
Common code for run_agent and run_git below.
def Encrypt(self, data, iv=None):
    if iv is None:
        iv = rdf_crypto.EncryptionKey.GenerateKey(length=128)
    cipher = rdf_crypto.AES128CBCCipher(self.cipher.key, iv)
    return iv, cipher.Encrypt(data)
Symmetrically encrypt the data using the optional iv.
def replace_strings_in_list(array_of_strigs, replace_with_strings):
    "A value in replace_with_strings can be either single string or list of strings"
    potentially_nested_list = [replace_with_strings.get(s) or s for s in array_of_strigs]
    return list(flatten(potentially_nested_list))
A value in replace_with_strings can be either a single string or a list of strings.
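Assuming `flatten` yields the leaves of a nested list (that helper is not shown in the source), a call might look like:

    replace_strings_in_list(['a', 'b', 'c'], {'b': ['x', 'y']})
    # -> ['a', 'x', 'y', 'c']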
def power_on(env, identifier):
    mgr = SoftLayer.HardwareManager(env.client)
    hw_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'hardware')
    env.client['Hardware_Server'].powerOn(id=hw_id)
Power on a server.
def write_text(_command, txt_file):
    command = _command.strip()
    with open(txt_file, 'w') as txt:
        txt.writelines(command)
Dump SQL command to a text file.
def find_balance_index(source, start='{', end='}'):
    state = 1
    for index, char in enumerate(source):
        if char == start:
            state += 1
        elif char == end:
            state -= 1
        if state == 0:
            return index
    raise RuntimeError('This should not happen: Balance Not Found')
Get the first balance index.
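Note that `state` starts at 1, so the function assumes an opening delimiter was already consumed and returns the index of the character that balances it; for example:

    find_balance_index('a{b}c}')  # -> 5, the closer matching the implicit open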
def super_lm_moe():
    hparams = super_lm_base()
    hparams.layers = (
        ("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d")
    hparams.moe_num_experts = 32
    hparams.moe_hidden_sizes = "1024"
    return hparams
Add mixture of experts with ~1B params.
def _unpack_images(self, rdata):
    image = rdata.get('image')
    originalimage = rdata.get('originalimage')
    thumbnail = rdata.get('thumbnail')
    if image or originalimage or thumbnail:
        if 'image' not in self.data:
            self.data['image'] = []

    def file_url(info):
        if 'source' in info:
            info['url'] = info['source']
            info['file'] = info['source'].split('/')[-1]
            del info['source']
        return info

    if image:
        img = {'kind': 'restbase-image'}
        img.update(image)
        self.data['image'].append(file_url(img))
    if originalimage:
        img = {'kind': 'restbase-original'}
        img.update(originalimage)
        self.data['image'].append(file_url(img))
    if thumbnail:
        img = {'kind': 'restbase-thumb'}
        img.update(thumbnail)
        self.data['image'].append(file_url(img))
Set image data from RESTBase response
def fit(self, X, y):
    self._data = X
    self._classes = np.unique(y)
    self._labels = y
    self._is_fitted = True
Fit the model using X as training data and y as target values
def key_description(self):
    "Return a description of the key"
    vk, scan, flags = self._get_key_info()
    desc = ''
    if vk:
        if vk in CODE_NAMES:
            desc = CODE_NAMES[vk]
        else:
            desc = "VK %d" % vk
    else:
        desc = "%s" % self.key
    return desc
Return a description of the key
def parse(self, string, parent):
    result = {}
    for member in self.RE_MEMBERS.finditer(string):
        mems = self._process_member(member, parent, string)
        for onemem in mems:
            result[onemem.name.lower()] = onemem
    return result
Parses all the value code elements from the specified string.
def used_states(self):
    'a list of the used states in the order they appear'
    c = itertools.count()
    canonical_ids = collections.defaultdict(lambda: next(c))
    for s in self.states_list:
        for state in s.stateseq:
            canonical_ids[state]
    return list(map(operator.itemgetter(0),
                    sorted(canonical_ids.items(), key=operator.itemgetter(1))))
a list of the used states in the order they appear
def send(self, s, end=os.linesep, signal=False):
    if self.blocking:
        raise RuntimeError('send can only be used on non-blocking commands.')
    if not signal:
        if self._uses_subprocess:
            return self.subprocess.communicate(s + end)
        else:
            return self.subprocess.send(s + end)
    else:
        self.subprocess.send_signal(s)
Sends the given string or signal to std_in.
def fetch_releases(self, package_name):
    package_name = self.source.normalize_package_name(package_name)
    releases = self.source.get_package_versions(package_name)
    releases_with_index_url = [(item, self.index_url) for item in releases]
    return package_name, releases_with_index_url
Fetch package and index_url for a package_name.
def _metric_value(value_str, metric_type):
    if metric_type in (int, float):
        try:
            return metric_type(value_str)
        except ValueError:
            raise ValueError("Invalid {} metric value: {!r}".
                             format(metric_type.__name__, value_str))
    elif metric_type is six.text_type:
        return value_str.strip('"').encode('utf-8').decode('unicode_escape')
    else:
        assert metric_type is bool
        lower_str = value_str.lower()
        if lower_str == 'true':
            return True
        elif lower_str == 'false':
            return False
        else:
            raise ValueError("Invalid boolean metric value: {!r}".
                             format(value_str))
Return a Python-typed metric value from a metric value string.
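A few illustrative calls (hypothetical, not from the source):

    _metric_value('42', int)                 # -> 42
    _metric_value('true', bool)              # -> True
    _metric_value('"a\\nb"', six.text_type)  # -> 'a\nb' (quotes stripped, escapes decoded)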
def load_data_table(file, index_col, show_progress=False):
    with ZipFile(file) as zip_file:
        file_names = zip_file.namelist()
        assert len(file_names) == 1, "Expected a single file from Quandl."
        wiki_prices = file_names.pop()
        with zip_file.open(wiki_prices) as table_file:
            if show_progress:
                log.info('Parsing raw data.')
            data_table = pd.read_csv(
                table_file,
                parse_dates=['date'],
                index_col=index_col,
                usecols=[
                    'ticker',
                    'date',
                    'open',
                    'high',
                    'low',
                    'close',
                    'volume',
                    'ex-dividend',
                    'split_ratio',
                ],
            )
    data_table.rename(
        columns={
            'ticker': 'symbol',
            'ex-dividend': 'ex_dividend',
        },
        inplace=True,
        copy=False,
    )
    return data_table
Load data table from zip file provided by Quandl.
def orders(self):
    return [order_cmd for order_cmd in dir(self.handler)
            if getattr(getattr(self.handler, order_cmd), "bot_order", False)]
Return methods tagged "order" in the handler.
def contribute_error_pages(self):
    static_dir = self.settings.STATIC_ROOT
    if not static_dir:
        import tempfile
        static_dir = os.path.join(tempfile.gettempdir(), self.project_name)
        self.settings.STATIC_ROOT = static_dir
    self.section.routing.set_error_pages(
        common_prefix=os.path.join(static_dir, 'uwsgify'))
Contributes generic static error message pages to an existing section.
def _index_classes(self) -> Dict[Text, Type[Platform]]:
    out = {}
    for p in get_platform_settings():
        cls: Type[Platform] = import_class(p['class'])
        if 'name' in p:
            out[p['name']] = cls
        else:
            out[cls.NAME] = cls
    return out
Build a name index for all platform classes
def parse_env(s):
    m = ENV_RE.search(s)
    if m is None:
        return {}
    g1 = m.group(1)
    env = dict(ENV_SPLIT_RE.findall(g1))
    return env
Parses the environment portion of string into a dict.
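A minimal sketch under assumed definitions of the module-level patterns (the real `ENV_RE` and `ENV_SPLIT_RE` are not shown in the source):

    import re
    ENV_RE = re.compile(r'env\s+((?:\w+=\S+\s*)+)')  # hypothetical
    ENV_SPLIT_RE = re.compile(r'(\w+)=(\S+)')        # hypothetical
    parse_env('env FOO=1 BAR=2')  # -> {'FOO': '1', 'BAR': '2'}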
def _api_path(self, item):
    if self.base_url is None:
        raise NotImplementedError("base_url not set")
    path = "/".join([x.blob["id"] for x in item.path])
    return "/".join([self.base_url, path])
Get the API path for the current cursor position.
def SetSelected(self, node):
    self.selected_node = node
    index = self.NodeToIndex(node)
    if index != -1:
        self.Focus(index)
        self.Select(index, True)
    return index
Set our selected node
def _revision_url(cls, rev, branch, target_platform):
    namespace = 'gecko.v2.mozilla-' + branch + '.revision.' + rev
    product = 'mobile' if 'android' in target_platform else 'firefox'
    return cls.URL_BASE + '/task/' + namespace + '.' + product + '.' + target_platform
Retrieve the URL for revision based builds
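For instance, a hypothetical call via its class would assemble:

    _revision_url('abc123', 'central', 'linux64')
    # -> URL_BASE + '/task/gecko.v2.mozilla-central.revision.abc123.firefox.linux64'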
def load_yaml_from_docstring(docstring):
    split_lines = trim_docstring(docstring).split("\n")
    for index, line in enumerate(split_lines):
        line = line.strip()
        if line.startswith("---"):
            cut_from = index
            break
    else:
        return {}
    yaml_string = "\n".join(split_lines[cut_from:])
    yaml_string = dedent(yaml_string)
    return yaml.safe_load(yaml_string) or {}
Loads YAML from docstring.
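Assuming `trim_docstring` follows the PEP 257 trimming behavior, a docstring like the following would parse as shown (hypothetical example):

    doc = '''Summary line.
    ---
    responses:
        200:
            description: OK
    '''
    load_yaml_from_docstring(doc)  # -> {'responses': {200: {'description': 'OK'}}}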
def on_window_width_value_changed(self, wscale):
    val = wscale.get_value()
    self.settings.general.set_int('window-width', int(val))
Changes the value of window_width in dconf
def hull(self):
    from scipy.spatial import ConvexHull
    if len(self.coordinates) >= 4:
        inds = ConvexHull(self.coordinates).vertices
        return self.coordinates[inds]
    else:
        return self.coordinates
Bounding polygon as a convex hull.
def _get_file_paths(cur):
    out = []
    if isinstance(cur, (list, tuple)):
        for x in cur:
            new = _get_file_paths(x)
            if new:
                out.extend(new)
    elif isinstance(cur, dict):
        if "class" in cur:
            out.append(cur["path"])
        else:
            for k, v in cur.items():
                new = _get_file_paths(v)
                if new:
                    out.extend(new)
    return out
Retrieve a list of file paths, recursively traversing the nested input structure.
def create_fc_template(self, out_path, out_name):
    fields = self.fields
    objectIdField = self.objectIdField
    geomType = self.geometryType
    wkid = self.parentLayer.spatialReference['wkid']
    return create_feature_class(out_path,
                                out_name,
                                geomType,
                                wkid,
                                fields,
                                objectIdField)
Creates a feature class template on local disk.
def on_disconnect(self):
    "Called when the stream disconnects"
    if self._stream is not None:
        self._stream = None
    if self._buffer is not None:
        self._buffer.close()
        self._buffer = None
    self.encoding = None
Called when the stream disconnects
def lambert_xticks(ax, ticks):
    te = lambda xy: xy[0]
    lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T
    xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', lc, te)
    ax.xaxis.tick_bottom()
    ax.set_xticks(xticks)
    ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick) for xtick in xticklabels])
Draw ticks on the bottom x-axis of a Lambert Conformal projection.
def K_chol(self):
    if self._K_chol is None:
        self._K_chol = jitchol(self._K)
    return self._K_chol
Cholesky of the prior covariance K
def _from_dict(cls, _dict):
    args = {}
    if 'authors' in _dict:
        args['authors'] = [
            Author._from_dict(x) for x in (_dict.get('authors'))
        ]
    if 'publication_date' in _dict:
        args['publication_date'] = _dict.get('publication_date')
    if 'title' in _dict:
        args['title'] = _dict.get('title')
    if 'image' in _dict:
        args['image'] = _dict.get('image')
    if 'feeds' in _dict:
        args['feeds'] = [Feed._from_dict(x) for x in (_dict.get('feeds'))]
    return cls(**args)
Initialize an AnalysisResultsMetadata object from a JSON dictionary.
def paint(self):
    self.image = Image.new(mode='RGB', size=(self.width, self.height),
                           color=(47, 98, 135))
    self.paint_pattern()
    self.image.save(fp=self.filename)
Saves the wallpaper as the specified filename.
def cprint(msg, reset=True, template=ColorTemplate):
    print(cformat(msg, reset, template))
Same as cformat but prints a string.
def user_logged_out(self, sender, request: AxesHttpRequest, user, **kwargs):
    clean_expired_user_attempts(request.axes_attempt_time)
    username = user.get_username()
    client_str = get_client_str(username, request.axes_ip_address,
                                request.axes_user_agent, request.axes_path_info)
    log.info('AXES: Successful logout by %s.', client_str)
    if username and not settings.AXES_DISABLE_ACCESS_LOG:
        AccessLog.objects.filter(
            username=username,
            logout_time__isnull=True,
        ).update(
            logout_time=request.axes_attempt_time,
        )
When user logs out, update the AccessLog related to the user.
def try_recover_from_autosave(self):
    autosave_dir = get_conf_path('autosave')
    autosave_mapping = CONF.get('editor', 'autosave_mapping', {})
    dialog = RecoveryDialog(autosave_dir, autosave_mapping, parent=self.editor)
    dialog.exec_if_nonempty()
    self.recover_files_to_open = dialog.files_to_open[:]
Offer to recover files from autosave.
def load_jupyter_server_extension(app):
    if issubclass(app.contents_manager_class, TextFileContentsManager):
        app.log.info("[Jupytext Server Extension] NotebookApp.contents_manager_class is "
                     "(a subclass of) jupytext.TextFileContentsManager already - OK")
        return
    app.log.info('[Jupytext Server Extension] Changing NotebookApp.contents_manager_class '
                 'from {} to jupytext.TextFileContentsManager'.format(app.contents_manager_class.__name__))
    app.contents_manager_class = TextFileContentsManager
    try:
        app.contents_manager = app.contents_manager_class(parent=app, log=app.log)
        app.session_manager.contents_manager = app.contents_manager
        app.web_app.settings['contents_manager'] = app.contents_manager
    except Exception:
        # the original error message was lost in extraction; a generic one is used here
        app.log.error("[Jupytext Server Extension] Error while updating the contents manager")
        raise
Use Jupytext's contents manager
def determine_type(filename):
    ftype = magic.from_file(filename, mime=True).decode('utf8')
    if ftype == 'text/plain':
        ftype = 'text'
    elif ftype == 'image/svg+xml':
        ftype = 'svg'
    else:
        ftype = ftype.split('/')[1]
    return ftype
Determine the file type and return it.
def next(self):
    if self.idx >= len(self.page_list):
        raise StopIteration()
    page = self.page_list[self.idx]
    self.idx += 1
    return page
Provide the next element of the list.
def _write_cvvr(self, f, data):
    f.seek(0, 2)
    byte_loc = f.tell()
    cSize = len(data)
    block_size = CDF.CVVR_BASE_SIZE64 + cSize
    section_type = CDF.CVVR_
    rfuA = 0
    cvvr1 = bytearray(24)
    cvvr1[0:8] = struct.pack('>q', block_size)
    cvvr1[8:12] = struct.pack('>i', section_type)
    cvvr1[12:16] = struct.pack('>i', rfuA)
    cvvr1[16:24] = struct.pack('>q', cSize)
    f.write(cvvr1)
    f.write(data)
    return byte_loc
Write compressed "data" variable to the end of the file in a CVVR
def deep_copy(self):
    c = KalmanState(self.observation_matrix, self.translation_matrix)
    c.state_vec = self.state_vec.copy()
    c.state_cov = self.state_cov.copy()
    c.noise_var = self.noise_var.copy()
    c.state_noise = self.state_noise.copy()
    c.state_noise_idx = self.state_noise_idx.copy()
    return c
Return a deep copy of the state
def begin_pending_transactions(self):
    while not self._pending_sessions.empty():
        session = self._pending_sessions.get()
        session._transaction.begin()
        super(TransactionPingingPool, self).put(session)
Begin all transactions for sessions added to the pool.
def delete_metric(self, slug):
    prefix = "m:{0}:*".format(slug)
    keys = self.r.keys(prefix)
    self.r.delete(*keys)
    self.r.srem(self._metric_slugs_key, slug)
Removes all keys for the given ``slug``.
def session(connection_string=None):
    global _session_makers
    connection_string = connection_string or oz.settings["db"]
    if connection_string not in _session_makers:
        _session_makers[connection_string] = sessionmaker(
            bind=engine(connection_string=connection_string))
    return _session_makers[connection_string]()
Gets a SQLAlchemy session
def validate(self, value):
    try:
        if value:
            v = float(value)
            if (v != 0 and v < self.fmin) or v > self.fmax:
                return None
            if abs(round(100000 * v) - 100000 * v) > 1.e-12:
                return None
        return value
    except ValueError:
        return None
This prevents setting any value more precise than 0.00001
def json_to_file(data, filename, pretty=False):
    kwargs = dict(indent=4) if pretty else {}
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    dump = json.dumps(data, **kwargs)
    with open(filename, 'wb') as f:
        f.write(dump.encode('utf-8'))
Dump JSON data to a file
def on_save(self, event):
    dlg = wx.FileDialog(None, self.settings.get_title(), '', "", '*.*',
                        wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
    if dlg.ShowModal() == wx.ID_OK:
        self.settings.save(dlg.GetPath())
called on save button
def _get_bootstrap_content(directory='.'):
    try:
        with salt.utils.files.fopen(os.path.join(
                os.path.abspath(directory), 'bootstrap.py')) as fic:
            oldcontent = salt.utils.stringutils.to_unicode(
                fic.read()
            )
    except (OSError, IOError):
        oldcontent = ''
    return oldcontent
Get the current bootstrap.py script content
def _sort_cards(self, cards: Generator) -> list:
    return sorted([card.__dict__ for card in cards],
                  key=itemgetter('blocknum', 'blockseq', 'cardseq'))
Sort cards by blocknum, blockseq and cardseq.
def escapePathForShell(path):
    if platform.system() == 'Windows':
        return '"{}"'.format(path.replace('"', '""'))
    else:
        return shellescape.quote(path)
Escapes a filesystem path for use as a command-line argument
def getGroups(self, proteinId):
    return [self.groups[gId] for gId in self._proteinToGroupIds[proteinId]]
Return a list of protein groups a protein is associated with.
def parse_time(time):
    unit = time[-1]
    if unit not in ['s', 'm', 'h', 'd']:
        print_error('the unit of time can only be one of {s, m, h, d}')
        exit(1)
    time = time[:-1]
    if not time.isdigit():
        print_error('time format error!')
        exit(1)
    parse_dict = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    return int(time) * parse_dict[unit]
Convert the time to seconds.
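For example:

    parse_time('90s')  # -> 90
    parse_time('2h')   # -> 7200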