Columns: code (string, lengths 59 to 4.4k chars); docstring (string, lengths 5 to 7.69k chars)
def _trackInstanceAndCheckForConcurrencyViolation(self): global g_max_concurrency, g_max_concurrency_raise_exception assert g_max_concurrency is not None assert self not in self._clsOutstandingInstances, repr(self) self._creationTracebackString = traceback.format_stack() if self._clsNumOutstanding >= g_max_concurrency: errorMsg = ("With numOutstanding=%r, exceeded concurrency limit=%r " "when requesting %r. OTHER TRACKED UNRELEASED " "INSTANCES (%s): %r") % ( self._clsNumOutstanding, g_max_concurrency, self, len(self._clsOutstandingInstances), self._clsOutstandingInstances,) self._logger.error(errorMsg) if g_max_concurrency_raise_exception: raise ConcurrencyExceededError(errorMsg) self._clsOutstandingInstances.add(self) self._addedToInstanceSet = True return
Check for concurrency violation and add self to _clsOutstandingInstances. ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is incremented
def filter_dict(d, exclude):
    ret = {}
    for key, value in d.items():
        if key not in exclude:
            ret.update({key: value})
    return ret
Return a new dict with specified keys excluded from the original dict Args: d (dict): original dict exclude (list): The keys that are excluded
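A minimal usage sketch for the pair above, assuming filter_dict is importable as defined; the settings dict and key names are invented for illustration.

settings = {"host": "localhost", "port": 8080, "debug": True}

public = filter_dict(settings, exclude=["debug"])
print(public)  # {'host': 'localhost', 'port': 8080}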
def get_sampled(data, totn, node): names = sorted(totn) cdict = {name: idx for idx, name in enumerate(names)} if (node.is_leaf() or node.is_root()): return 0 else: if len(node.children) > 2: down_r = node.children[0] down_l = node.children[1] for child in node.children[2:]: down_l += child else: down_r, down_l = node.children lendr = set(cdict[i] for i in down_r.get_leaf_names()) lendl = set(cdict[i] for i in down_l.get_leaf_names()) up_r = node.get_sisters()[0] lenur = set(cdict[i] for i in up_r.get_leaf_names()) lenul = set(cdict[i] for i in totn) - set.union(lendr, lendl, lenur) idx = 0 sampled = 0 with h5py.File(data.database.output, 'r') as io5: end = io5["quartets"].shape[0] while 1: if idx >= end: break qrts = io5["quartets"][idx:idx+data._chunksize] for qrt in qrts: sqrt = set(qrt) if all([sqrt.intersection(i) for i in [lendr, lendl, lenur, lenul]]): sampled += 1 idx += data._chunksize return sampled
get total number of quartets sampled for a split
def _get_resource(self, url, data_key=None):
    headers = {"Accept": "application/json"}
    if self.token:
        headers["W-Token"] = "%s" % self.token
    response = WhenIWork_DAO().getURL(url, headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    return json.loads(response.data)
When I Work GET method. Return representation of the requested resource.
def appendRecord(self, record): assert self._file is not None assert self._mode == self._FILE_WRITE_MODE assert isinstance(record, (list, tuple)), \ "unexpected record type: " + repr(type(record)) assert len(record) == self._fieldCount, \ "len(record): %s, fieldCount: %s" % (len(record), self._fieldCount) if self._recordCount == 0: names, types, specials = zip(*self.getFields()) for line in names, types, specials: self._writer.writerow(line) self._updateSequenceInfo(record) line = [self._adapters[i](f) for i, f in enumerate(record)] self._writer.writerow(line) self._recordCount += 1
Saves the record in the underlying csv file. :param record: a list of Python objects that will be string-ified
def _warning(code):
    if isinstance(code, str):
        return code
    message = ''
    if isinstance(code, tuple):
        if isinstance(code[0], str):
            message = code[1]
            code = code[0]
    return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + message
Return a warning message of code 'code'. If code = (cd, str) it returns the warning message of code 'cd' and appends str at the end
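A small sketch of how _warning resolves codes. CFG_BIBRECORD_WARNING_MSGS is a module-level mapping not shown in this record, so a stand-in dictionary with an invented code is used here.

# Stand-in for the real bibrecord warning-message mapping.
CFG_BIBRECORD_WARNING_MSGS = {'QP_001': '(QP_001) invalid value'}

print(_warning('some free-form text'))             # plain strings pass through unchanged
print(_warning(('QP_001', ': field 245__a')))      # '(QP_001) invalid value: field 245__a'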
def format_objects(objects, children=False, columns=None, header=True): columns = columns or ('NAME', 'TYPE', 'PATH') objects = sorted(objects, key=_type_and_name) data = [] for obj in objects: if isinstance(obj, cpenv.VirtualEnvironment): data.append(get_info(obj)) modules = obj.get_modules() if children and modules: for mod in modules: data.append(get_info(mod, indent=2, root=obj.path)) else: data.append(get_info(obj)) maxes = [len(max(col, key=len)) for col in zip(*data)] tmpl = '{:%d} {:%d} {:%d}' % tuple(maxes) lines = [] if header: lines.append('\n' + bold_blue(tmpl.format(*columns))) for obj_data in data: lines.append(tmpl.format(*obj_data)) return '\n'.join(lines)
Format a list of environments and modules for terminal output
def code_challenge(verifier):
    digest = hashlib.sha256(verifier).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=')
Creates a 'code_challenge' as described in section 4.2 of RFC 7636 by taking the sha256 hash of the verifier and then urlsafe base64-encoding it. Args: verifier: bytestring, representing a code_verifier as generated by code_verifier(). Returns: Bytestring, representing a urlsafe base64-encoded sha256 hash digest, without '=' padding.
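For context, a self-contained sketch of the matching PKCE verifier/challenge pair per RFC 7636. The verifier generation below is only a stand-in for the library's own code_verifier() helper, which is referenced in the docstring but not shown in this record.

import base64
import hashlib
import os

# Illustrative S256 PKCE flow: generate a verifier, then derive the challenge
# exactly as code_challenge() above does (sha256, unpadded urlsafe base64).
verifier = base64.urlsafe_b64encode(os.urandom(32)).rstrip(b'=')  # stand-in for code_verifier()
challenge = base64.urlsafe_b64encode(hashlib.sha256(verifier).digest()).rstrip(b'=')

print(verifier.decode(), challenge.decode())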
def write_config(self):
    with open(self.config_file, "w") as config_file:
        self.cfg.write(config_file)
Writes `self.cfg` to `self.config_file`.
def get(self, key, default=None):
    if self.in_memory:
        return self._memory_db.get(key, default)
    else:
        db = self._read_file()
        return db.get(key, default)
Get key value, return default if key doesn't exist
def popUpItem(self, *args):
    self.Press()
    time.sleep(.5)
    return self._menuItem(self, *args)
Return the specified item in a pop up menu.
def distribute_package(roles, cl_args):
    Log.info("Distributing heron package to nodes (this might take a while)...")
    masters = roles[Role.MASTERS]
    slaves = roles[Role.SLAVES]
    tar_file = tempfile.NamedTemporaryFile(suffix=".tmp").name
    Log.debug("TAR file %s to %s" % (cl_args["heron_dir"], tar_file))
    make_tarfile(tar_file, cl_args["heron_dir"])
    dist_nodes = masters.union(slaves)
    scp_package(tar_file, dist_nodes, cl_args)
distribute Heron packages to all nodes
def window_at(self, geom, window_shape): y_size, x_size = window_shape[0], window_shape[1] bounds = box(*geom.bounds) px = ops.transform(self.__geo_transform__.rev, bounds).centroid miny, maxy = int(px.y - y_size/2), int(px.y + y_size/2) minx, maxx = int(px.x - x_size/2), int(px.x + x_size/2) _, y_max, x_max = self.shape if minx < 0 or miny < 0 or maxx > x_max or maxy > y_max: raise ValueError("Input geometry resulted in a window outside of the image") return self[:, miny:maxy, minx:maxx]
Return a subsetted window of a given size, centered on a geometry object Useful for generating training sets from vector training data Will throw a ValueError if the window is not within the image bounds Args: geom (shapely.geometry): Geometry to center the image on window_shape (tuple): The desired shape of the image as (height, width) in pixels. Returns: image: image object of same type
def build_pipeline(cls, project, zones, min_cores, min_ram, disk_size, boot_disk_size, preemptible, accelerator_type, accelerator_count, image, script_name, envs, inputs, outputs, pipeline_name): if min_cores is None: min_cores = job_model.DEFAULT_MIN_CORES if min_ram is None: min_ram = job_model.DEFAULT_MIN_RAM if disk_size is None: disk_size = job_model.DEFAULT_DISK_SIZE if boot_disk_size is None: boot_disk_size = job_model.DEFAULT_BOOT_DISK_SIZE if preemptible is None: preemptible = job_model.DEFAULT_PREEMPTIBLE docker_command = cls._build_pipeline_docker_command(script_name, inputs, outputs, envs) input_envs = [{ 'name': SCRIPT_VARNAME }] + [{ 'name': env.name } for env in envs if env.value] input_files = [ cls._build_pipeline_input_file_param(var.name, var.docker_path) for var in inputs if not var.recursive and var.value ] output_files = [ cls._build_pipeline_file_param(var.name, var.docker_path) for var in outputs if not var.recursive and var.value ] return { 'ephemeralPipeline': { 'projectId': project, 'name': pipeline_name, 'resources': { 'minimumCpuCores': min_cores, 'minimumRamGb': min_ram, 'bootDiskSizeGb': boot_disk_size, 'preemptible': preemptible, 'zones': google_base.get_zones(zones), 'acceleratorType': accelerator_type, 'acceleratorCount': accelerator_count, 'disks': [{ 'name': 'datadisk', 'autoDelete': True, 'sizeGb': disk_size, 'mountPoint': providers_util.DATA_MOUNT_POINT, }], }, 'inputParameters': input_envs + input_files, 'outputParameters': output_files, 'docker': { 'imageName': image, 'cmd': docker_command, } } }
Builds a pipeline configuration for execution. Args: project: string name of project. zones: list of zone names for jobs to be run at. min_cores: int number of CPU cores required per job. min_ram: int GB of RAM required per job. disk_size: int GB of disk to attach under /mnt/data. boot_disk_size: int GB of disk for boot. preemptible: use a preemptible VM for the job accelerator_type: string GCE defined accelerator type. accelerator_count: int number of accelerators of the specified type to attach. image: string Docker image name in which to run. script_name: file name of the script to run. envs: list of EnvParam objects specifying environment variables to set within each job. inputs: list of FileParam objects specifying input variables to set within each job. outputs: list of FileParam objects specifying output variables to set within each job. pipeline_name: string name of pipeline. Returns: A nested dictionary with one entry under the key ephemeralPipeline containing the pipeline configuration.
def add_record_length_check(self, code=RECORD_LENGTH_CHECK_FAILED,
                            message=MESSAGES[RECORD_LENGTH_CHECK_FAILED],
                            modulus=1):
    t = code, message, modulus
    self._record_length_checks.append(t)
Add a record length check, i.e., check whether the length of a record is consistent with the number of expected fields. Arguments --------- `code` - problem code to report if a record is not valid, defaults to `RECORD_LENGTH_CHECK_FAILED` `message` - problem message to report if a record is not valid `modulus` - apply the check to every nth record, defaults to 1 (check every record)
def set_directory(path=None):
    old_path = get_directory()
    terminate_server()
    cache.clear()
    if path:
        cache['language_check_dir'] = path
        try:
            get_jar_info()
        except Error:
            cache['language_check_dir'] = old_path
            raise
Set LanguageTool directory.
def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False): ndet = len(fmags) if ndet > 9: medmag = npmedian(fmags) delta_prefactor = (ndet/(ndet - 1)) sigma_i = delta_prefactor*(fmags - medmag)/ferrs sigma_j = nproll(sigma_i,1) if weightbytimediff: difft = npdiff(ftimes) deltat = npmedian(difft) weights_i = npexp(- difft/deltat ) products = (weights_i*sigma_i[1:]*sigma_j[1:]) else: products = (sigma_i*sigma_j)[1:] stetsonj = ( npsum(npsign(products) * npsqrt(npabs(products))) ) / ndet return stetsonj else: LOGERROR('not enough detections in this magseries ' 'to calculate stetson J index') return npnan
This calculates the Stetson index for the magseries, based on consecutive pairs of observations. Based on Nicole Loncke's work for her Planets and Life certificate at Princeton in 2014. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. weightbytimediff : bool If this is True, the Stetson index for any pair of mags will be reweighted by the difference in times between them using the scheme in Fruth+ 2012 and Zhang+ 2003 (as seen in Sokolovsky+ 2017):: w_i = exp(- (t_i+1 - t_i)/ delta_t ) Returns ------- float The calculated Stetson J variability index.
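A small synthetic check, assuming stetson_jindex is importable as defined (it relies on the module's numpy aliases such as npmedian and nproll); the toy light curve below is invented for illustration.

import numpy as np

# Toy magnitude series: constant signal plus Gaussian noise, uniform errors.
rng = np.random.RandomState(42)
ftimes = np.arange(100.0)
fmags = 12.0 + 0.01 * rng.randn(100)
ferrs = np.full(100, 0.01)

# Values near zero indicate no correlated variability; larger positive values
# indicate genuine variability between consecutive observations.
print(stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=True))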
def dumps(obj, startindex=1, separator=DEFAULT, index_separator=DEFAULT):
    try:
        firstkey = next(iter(obj.keys()))
    except StopIteration:
        return str()
    if isinstance(firstkey, six.text_type):
        io = StringIO()
    else:
        io = BytesIO()
    dump(
        obj=obj,
        fp=io,
        startindex=startindex,
        separator=separator,
        index_separator=index_separator,
    )
    return io.getvalue()
Dump an object in req format to a string. :param Mapping obj: The object to serialize. Must have a keys method. :param separator: The separator between key and value. Defaults to u'|' or b'|', depending on the types. :param index_separator: The separator between key and index. Defaults to u'_' or b'_', depending on the types.
def SCAS(cpu, dest, src): dest_reg = dest.reg mem_reg = src.mem.base size = dest.size arg0 = dest.read() arg1 = src.read() res = arg0 - arg1 cpu._calculate_CMP_flags(size, res, arg0, arg1) increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8) cpu.write_register(mem_reg, cpu.read_register(mem_reg) + increment)
Scans String. Compares the byte, word, or double word specified with the memory operand with the value in the AL, AX, EAX, or RAX register, and sets the status flags according to the results. The memory operand address is read from either the ES:RDI, ES:EDI or the ES:DI registers (depending on the address-size attribute of the instruction, 32 or 16, respectively):: IF (byte comparison) THEN temp = AL - SRC; SetStatusFlags(temp); THEN IF DF = 0 THEN (E)DI = (E)DI + 1; ELSE (E)DI = (E)DI - 1; FI; ELSE IF (word comparison) THEN temp = AX - SRC; SetStatusFlags(temp) THEN IF DF = 0 THEN (E)DI = (E)DI + 2; ELSE (E)DI = (E)DI - 2; FI; ELSE (* doubleword comparison *) temp = EAX - SRC; SetStatusFlags(temp) THEN IF DF = 0 THEN (E)DI = (E)DI + 4; ELSE (E)DI = (E)DI - 4; FI; FI; FI; :param cpu: current CPU. :param dest: destination operand. :param src: source operand.
def add_mip_obj(model): if len(model.variables) > 1e4: LOGGER.warning("the MIP version of minimal media is extremely slow for" " models that large :(") exchange_rxns = find_boundary_types(model, "exchange") big_m = max(abs(b) for r in exchange_rxns for b in r.bounds) prob = model.problem coefs = {} to_add = [] for rxn in exchange_rxns: export = len(rxn.reactants) == 1 indicator = prob.Variable("ind_" + rxn.id, lb=0, ub=1, type="binary") if export: vrv = rxn.reverse_variable indicator_const = prob.Constraint( vrv - indicator * big_m, ub=0, name="ind_constraint_" + rxn.id) else: vfw = rxn.forward_variable indicator_const = prob.Constraint( vfw - indicator * big_m, ub=0, name="ind_constraint_" + rxn.id) to_add.extend([indicator, indicator_const]) coefs[indicator] = 1 model.add_cons_vars(to_add) model.solver.update() model.objective.set_linear_coefficients(coefs) model.objective.direction = "min"
Add a mixed-integer version of a minimal medium to the model. Changes the optimization objective to finding the medium with the least components:: minimize size(R) where R part of import_reactions Arguments --------- model : cobra.model The model to modify.
def create_query(self, attr):
    field = attr[0]
    operator = attr[1]
    value = attr[2]
    model = self.model
    if '.' in field:
        field_items = field.split('.')
        field_name = getattr(model, field_items[0], None)
        class_name = field_name.property.mapper.class_
        new_model = getattr(class_name, field_items[1])
        return field_name.has(OPERATORS[operator](new_model, value))
    return OPERATORS[operator](getattr(model, field, None), value)
Mix all values and make the query
def _parse_notes_dict(sbase):
    notes = sbase.getNotesString()
    if notes and len(notes) > 0:
        pattern = r"<p>\s*(\w+\s*\w*)\s*:\s*([\w|\s]+)<"
        matches = re.findall(pattern, notes)
        d = {k.strip(): v.strip() for (k, v) in matches}
        return {k: v for k, v in d.items() if len(v) > 0}
    else:
        return {}
Creates dictionary of COBRA notes. Parameters ---------- sbase : libsbml.SBase Returns ------- dict of notes
def model_to_pymatbridge(model, variable_name="model", matlab=None): if scipy_sparse is None: raise ImportError("`model_to_pymatbridge` requires scipy!") if matlab is None: from IPython import get_ipython matlab = get_ipython().magics_manager.registry["MatlabMagics"].Matlab model_info = create_mat_dict(model) S = model_info["S"].todok() model_info["S"] = 0 temp_S_name = "cobra_pymatbridge_temp_" + uuid4().hex _check(matlab.set_variable(variable_name, model_info)) _check(matlab.set_variable(temp_S_name, S)) _check(matlab.run_code("%s.S = %s;" % (variable_name, temp_S_name))) for i in model_info.keys(): if i == "S": continue _check(matlab.run_code("{0}.{1} = {0}.{1}';".format(variable_name, i))) _check(matlab.run_code("clear %s;" % temp_S_name))
send the model to a MATLAB workspace through pymatbridge This model can then be manipulated through the COBRA toolbox Parameters ---------- variable_name : str The variable name to which the model will be assigned in the MATLAB workspace matlab : None or pymatbridge.Matlab instance The MATLAB workspace to which the variable will be sent. If this is None, then this will be sent to the same environment used in IPython magics.
def create(self):
    query = (
    ).format(self.__tablename__, self.__key__, self.__value__)
    connection = sqlite3.connect(self.sqlite_file)
    cursor = connection.cursor()
    cursor.execute(query)
    connection.commit()
Create the new table in the SQLite database
def with_revision(self, label, number):
    t = self.clone()
    t.revision = Revision(label, number)
    return t
Returns a Tag with a given revision
def queries_map():
    qs = _all_metric_queries()
    return dict(zip(qs[0], qs[1]) + zip(qs[2], qs[3]))
map from query parameter to query name
def run(self, clock):
    if clock.timestep_ix >= self.period_count:
        return
    for c in self.components:
        c.run(clock, self.gl)
    self._perform_year_end_procedure(clock)
Execute the entity at the current clock cycle. :param clock: The clock containing the current execution time and period information.
def verifyscrollbarhorizontal(self, window_name, object_name):
    try:
        object_handle = self._get_object_handle(window_name, object_name)
        if object_handle.AXOrientation == "AXHorizontalOrientation":
            return 1
    except:
        pass
    return 0
Verify scrollbar is horizontal @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
async def set_group_link_sharing_enabled(
        self, set_group_link_sharing_enabled_request
):
    response = hangouts_pb2.SetGroupLinkSharingEnabledResponse()
    await self._pb_request('conversations/setgrouplinksharingenabled',
                           set_group_link_sharing_enabled_request,
                           response)
    return response
Set whether group link sharing is enabled for a conversation.
def get_volume(self, datacenter_id, volume_id):
    response = self._perform_request(
        '/datacenters/%s/volumes/%s' % (datacenter_id, volume_id))
    return response
Retrieves a single volume by ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param volume_id: The unique ID of the volume. :type volume_id: ``str``
def get(self, key):
    node = self.get_node(key)
    if node is None:
        raise KeyError('No object named %s in the file' % key)
    if hasattr(node, 'attrs'):
        if 'pandas_type' in node.attrs:
            return self._read_group(node)
    return self._read_array(node)
Retrieve pandas object or group of Numpy ndarrays stored in file Parameters ---------- key : object Returns ------- obj : type of object stored in file
def _advapi32_decrypt(private_key, ciphertext, rsa_oaep_padding=False):
    flags = 0
    if rsa_oaep_padding:
        flags = Advapi32Const.CRYPT_OAEP
    ciphertext = ciphertext[::-1]
    buffer = buffer_from_bytes(ciphertext)
    out_len = new(advapi32, 'DWORD *', len(ciphertext))
    res = advapi32.CryptDecrypt(
        private_key.ex_key_handle,
        null(),
        True,
        flags,
        buffer,
        out_len
    )
    handle_error(res)
    return bytes_from_buffer(buffer, deref(out_len))
Decrypts a value using an RSA private key via CryptoAPI :param private_key: A PrivateKey instance to decrypt with :param ciphertext: A byte string of the data to decrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the plaintext
def splitext_files_only(filepath):
    "Custom version of splitext that doesn't perform splitext on directories"
    return (
        (filepath, '') if os.path.isdir(filepath)
        else os.path.splitext(filepath)
    )
Custom version of splitext that doesn't perform splitext on directories
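A quick demonstration of the behaviour described above, assuming splitext_files_only is importable; the temporary file and directory are created on the fly.

import os
import tempfile

# Directories are returned whole; files are split into (root, extension).
d = tempfile.mkdtemp()
f = os.path.join(d, "report.csv")
open(f, "w").close()

print(splitext_files_only(d))  # (d, '')
print(splitext_files_only(f))  # (os.path.join(d, 'report'), '.csv')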
def fix_js_args(func):
    fcode = six.get_function_code(func)
    fargs = fcode.co_varnames[fcode.co_argcount - 2:fcode.co_argcount]
    if fargs == ('this', 'arguments') or fargs == ('arguments', 'var'):
        return func
    code = append_arguments(six.get_function_code(func), ('this', 'arguments'))
    return types.FunctionType(
        code,
        six.get_function_globals(func),
        func.__name__,
        closure=six.get_function_closure(func))
Use this function when unsure whether func takes this and arguments as its last 2 args. It will append 2 args if it does not.
def fromdict(cls, config, check_fields=True):
    m = super(Config, cls).__new__(cls)
    m.path = '.'
    m.verbose = False
    m.config = m._merge_defaults(config)
    if check_fields:
        m._check_fields()
    return m
Create a Config object from config dict directly.
def check(self):
    self._validate_settings()
    r = self.local_renderer
    r.env.alias = r.env.aliases[0]
    r.sudo(r.env.check_command_template)
Run inadyn from the commandline to test the configuration. To be run like: fab role inadyn.check
def root_path():
    module_dir = os.path.dirname(globals()['__file__'])
    return os.path.dirname(os.path.dirname(module_dir))
Get the absolute path to the root of the demosys package
def close_cursor(self, handle):
    if handle in self.cursors:
        self.cursors[handle].close()
    else:
        raise KeyError('cursor with handle %s was not found' % handle)
Closes the cursor specified and removes it from the `self.cursors` dictionary.
def get_current_course_run(course, users_active_course_runs): current_course_run = None filtered_course_runs = [] all_course_runs = course['course_runs'] if users_active_course_runs: current_course_run = get_closest_course_run(users_active_course_runs) else: for course_run in all_course_runs: if is_course_run_enrollable(course_run) and is_course_run_upgradeable(course_run): filtered_course_runs.append(course_run) if not filtered_course_runs: filtered_course_runs = all_course_runs if filtered_course_runs: current_course_run = get_closest_course_run(filtered_course_runs) return current_course_run
Return the current course run based on the following conditions. - If user has active course runs (already enrolled) then return course run with closest start date Otherwise it will check the following logic: - Course run is enrollable (see is_course_run_enrollable) - Course run has a verified seat and the upgrade deadline has not expired. - Course run start date is closer to now than any other enrollable/upgradeable course runs. - If no enrollable/upgradeable course runs, return course run with most recent start date.
def delete_async(self, url, name, callback=None, params=None, headers=None): if not name: name = '' params = params or {} headers = headers or {} endpoint = self._build_endpoint_url(url, name) self._authenticate(params, headers) process_pool.apply_async(make_delete_request, args=(endpoint, params, headers), callback=callback)
Asynchronous DELETE request with the process pool.
def resources(ctx, gpu): user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job')) try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().job.resources(user, project_name, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get resources for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)
Get job resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon job -j 2 resources ``` For GPU resources \b ```bash $ polyaxon job -j 2 resources --gpu ```
def check_and_load_ssh_auth(): mac_username = get_config_value(constants.CONFIG_MAC_USERNAME_KEY) if not mac_username: logging.info("Can't setup ssh authorization; no mac_username specified") return if not _running_on_mac(): logging.info("Skipping SSH load, we are not running on Mac") return if _mac_version_is_post_yosemite(): _load_ssh_auth_post_yosemite(mac_username) else: _load_ssh_auth_pre_yosemite()
Will check the mac_username config value; if it is present, will load that user's SSH_AUTH_SOCK environment variable to the current environment. This allows git clones to behave the same for the daemon as they do for the user
def delete(path, verbose=False): if not os.path.exists(path): if os.path.islink(path): if verbose: print('Deleting broken link="{}"'.format(path)) os.unlink(path) elif os.path.isdir(path): if verbose: print('Deleting broken directory link="{}"'.format(path)) os.rmdir(path) elif os.path.isfile(path): if verbose: print('Deleting broken file link="{}"'.format(path)) os.unlink(path) else: if verbose: print('Not deleting non-existant path="{}"'.format(path)) else: if os.path.islink(path): if verbose: print('Deleting symbolic link="{}"'.format(path)) os.unlink(path) elif os.path.isfile(path): if verbose: print('Deleting file="{}"'.format(path)) os.unlink(path) elif os.path.isdir(path): if verbose: print('Deleting directory="{}"'.format(path)) if sys.platform.startswith('win32'): from ubelt import _win32_links _win32_links._win32_rmtree(path, verbose=verbose) else: import shutil shutil.rmtree(path)
Removes a file or recursively removes a directory. If a path does not exist, then this is does nothing. Args: path (PathLike): file or directory to remove verbose (bool): if True prints what is being done SeeAlso: send2trash - A cross-platform Python package for sending files to the trash instead of irreversibly deleting them. https://github.com/hsoft/send2trash Doctest: >>> import ubelt as ub >>> base = ub.ensure_app_cache_dir('ubelt', 'delete_test') >>> dpath1 = ub.ensuredir(join(base, 'dir')) >>> ub.ensuredir(join(base, 'dir', 'subdir')) >>> ub.touch(join(base, 'dir', 'to_remove1.txt')) >>> fpath1 = join(base, 'dir', 'subdir', 'to_remove3.txt') >>> fpath2 = join(base, 'dir', 'subdir', 'to_remove2.txt') >>> ub.touch(fpath1) >>> ub.touch(fpath2) >>> assert all(map(exists, (dpath1, fpath1, fpath2))) >>> ub.delete(fpath1) >>> assert all(map(exists, (dpath1, fpath2))) >>> assert not exists(fpath1) >>> ub.delete(dpath1) >>> assert not any(map(exists, (dpath1, fpath1, fpath2))) Doctest: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt', 'delete_test2') >>> dpath1 = ub.ensuredir(join(dpath, 'dir')) >>> fpath1 = ub.touch(join(dpath1, 'to_remove.txt')) >>> assert exists(fpath1) >>> ub.delete(dpath) >>> assert not exists(fpath1)
def get_subnets(context, limit=None, page_reverse=False, sorts=['id'], marker=None, filters=None, fields=None): LOG.info("get_subnets for tenant %s with filters %s fields %s" % (context.tenant_id, filters, fields)) filters = filters or {} subnets = db_api.subnet_find(context, limit=limit, page_reverse=page_reverse, sorts=sorts, marker_obj=marker, join_dns=True, join_routes=True, join_pool=True, **filters) for subnet in subnets: cache = subnet.get("_allocation_pool_cache") if not cache: db_api.subnet_update_set_alloc_pool_cache( context, subnet, subnet.allocation_pools) return v._make_subnets_list(subnets, fields=fields)
Retrieve a list of subnets. The contents of the list depend on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. : param fields: a list of strings that are valid keys in a subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
def sell(self, item_id, bid, buy_now, duration=3600, fast=False):
    method = 'POST'
    url = 'auctionhouse'
    data = {'buyNowPrice': buy_now,
            'startingBid': bid,
            'duration': duration,
            'itemData': {'id': item_id}}
    rc = self.__request__(method, url, data=json.dumps(data),
                          params={'sku_b': self.sku_b})
    if not fast:
        self.tradeStatus(rc['id'])
    return rc['id']
Start auction. Returns trade_id. :params item_id: Item id. :params bid: Starting bid. :params buy_now: Buy now price. :params duration: Auction duration in seconds (Default: 3600).
def model_stoch_vol(data, samples=2000, progressbar=True):
    from pymc3.distributions.timeseries import GaussianRandomWalk
    with pm.Model() as model:
        nu = pm.Exponential('nu', 1. / 10, testval=5.)
        sigma = pm.Exponential('sigma', 1. / .02, testval=.1)
        s = GaussianRandomWalk('s', sigma**-2, shape=len(data))
        volatility_process = pm.Deterministic('volatility_process',
                                              pm.math.exp(-2 * s))
        pm.StudentT('r', nu, lam=volatility_process, observed=data)
        trace = pm.sample(samples, progressbar=progressbar)
    return model, trace
Run stochastic volatility model. This model estimates the volatility of a returns series over time. Returns are assumed to be T-distributed. lambda (the width of the T-distribution) is assumed to follow a random walk. Parameters ---------- data : pandas.Series Return series to model. samples : int, optional Posterior samples to draw. Returns ------- model : pymc.Model object PyMC3 model containing all random variables. trace : pymc3.sampling.BaseTrace object A PyMC3 trace object that contains samples for each parameter of the posterior. See Also -------- plot_stoch_vol : plotting of stochastic volatility model
def getnodefor(self, name):
    "Return the node where the ``name`` would land to"
    node = self._getnodenamefor(name)
    return {node: self.cluster['nodes'][node]}
Return the node where the ``name`` would land to
def _fly(self, board, layers, things, the_plot):
    if (self.character in the_plot['bunker_hitters'] or
            self.character in the_plot['marauder_hitters']):
        return self._teleport((-1, -1))
    self._north(board, the_plot)
Handles the behaviour of visible bolts flying toward Marauders.
def print_round_trip_stats(round_trips, hide_pos=False): stats = gen_round_trip_stats(round_trips) print_table(stats['summary'], float_format='{:.2f}'.format, name='Summary stats') print_table(stats['pnl'], float_format='${:.2f}'.format, name='PnL stats') print_table(stats['duration'], float_format='{:.2f}'.format, name='Duration stats') print_table(stats['returns'] * 100, float_format='{:.2f}%'.format, name='Return stats') if not hide_pos: stats['symbols'].columns = stats['symbols'].columns.map(format_asset) print_table(stats['symbols'] * 100, float_format='{:.2f}%'.format, name='Symbol stats')
Print various round-trip statistics. Tries to pretty-print tables with HTML output if run inside IPython NB. Parameters ---------- round_trips : pd.DataFrame DataFrame with one row per round trip trade. - See full explanation in round_trips.extract_round_trips See also -------- round_trips.gen_round_trip_stats
def basecaller(arrayed, mindepth_majrule, mindepth_statistical, estH, estE): cons = np.zeros(arrayed.shape[1], dtype=np.uint8) cons.fill(78) arr = arrayed.view(np.uint8) for col in xrange(arr.shape[1]): carr = arr[:, col] mask = carr == 45 mask += carr == 78 marr = carr[~mask] if not marr.shape[0]: cons[col] = 78 elif np.all(marr == marr[0]): cons[col] = marr[0] else: counts = np.bincount(marr) pbase = np.argmax(counts) nump = counts[pbase] counts[pbase] = 0 qbase = np.argmax(counts) numq = counts[qbase] counts[qbase] = 0 rbase = np.argmax(counts) numr = counts[rbase] bidepth = nump + numq if bidepth < mindepth_majrule: cons[col] = 78 else: if bidepth > 500: base1 = int(500 * (nump / float(bidepth))) base2 = int(500 * (numq / float(bidepth))) else: base1 = nump base2 = numq if bidepth >= mindepth_statistical: ishet, prob = get_binom(base1, base2, estE, estH) if prob < 0.95: cons[col] = 78 else: if ishet: cons[col] = TRANS[(pbase, qbase)] else: cons[col] = pbase else: if nump == numq: cons[col] = TRANS[(pbase, qbase)] else: cons[col] = pbase return cons.view("S1")
call all sites in a locus array.
def stop_erps(self, stop_erps):
    _set_params(self.ode_obj, 'StopERP', stop_erps, self.ADOF + self.LDOF)
Set the ERP values for this object's DOF limits. Parameters ---------- stop_erps : float or sequence of float An ERP value to set on all degrees of freedom limits, or a list containing one such value for each degree of freedom limit.
def transfer(self, data, assert_ss=True, deassert_ss=True): if self._mosi is None: raise RuntimeError('Write attempted with no MOSI pin specified.') if self._miso is None: raise RuntimeError('Read attempted with no MISO pin specified.') if assert_ss and self._ss is not None: self._gpio.set_low(self._ss) result = bytearray(len(data)) for i in range(len(data)): for j in range(8): if self._write_shift(data[i], j) & self._mask: self._gpio.set_high(self._mosi) else: self._gpio.set_low(self._mosi) self._gpio.output(self._sclk, not self._clock_base) if self._read_leading: if self._gpio.is_high(self._miso): result[i] |= self._read_shift(self._mask, j) else: result[i] &= ~self._read_shift(self._mask, j) self._gpio.output(self._sclk, self._clock_base) if not self._read_leading: if self._gpio.is_high(self._miso): result[i] |= self._read_shift(self._mask, j) else: result[i] &= ~self._read_shift(self._mask, j) if deassert_ss and self._ss is not None: self._gpio.set_high(self._ss) return result
Full-duplex SPI read and write. If assert_ss is true, the SS line will be asserted low, the specified bytes will be clocked out the MOSI line while bytes will also be read from the MISO line, and if deassert_ss is true the SS line will be put back high. Bytes which are read will be returned as a bytearray object.
def to_str(prev, encoding=None):
    first = next(prev)
    if isinstance(first, str):
        if encoding is None:
            yield first
            for s in prev:
                yield s
        else:
            yield first.encode(encoding)
            for s in prev:
                yield s.encode(encoding)
    else:
        if encoding is None:
            encoding = sys.stdout.encoding or 'utf-8'
        yield first.decode(encoding)
        for s in prev:
            yield s.decode(encoding)
Convert data from previous pipe with specified encoding.
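A brief sketch of the decode branch, assuming to_str is importable; feeding it a plain iterator of bytes stands in for the previous pipe stage.

# to_str expects an iterator coming from the previous pipe stage; any
# iterator of bytes works for illustration.
chunks = iter([b'hello ', b'world'])
print(''.join(to_str(chunks, encoding='utf-8')))  # 'hello world'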
def reset_counter(self):
    self._cnt_retries = 0
    for i in self._url_counter:
        self._url_counter[i] = 0
reset the failed connection counters
def clean(ctx):
    ctx.run(f'python setup.py clean')
    dist = ROOT.joinpath('dist')
    print(f'removing {dist}')
    shutil.rmtree(str(dist))
Clean previously built package artifacts.
def calculate(self, T, P, zs, ws, method):
    if method == SIMPLE:
        Cplms = [i(T) for i in self.HeatCapacityLiquids]
        return mixing_simple(zs, Cplms)
    elif method == LALIBERTE:
        ws = list(ws)
        ws.pop(self.index_w)
        Cpl = Laliberte_heat_capacity(T, ws, self.wCASs)
        MW = mixing_simple(zs, self.MWs)
        return property_mass_to_molar(Cpl, MW)
    else:
        raise Exception('Method not valid')
r'''Method to calculate heat capacity of a liquid mixture at temperature `T`, pressure `P`, mole fractions `zs` and weight fractions `ws` with a given method. This method has no exception handling; see `mixture_property` for that. Parameters ---------- T : float Temperature at which to calculate the property, [K] P : float Pressure at which to calculate the property, [Pa] zs : list[float] Mole fractions of all species in the mixture, [-] ws : list[float] Weight fractions of all species in the mixture, [-] method : str Name of the method to use Returns ------- Cplm : float Molar heat capacity of the liquid mixture at the given conditions, [J/mol]
def execute(cur, *args):
    stmt = args[0]
    if len(args) > 1:
        stmt = stmt.replace('%', '%%').replace('?', '%r')
        print(stmt % (args[1]))
    return cur.execute(*args)
Utility function to print sqlite queries before executing. Use instead of cur.execute(). First argument is cursor. cur.execute(stmt) becomes util.execute(cur, stmt)
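A short in-memory sqlite3 sketch, assuming execute is importable as defined above; the table and values are invented.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
execute(cur, "CREATE TABLE kv (k TEXT, v INTEGER)")
# Prints the statement with parameters substituted via %r before running it,
# e.g.: INSERT INTO kv VALUES ('answer', 42)
execute(cur, "INSERT INTO kv VALUES (?, ?)", ("answer", 42))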
def awake(self, procid):
    logger.debug(f"Remove procid:{procid} from waitlists and reestablish it in the running list")
    for wait_list in self.rwait:
        if procid in wait_list:
            wait_list.remove(procid)
    for wait_list in self.twait:
        if procid in wait_list:
            wait_list.remove(procid)
    self.timers[procid] = None
    self.running.append(procid)
    if self._current is None:
        self._current = procid
Remove procid from waitlists and reestablish it in the running list
def is_ipv6_available():
    try:
        socket.socket(socket.AF_INET6).close()
    except (socket.error, AttributeError):
        return False
    return True
Check if IPv6 is available. :Return: `True` when an IPv6 socket can be created.
def from_server(cls, server, slug, identifier):
    task = server.get(
        'task',
        replacements={
            'slug': slug,
            'identifier': identifier})
    return cls(**task)
Retrieve a task from the server
def stop(ctx, commit, yes):
    user, project_name = get_project_or_local(ctx.obj.get('project'))
    if not yes and not click.confirm("Are you sure you want to stop notebook "
                                     "for project `{}/{}`".format(user, project_name)):
        click.echo('Exiting without stopping notebook.')
        sys.exit(1)
    if commit is None:
        commit = True
    try:
        PolyaxonClient().project.stop_notebook(user, project_name, commit)
        Printer.print_success('Notebook is being deleted')
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not stop notebook project `{}`.'.format(project_name))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
Stops the notebook deployment for this project if it exists. Uses [Caching](/references/polyaxon-cli/#caching)
def list_compounds():
    print('Compounds currently loaded:')
    for compound in sorted(compounds.keys()):
        phases = compounds[compound].get_phase_list()
        print('%s: %s' % (compound, ', '.join(phases)))
List all compounds that are currently loaded in the thermo module, and their phases.
def auto_constraints(self, component=None): if not component: for table in self.tables: self.auto_constraints(table) return if not component.tableSchema.primaryKey: idcol = component.get_column(term_uri('id')) if idcol: component.tableSchema.primaryKey = [idcol.name] self._auto_foreign_keys(component) try: table_type = self.get_tabletype(component) except ValueError: return for table in self.tables: self._auto_foreign_keys(table, component=component, table_type=table_type)
Use CLDF reference properties to implicitly create foreign key constraints. :param component: A Table object or `None`.
def _credentials_from_request(request):
    if (oauth2_settings.storage_model is None or
            request.user.is_authenticated()):
        return get_storage(request).get()
    else:
        return None
Gets the authorized credentials for this flow, if they exist.
def get_style(self, name, workspace=None):
    styles = self.get_styles(names=name, workspaces=workspace)
    return self._return_first_item(styles)
returns a single style object. Will return None if no style is found. Will raise an error if more than one style with the same name is found.
def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message): try: if message is None: raise ValueError("message was none") encoded_message = bytes(message, "utf-8") if encoded_message is None: raise ValueError("utf-8 encoding of message failed") if domain_name: try: UDP_IP = socket.gethostbyname(domain_name) except Exception: pass if UDP_IP is None: raise Exception("UDP_IP is None") if UDP_PORT is None: raise Exception("UDP_PORT is None") sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.settimeout(sock_timeout) sock.sendto(bytes(message, "utf-8"), (UDP_IP, UDP_PORT)) sock.close() except socket.timeout: logger.debug("Failed to send usage tracking data: socket timeout") except OSError as e: logger.debug("Failed to send usage tracking data: OSError: {}".format(e)) except Exception as e: logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
Send UDP messages to usage tracker asynchronously This multiprocessing based messenger was written to overcome the limitations of signalling/terminating a thread that is blocked on a system call. This messenger is created as a separate process, and initialized with 2 queues, to_send to receive messages to be sent to the internet. Args: - domain_name (str) : Domain name string - UDP_IP (str) : IP address YYY.YYY.YYY.YYY - UDP_PORT (int) : UDP port to send out on - sock_timeout (int) : Socket timeout - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet
def checkSerial(self):
    for item in self.rxSerial(self._TUN._tun.mtu):
        try:
            self._TUN._tun.write(item)
        except pytun.Error as error:
            print("pytun error writing: {0}".format(item))
            print(error)
Check the serial port for data to write to the TUN adapter.
def conference_speak(self, call_params):
    path = '/' + self.api_version + '/ConferenceSpeak/'
    method = 'POST'
    return self.request(path, method, call_params)
REST Conference Speak helper
def shuffle_srv(records):
    if not records:
        return []
    ret = []
    while len(records) > 1:
        weight_sum = 0
        for rrecord in records:
            weight_sum += rrecord.weight + 0.1
        thres = random.random() * weight_sum
        weight_sum = 0
        for rrecord in records:
            weight_sum += rrecord.weight + 0.1
            if thres < weight_sum:
                records.remove(rrecord)
                ret.append(rrecord)
                break
    ret.append(records[0])
    return ret
Randomly reorder SRV records using their weights. :Parameters: - `records`: SRV records to shuffle. :Types: - `records`: sequence of :dns:`dns.rdtypes.IN.SRV` :return: reordered records. :returntype: `list` of :dns:`dns.rdtypes.IN.SRV`
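A sketch of the weighted reordering, assuming shuffle_srv is importable. The real inputs are dnspython SRV rdata objects; a namedtuple with a weight attribute stands in here, since that is the only attribute the function touches. Note the input list is consumed in place, so a copy is passed.

import random
from collections import namedtuple

# Stand-in for dns.rdtypes.IN.SRV records: only .weight is used by shuffle_srv.
FakeSRV = namedtuple('FakeSRV', 'target weight')
records = [FakeSRV('a.example.', 60), FakeSRV('b.example.', 30), FakeSRV('c.example.', 10)]

# Higher-weight records tend to come out earlier in the shuffled order.
print([r.target for r in shuffle_srv(list(records))])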
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False): arr_padded, pad_amounts = ia.pad_to_aspect_ratio(self.arr, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True) segmap = SegmentationMapOnImage(arr_padded, shape=self.shape) segmap.input_was = self.input_was if return_pad_amounts: return segmap, pad_amounts else: return segmap
Pad the segmentation map on its sides so that its matches a target aspect ratio. Depending on which dimension is smaller (height or width), only the corresponding sides (left/right or top/bottom) will be padded. In each case, both of the sides will be padded equally. Parameters ---------- aspect_ratio : float Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice as much width as height. mode : str, optional Padding mode to use. See :func:`numpy.pad` for details. cval : number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. return_pad_amounts : bool, optional If False, then only the padded image will be returned. If True, a tuple with two entries will be returned, where the first entry is the padded image and the second entry are the amounts by which each image side was padded. These amounts are again a tuple of the form (top, right, bottom, left), with each value being an integer. Returns ------- segmap : imgaug.SegmentationMapOnImage Padded segmentation map as SegmentationMapOnImage object. pad_amounts : tuple of int Amounts by which the segmentation map was padded on each side, given as a tuple ``(top, right, bottom, left)``. This tuple is only returned if `return_pad_amounts` was set to True.
def post(self, url, params={}, files=None):
    params.update({'api_key': self.api_key})
    try:
        response = requests.post(self.host + url, data=params, files=files)
        return self.json_parse(response.content)
    except RequestException as e:
        return self.json_parse(e.args)
Issues a POST request against the API, allows for multipart data uploads :param url: a string, the url you are requesting :param params: a dict, the key-value of all the parameters needed in the request :param files: a list, the list of tuples of files :returns: a dict parsed of the JSON response
def find_imports(self, pbds): imports = list(set(self.uses).difference(set(self.defines))) for imp in imports: for p in pbds: if imp in p.defines: self.imports.append(p.name) break self.imports = list(set(self.imports)) for import_file in self.imports: self.lines.insert(2, 'import "{}";'.format(import_file))
Find all missing imports in list of Pbd instances.
def load_image(self): try: image = initializers.load_tiff(self.filename) image = initializers.normalize( image, invert=self.invert, scale=self.exposure, dtype=self.float_precision ) except IOError as e: log.error("Could not find image '%s'" % self.filename) raise e return image
Read the file and perform any transforms to get a loaded image
def rotation( x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1 ): if is_random: theta = np.pi / 180 * np.random.uniform(-rg, rg) else: theta = np.pi / 180 * rg rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) h, w = x.shape[row_index], x.shape[col_index] transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w) x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order) return x
Rotate an image randomly or non-randomly. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). rg : int or float Degree to rotate, usually 0 ~ 180. is_random : boolean If True, randomly rotate. Default is False row_index col_index and channel_index : int Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). fill_mode : str Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__ cval : float Value used for points outside the boundaries of the input if mode=`constant`. Default is 0.0 order : int The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__ Returns ------- numpy.array A processed image. Examples --------- >>> x --> [row, col, 1] >>> x = tl.prepro.rotation(x, rg=40, is_random=False) >>> tl.vis.save_image(x, 'im.png')
def _select_manager(backend_name):
    if backend_name == 'RedisBackend':
        lock_manager = _LockManagerRedis
    elif backend_name == 'DatabaseBackend':
        lock_manager = _LockManagerDB
    else:
        raise NotImplementedError
    return lock_manager
Select the proper LockManager based on the current backend used by Celery. :raise NotImplementedError: If Celery is using an unsupported backend. :param str backend_name: Class name of the current Celery backend. Usually value of current_app.extensions['celery'].celery.backend.__class__.__name__. :return: Class definition object (not instance). One of the _LockManager* classes.
def check_consistency(self): error = False regex = re.compile('([a-zA-Z_][a-zA-Z0-9_]*)') if 'full' not in self.modelstr: raise ModelError( 'Model must contain a `full` key describing ' 'the entire image formation' ) for name, eq in iteritems(self.modelstr): var = regex.findall(eq) for v in var: v = re.sub(r"^d", '', v) if v not in self.varmap: log.error( "Variable '%s' (eq. '%s': '%s') not found in category map %r" % (v, name, eq, self.varmap) ) error = True if error: raise ModelError('Inconsistent varmap and modelstr descriptions')
Make sure that the required comps are included in the list of components supplied by the user. Also check that the parameters are consistent across the many components.
def squad(self, squad_id=0, persona_id=None): method = 'GET' url = 'squad/%s/user/%s' % (squad_id, persona_id or self.persona_id) events = [self.pin.event('page_view', 'Hub - Squads')] self.pin.send(events) rc = self.__request__(method, url) events = [self.pin.event('page_view', 'Squad Details'), self.pin.event('page_view', 'Squads - Squad Overview')] self.pin.send(events) return [itemParse(i) for i in rc.get('players', ())]
Return a squad. :params squad_id: Squad id.
def proc_elms(**kwargs) -> list:
    return [
        (ELEM_KEYS.get(k, k), ELEM_VALS.get(ELEM_KEYS.get(k, k), dict()).get(v, v))
        for k, v in kwargs.items()
        if (k in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()))
        and (k not in PRSV_COLS)
    ]
Bloomberg overrides for elements Args: **kwargs: overrides Returns: list of tuples Examples: >>> proc_elms(PerAdj='A', Per='W') [('periodicityAdjustment', 'ACTUAL'), ('periodicitySelection', 'WEEKLY')] >>> proc_elms(Days='A', Fill='B') [('nonTradingDayFillOption', 'ALL_CALENDAR_DAYS'), ('nonTradingDayFillMethod', 'NIL_VALUE')] >>> proc_elms(CshAdjNormal=False, CshAdjAbnormal=True) [('adjustmentNormal', False), ('adjustmentAbnormal', True)] >>> proc_elms(Per='W', Quote='Average', start_date='2018-01-10') [('periodicitySelection', 'WEEKLY'), ('overrideOption', 'OVERRIDE_OPTION_GPA')] >>> proc_elms(QuoteType='Y') [('pricingOption', 'PRICING_OPTION_YIELD')] >>> proc_elms(QuoteType='Y', cache=True) [('pricingOption', 'PRICING_OPTION_YIELD')]
def _interrupt_read(self):
    data = self._device.read(ENDPOINT, REQ_INT_LEN, timeout=TIMEOUT)
    LOGGER.debug('Read data: %r', data)
    return data
Read data from device.
def create_project_thread(session, member_ids, project_id, message):
    return create_thread(session, member_ids, 'project', project_id, message)
Create a project thread
def changeset_info(changeset): keys = [tag.attrib.get('k') for tag in changeset.getchildren()] keys += ['id', 'user', 'uid', 'bbox', 'created_at'] values = [tag.attrib.get('v') for tag in changeset.getchildren()] values += [ changeset.get('id'), changeset.get('user'), changeset.get('uid'), get_bounds(changeset), changeset.get('created_at') ] return dict(zip(keys, values))
Return a dictionary with id, user, user_id, bounds, date of creation and all the tags of the changeset. Args: changeset: the XML string of the changeset.
def format_docstring(*args, **kwargs):
    def decorator(func):
        func.__doc__ = getdoc(func).format(*args, **kwargs)
        return func
    return decorator
Decorator for clean docstring formatting
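A small usage sketch, assuming format_docstring and inspect.getdoc are available as in the module above; the decorated function and placeholder name are invented.

# Placeholders in the docstring are filled in at definition time.
@format_docstring(default_timeout=30)
def fetch(url):
    """Fetch a URL, waiting at most {default_timeout} seconds."""
    ...

print(fetch.__doc__)  # Fetch a URL, waiting at most 30 seconds.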
def _add_id_to_keys(self, pk, conn=None):
    if conn is None:
        conn = self._get_connection()
    conn.sadd(self._get_ids_key(), pk)
_add_id_to_keys - Adds the primary key to the table's internal set of IDs.
def add(path): click.echo('\nAdding {} to cache......'.format(path), nl=False) try: r = cpenv.resolve(path) except Exception as e: click.echo(bold_red('FAILED')) click.echo(e) return if isinstance(r.resolved[0], cpenv.VirtualEnvironment): EnvironmentCache.add(r.resolved[0]) EnvironmentCache.save() click.echo(bold_green('OK!'))
Add an environment to the cache. Allows you to activate the environment by name instead of by full path
def bulkDetails(self, packageNames): params = {'au': '1'} req = googleplay_pb2.BulkDetailsRequest() req.docid.extend(packageNames) data = req.SerializeToString() message = self.executeRequestApi2(BULK_URL, post_data=data.decode("utf-8"), content_type=CONTENT_TYPE_PROTO, params=params) response = message.payload.bulkDetailsResponse return [None if not utils.hasDoc(entry) else utils.parseProtobufObj(entry.doc) for entry in response.entry]
Get several apps details from a list of package names. This is much more efficient than calling N times details() since it requires only one request. If an item is not found it returns an empty object instead of throwing a RequestError('Item not found') like the details() function Args: packageNames (list): a list of app IDs (usually starting with 'com.'). Returns: a list of dictionaries containing docv2 data, or None if the app doesn't exist
def experiment(ctx, project, experiment):
    ctx.obj = ctx.obj or {}
    ctx.obj['project'] = project
    ctx.obj['experiment'] = experiment
Commands for experiments.
def parse_scalar(scalar_data, version):
    try:
        return hs_scalar[version].parseString(scalar_data, parseAll=True)[0]
    except pp.ParseException as pe:
        raise ZincParseException(
            'Failed to parse scalar: %s' % reformat_exception(pe),
            scalar_data, 1, pe.col)
    except:
        LOG.debug('Failing scalar data: %r (version %r)',
                  scalar_data, version)
Parse a Project Haystack scalar in ZINC format.
def get_moderation(request):
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute(
            )
            moderations = [x[0] for x in cursor.fetchall()]
    return moderations
Return the list of publications that need moderation.
def cmd_status(opts):
    config = load_config(opts.config)
    b = get_blockade(config, opts)
    containers = b.status()
    print_containers(containers, opts.json)
Print status of containers and networks
def _aggr_mode(inList):
    valueCounts = dict()
    nonNone = 0
    for elem in inList:
        if elem == SENTINEL_VALUE_FOR_MISSING_DATA:
            continue
        nonNone += 1
        if elem in valueCounts:
            valueCounts[elem] += 1
        else:
            valueCounts[elem] = 1
    if nonNone == 0:
        return None
    sortedCounts = valueCounts.items()
    sortedCounts.sort(cmp=lambda x, y: x[1] - y[1], reverse=True)
    return sortedCounts[0][0]
Returns most common value seen in the non-None elements of the list
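A usage sketch for the pair above. SENTINEL_VALUE_FOR_MISSING_DATA is a module constant not shown in this record, so a stand-in value is used; note the function itself is Python 2 era code (dict.items() returning a list, list.sort(cmp=...)), so this assumes a Python 2 interpreter.

# Stand-in for the module's missing-data sentinel; only equality matters here.
SENTINEL_VALUE_FOR_MISSING_DATA = None

values = [3, 7, 7, None, 3, 7, None]
print(_aggr_mode(values))          # 7 (most common non-missing value)
print(_aggr_mode([None, None]))    # None (no non-missing elements)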
def main():
    cred_search = CredentialSearch()
    arg = argparse.ArgumentParser(parents=[cred_search.argparser],
                                  conflict_handler='resolve')
    arg.add_argument('-c', '--count', help="Only show the number of results",
                     action="store_true")
    arguments = arg.parse_args()
    if arguments.count:
        print_line("Number of credentials: {}".format(cred_search.argument_count()))
    else:
        response = cred_search.get_credentials()
        for hit in response:
            print_json(hit.to_dict(include_meta=True))
Main credentials tool
def attach(self, observer):
    if not observer in self._observers:
        self._observers.append(observer)
    return self
Attach an observer. Args: observer (func): A function to be called when new messages arrive Returns: :class:`Stream`. Current instance to allow chaining
def edges(self, **kwargs): edges = self._query('edges', **kwargs) for edge in edges: identifier_source = edge['source_type'] + \ '[' + edge['source_title'] + ']' identifier_target = edge['target_type'] + \ '[' + edge['target_title'] + ']' yield Edge(source=self.resources[identifier_source], target=self.resources[identifier_target], relationship=edge['relationship'], node=edge['certname'])
Get the known catalog edges, formed between two resources. :param \*\*kwargs: The rest of the keyword arguments are passed to the _query function. :returns: A generating yielding Edges. :rtype: :class:`pypuppetdb.types.Edge`
def check_mro(self, bases):
    try:
        self.add_node("temp")
        for base in bases:
            nx.DiGraph.add_edge(self, base, "temp")
        result = self.get_mro("temp")[1:]
    finally:
        self.remove_node("temp")
    return result
Check if C3 MRO is possible with given bases
def template_uploader_yaml(cl_args, masters):
    single_master = masters[0]
    uploader_config_template = "%s/standalone/templates/uploader.template.yaml" \
        % cl_args["config_path"]
    uploader_config_actual = "%s/standalone/uploader.yaml" % cl_args["config_path"]
    template_file(uploader_config_template, uploader_config_actual,
                  {"<http_uploader_uri>": "http://%s:9000/api/v1/file/upload" % single_master})
Template uploader.yaml
def delete_category(category_id):
    try:
        res = _pybossa_req('delete', 'category', category_id)
        if type(res).__name__ == 'bool':
            return True
        else:
            return res
    except:
        raise
Delete a Category with id = category_id. :param category_id: PYBOSSA Category ID :type category_id: integer :returns: True -- the response status code
def compile_protofile(proto_file_path):
    out_file = tempfile.mkstemp()[1]
    try:
        subprocess.check_output(['protoc', '--include_source_info',
                                 '--descriptor_set_out', out_file,
                                 proto_file_path])
    except subprocess.CalledProcessError as e:
        sys.exit('protoc returned status {}'.format(e.returncode))
    return out_file
Compile proto file to descriptor set. Args: proto_file_path: Path to proto file to compile. Returns: Path to file containing compiled descriptor set. Raises: SystemExit if the compilation fails.
def _get_request_type(self):
    value = self.document.tag.lower()
    if value in allowed_request_types[self.params['service']]:
        self.params["request"] = value
    else:
        raise OWSInvalidParameterValue("Request type %s is not supported" % value,
                                       value="request")
    return self.params["request"]
Find requested request type in POST request.
def add_directory(self, *args, **kwargs):
    exc = kwargs.get('exclusions', None)
    for path in args:
        self.files.append(DirectoryPath(path, self, exclusions=exc))
Add directory or directories list to bundle :param exclusions: List of excluded paths :type path: str|unicode :type exclusions: list
def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, names): print('Converting lrelu ...') if names == 'short': tf_name = 'lRELU' + random_string(3) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) leakyrelu = \ keras.layers.LeakyReLU(alpha=params['alpha'], name=tf_name) layers[scope_name] = leakyrelu(layers[inputs[0]])
Convert leaky relu layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers