def SETB(cpu, dest): dest.write(Operators.ITEBV(dest.size, cpu.CF, 1, 0))
Sets byte if below. :param cpu: current CPU. :param dest: destination operand.
def get_topology_info(*args):
    instance = tornado.ioloop.IOLoop.instance()
    try:
        return instance.run_sync(lambda: API.get_topology_info(*args))
    except Exception:
        Log.debug(traceback.format_exc())
        raise
Synced API call to get topology information
def get_user(self, user_id, depth=1):
    response = self._perform_request(
        '/um/users/%s?depth=%s' % (user_id, str(depth)))
    return response
Retrieves a single user by ID. :param user_id: The unique ID of the user. :type user_id: ``str`` :param depth: The depth of the response data. :type depth: ``int``
def _parse_documentclass(self):
    command = LatexCommand(
        'documentclass',
        {'name': 'options', 'required': False, 'bracket': '['},
        {'name': 'class_name', 'required': True, 'bracket': '{'})
    try:
        parsed = next(command.parse(self._tex))
    except StopIteration:
        self._logger.warning('lsstdoc has no documentclass')
        self._document_options = []
        return  # without this return, `parsed` would be undefined below
    try:
        content = parsed['options']
        self._document_options = [opt.strip() for opt in content.split(',')]
    except KeyError:
        self._logger.warning('lsstdoc has no documentclass options')
        self._document_options = []
Parse documentclass options. Sets the ``_document_options`` attribute.
def create_item(self, token, name, parent_id, **kwargs):
    parameters = dict()
    parameters['token'] = token
    parameters['name'] = name
    parameters['parentid'] = parent_id
    optional_keys = ['description', 'uuid', 'privacy']
    for key in optional_keys:
        if key in kwargs:
            parameters[key] = kwargs[key]
    response = self.request('midas.item.create', parameters)
    return response
Create an item to the server. :param token: A valid token for the user in question. :type token: string :param name: The name of the item to be created. :type name: string :param parent_id: The id of the destination folder. :type parent_id: int | long :param description: (optional) The description text of the item. :type description: string :param uuid: (optional) The UUID for the item. It will be generated if not given. :type uuid: string :param privacy: (optional) The privacy state of the item ('Public' or 'Private'). :type privacy: string :returns: Dictionary containing the details of the created item. :rtype: dict
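For illustration, a hypothetical call against a connected Midas client (the `communicator` object and `token` are assumed names, not from the source):

item = communicator.create_item(token, 'scan-001', 42,
                                description='Raw CT scan data',
                                privacy='Private')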
def indexesOptional(f):
    stack = inspect.stack()
    _NO_INDEX_CHECK_NEEDED.add('%s.%s.%s' % (f.__module__, stack[1][3], f.__name__))
    del stack
    return f
Decorate test methods with this if you don't require strict index checking
def add_annotation_type(self, doc, annotation_type):
    if len(doc.annotations) != 0:
        if not self.annotation_type_set:
            self.annotation_type_set = True
            if validations.validate_annotation_type(annotation_type):
                doc.annotations[-1].annotation_type = annotation_type
                return True
            else:
                raise SPDXValueError('Annotation::AnnotationType')
        else:
            raise CardinalityError('Annotation::AnnotationType')
    else:
        raise OrderError('Annotation::AnnotationType')
Sets the annotation type. Raises CardinalityError if already set. Raises OrderError if no annotator was defined before. Raises SPDXValueError if the value is invalid.
def select(self, sql_string, cols, *args, **kwargs):
    working_columns = None
    if kwargs.get('columns') is not None:
        working_columns = kwargs.pop('columns')
    # note: was `*kwargs`, which is invalid; keyword args must be splatted with **
    query = self._assemble_select(sql_string, cols, *args, **kwargs)
    return self._execute(query, working_columns=working_columns)
Execute a SELECT statement :sql_string: An SQL string template :columns: A list of columns to be returned by the query :*args: Arguments to be passed for query parameters. :returns: Psycopg2 result
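A minimal usage sketch, assuming `db` is an instance of this class and `_assemble_select` accepts psycopg2-style placeholders:

rows = db.select('SELECT * FROM users WHERE age > %s', ['id', 'name'], 21)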
def monitor_wrapper(f, task_id, monitoring_hub_url, run_id, sleep_dur):
    def wrapped(*args, **kwargs):
        p = Process(target=monitor,
                    args=(os.getpid(), task_id, monitoring_hub_url, run_id, sleep_dur))
        p.start()
        try:
            return f(*args, **kwargs)
        finally:
            p.terminate()
            p.join()
    return wrapped
Internal: wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.
def get_clan_image(self, obj: BaseAttrDict):
    try:
        badge_id = obj.clan.badge_id
    except AttributeError:
        try:
            badge_id = obj.badge_id
        except AttributeError:
            return 'https://i.imgur.com/Y3uXsgj.png'
    if badge_id is None:
        return 'https://i.imgur.com/Y3uXsgj.png'
    for i in self.constants.alliance_badges:
        if i.id == badge_id:
            return 'https://royaleapi.github.io/cr-api-assets/badges/' + i.name + '.png'
Get the clan badge image URL. Parameters ---------- obj: official_api.models.BaseAttrDict An object that has the clan badge ID either in ``.clan.badge_id`` or ``.badge_id``. Can be a clan or a profile, for example. Returns ------- str
def init(self, hosts=None, cacert=None, client_cert=None, client_key=None):
    try:
        import etcd
        self.module = etcd
    except ImportError:
        pass
    if not self.module:
        return self._parse_jetconfig()
    hosts = env('PYCONFIG_ETCD_HOSTS', hosts)
    protocol = env('PYCONFIG_ETCD_PROTOCOL', None)
    cacert = env('PYCONFIG_ETCD_CACERT', cacert)
    client_cert = env('PYCONFIG_ETCD_CERT', client_cert)
    client_key = env('PYCONFIG_ETCD_KEY', client_key)
    username = None
    password = None
    auth = env('PYCONFIG_ETCD_AUTH', None)
    if auth:
        auth = auth.split(':')
        auth.append('')
        username = auth[0]
        password = auth[1]
    hosts = self._parse_hosts(hosts)
    if hosts is None:
        return
    kw = {}
    kw['allow_reconnect'] = True
    if protocol:
        kw['protocol'] = protocol
    if username:
        kw['username'] = username
    if password:
        kw['password'] = password
    if cacert:
        kw['ca_cert'] = os.path.abspath(cacert)
    if client_cert and client_key:
        kw['cert'] = (os.path.abspath(client_cert), os.path.abspath(client_key))
    elif client_cert:
        kw['cert'] = os.path.abspath(client_cert)
    if cacert or client_cert or client_key:
        kw['protocol'] = 'https'
    self.client = self.module.Client(hosts, **kw)
Handle creating the new etcd client instance and other business. :param hosts: Host string or list of hosts (default: `'127.0.0.1:2379'`) :param cacert: CA cert filename (optional) :param client_cert: Client cert filename (optional) :param client_key: Client key filename (optional) :type cacert: str :type client_cert: str :type client_key: str
def _bufcountlines(filename, gzipped):
    if gzipped:
        fin = gzip.open(filename)
    else:
        fin = open(filename)
    nlines = 0
    buf_size = 1024 * 1024
    read_f = fin.read
    buf = read_f(buf_size)
    while buf:
        nlines += buf.count('\n')
        buf = read_f(buf_size)
    fin.close()
    return nlines
Fast line counter. Used to quickly sum the number of input reads when running link_fastqs to append files.
def route(bp, *args, **kwargs):
    kwargs['strict_slashes'] = kwargs.pop('strict_slashes', False)
    body = _validate_schema(kwargs.pop('_body', None))
    query = _validate_schema(kwargs.pop('_query', None))
    output = _validate_schema(kwargs.pop('marshal_with', None))
    validate = kwargs.pop('validate', True)

    def decorator(f):
        @bp.route(*args, **kwargs)
        @wraps(f)
        def wrapper(*inner_args, **inner_kwargs):
            try:
                if query is not None:
                    query.strict = validate
                    url = furl(request.url)
                    inner_kwargs['_query'] = query.load(data=url.args)
                if body is not None:
                    body.strict = validate
                    json_data = request.get_json()
                    if json_data is None:
                        json_data = {}
                    inner_kwargs['_body'] = body.load(data=json_data)
            except ValidationError as err:
                return jsonify(err.messages), 422
            if output:
                data = output.dump(f(*inner_args, **inner_kwargs))
                return jsonify(data[0])
            return f(*inner_args, **inner_kwargs)
        return f
    return decorator
Journey route decorator Enables simple serialization, deserialization and validation of Flask routes with the help of Marshmallow. :param bp: :class:`flask.Blueprint` object :param args: args to pass along to `Blueprint.route` :param kwargs: - :strict_slashes: Enable / disable strict slashes (default False) - :validate: Enable / disable body/query validation (default True) - :_query: Unmarshal Query string into this schema - :_body: Unmarshal JSON body into this schema - :marshal_with: Serialize the output with this schema :raises: - ValidationError if the query parameters or JSON body fails validation
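A hypothetical endpoint using the decorator; `bp`, `UserSchema`, and `save_user` are assumed names, not part of the source:

@route(bp, '/users', methods=['POST'], _body=UserSchema(), marshal_with=UserSchema())
def create_user(_body):
    # _body arrives as a deserialized, validated object
    return save_user(_body)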
def _handle_variant(self):
    def the_func(a_tuple, variant=0):
        (signature, an_obj) = a_tuple
        (func, sig) = self.COMPLETE.parseString(signature)[0]
        assert sig == signature
        (xformed, _) = func(an_obj, variant=variant + 1)
        return (xformed, xformed.variant_level)
    return (the_func, 'v')
Generate the correct function for a variant signature. :returns: function that returns an appropriate value :rtype: ((str * object) or list)-> object
def property_derivative_T(self, T, P, zs, ws, order=1):
    sorted_valid_methods = self.select_valid_methods(T, P, zs, ws)
    for method in sorted_valid_methods:
        try:
            return self.calculate_derivative_T(T, P, zs, ws, method, order)
        except:
            pass
    return None
Method to calculate a derivative of a mixture property with respect to temperature at constant pressure and composition, of a given order. Methods found valid by `select_valid_methods` are attempted until a method succeeds. If no methods are valid and succeed, None is returned. Calls `calculate_derivative_T` internally to perform the actual calculation. .. math:: \text{derivative} = \frac{d (\text{property})}{d T}\bigg|_{P, z} Parameters ---------- T : float Temperature at which to calculate the derivative, [K] P : float Pressure at which to calculate the derivative, [Pa] zs : list[float] Mole fractions of all species in the mixture, [-] ws : list[float] Weight fractions of all species in the mixture, [-] order : int Order of the derivative, >= 1 Returns ------- d_prop_d_T_at_P : float Calculated derivative property, [`units/K^order`]
def _forbidden_attributes(obj):
    for key in list(obj.data.keys()):
        if key in list(obj.reserved_keys.keys()):
            obj.data.pop(key)
    return obj
Return the object without the forbidden attributes.
def _sorted_copy(self, comparison, reversed=False):
    sorted = self.copy()
    _list.sort(sorted, comparison)
    if reversed:
        _list.reverse(sorted)
    return sorted
Returns a sorted copy with the colors arranged according to the given comparison.
def create_template(material, path, show=False):
    file_name = 'dataset-%s.csv' % material.lower()
    file_path = os.path.join(path, file_name)
    with open(file_path, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Name', material])
        writer.writerow(['Description', '<Add a data set description here.>'])
        writer.writerow(['Reference', '<Add a reference to the source of the data set here.>'])
        writer.writerow(['Temperature', '<parameter 1 name>', '<parameter 2 name>', '<parameter 3 name>'])
        writer.writerow(['T', '<parameter 1 display symbol>', '<parameter 2 display symbol>', '<parameter 3 display symbol>'])
        writer.writerow(['K', '<parameter 1 units>', '<parameter 2 units>', '<parameter 3 units>'])
        writer.writerow(['T', '<parameter 1 symbol>', '<parameter 2 symbol>', '<parameter 3 symbol>'])
        for i in range(10):
            writer.writerow([100.0 + i*50, float(i), 10.0 + i, 100.0 + i])
    if show is True:
        webbrowser.open_new(file_path)
Create a template csv file for a data set. :param material: the name of the material :param path: the path of the directory where the file must be written :param show: a boolean indicating whether the created file should be displayed after creation
def detect(self):
    self.log.info("initializing AP detection on all sweeps...")
    t1 = cm.timeit()
    for sweep in range(self.abf.sweeps):
        self.detectSweep(sweep)
    self.log.info("AP analysis of %d sweeps found %d APs (completed in %s)",
                  self.abf.sweeps, len(self.APs), cm.timeit(t1))
runs AP detection on every sweep.
def create(self):
    dtype = NP_COMPONENT_DTYPE[self.component_type.value]
    data = numpy.frombuffer(
        self.buffer.read(byte_length=self.byte_length, byte_offset=self.byte_offset),
        count=self.count * self.components,
        dtype=dtype,
    )
    return dtype, data
Create the VBO
def hide_variables_window(self):
    if self.var_window is not None:
        self.var_window.window.destroy()
        self.var_window = None
Hide the variables window
def unregister_fetcher(self, object_class):
    self._lock.acquire()
    try:
        cache = self._caches.get(object_class)
        if not cache:
            return
        cache.set_fetcher(None)
    finally:
        self._lock.release()
Unregister a fetcher class for an object class. :Parameters: - `object_class`: class retrieved by the fetcher. :Types: - `object_class`: `classobj`
def to_uint8(self):
    arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
    arr_uint8 = arr_0to255.astype(np.uint8)
    return arr_uint8
Convert this heatmaps object to a 0-to-255 array. Returns ------- arr_uint8 : (H,W,C) ndarray Heatmap as a 0-to-255 array (dtype is uint8).
def export_as_csv_action(description="Export selected objects as CSV file",
                         fields=None, header=True):
    def export_as_csv(modeladmin, request, queryset):
        opts = modeladmin.model._meta
        if not fields:
            field_names = [field.name for field in opts.fields]
        else:
            field_names = fields
        response = HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = "attachment; filename={filename}.csv".format(
            filename=str(opts).replace(".", "_")
        )
        writer = unicodecsv.writer(response, encoding="utf-8")
        if header:
            writer.writerow(field_names)
        for obj in queryset:
            row = []
            for field_name in field_names:
                field = getattr(obj, field_name)
                if callable(field):
                    value = field()
                else:
                    value = field
                if value is None:
                    row.append("[Not Set]")
                elif not value and isinstance(value, string_types):
                    row.append("[Empty]")
                else:
                    row.append(value)
            writer.writerow(row)
        return response
    export_as_csv.short_description = description
    return export_as_csv
Return an export csv action. Arguments: description (string): action description fields ([string]): list of model fields to include header (bool): whether or not to output the column names as the first row
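For context, a typical (hypothetical) registration in a Django admin class, assuming `from django.contrib import admin` and a `Book` model:

class BookAdmin(admin.ModelAdmin):
    actions = [export_as_csv_action("Export books as CSV",
                                    fields=['title', 'author'])]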
def _get_param(self, param, allowed_values=None, optional=False):
    request_params = self._request_params()
    if param in request_params:
        value = request_params[param].lower()
        if allowed_values is not None:
            if value in allowed_values:
                self.params[param] = value
            else:
                raise OWSInvalidParameterValue("%s %s is not supported" % (param, value),
                                               value=param)
    elif optional:
        self.params[param] = None
    else:
        raise OWSMissingParameterValue('Parameter "%s" is missing' % param, value=param)
    return self.params[param]
Get parameter in GET request.
def check_password(self, username, password, properties):
    logger.debug("check_password{0!r}".format(
        (username, password, properties)))
    pwd, pwd_format = self.get_password(username,
                                        (u"plain", u"md5:user:realm:password"),
                                        properties)
    if pwd_format == u"plain":
        logger.debug("got plain password: {0!r}".format(pwd))
        return pwd is not None and password == pwd
    elif pwd_format == u"md5:user:realm:password":  # was `in (string)`, a substring test
        logger.debug("got md5:user:realm:password password: {0!r}"
                     .format(pwd))
        realm = properties.get("realm")
        if realm is None:
            realm = ""
        else:
            realm = realm.encode("utf-8")
        username = username.encode("utf-8")
        password = password.encode("utf-8")
        # hash "username:realm:password", as the format name describes
        urp_hash = hashlib.md5(b"%s:%s:%s" % (username, realm, password)).hexdigest()
        return urp_hash == pwd
    logger.debug("got password in unknown format: {0!r}".format(pwd_format))
    return False
Check the password validity. Used by plain-text authentication mechanisms. Default implementation: retrieve a "plain" password for the `username` and `realm` using `self.get_password` and compare it with the password provided. May be overridden e.g. to check the password against some external authentication mechanism (PAM, LDAP, etc.). :Parameters: - `username`: the username for which the password verification is requested. - `password`: the password to verify. - `properties`: mapping with authentication properties (those provided to the authenticator's ``start()`` method plus some already obtained via the mechanism). :Types: - `username`: `unicode` - `password`: `unicode` - `properties`: mapping :return: `True` if the password is valid. :returntype: `bool`
def shuffle_node_data(graph: BELGraph, key: str, percentage: Optional[float] = None) -> BELGraph:
    percentage = percentage or 0.3
    assert 0 < percentage <= 1
    n = graph.number_of_nodes()
    swaps = int(percentage * n * (n - 1) / 2)
    result: BELGraph = graph.copy()
    for _ in range(swaps):
        # was `result.node`; sampling from a materialized node list works on modern networkx
        s, t = random.sample(list(result.nodes), 2)
        result.nodes[s][key], result.nodes[t][key] = result.nodes[t][key], result.nodes[s][key]
    return result
Shuffle the node's data. Useful for permutation testing. :param graph: A BEL graph :param key: The node data dictionary key :param percentage: What percentage of possible swaps to make
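A sketch of how this might be used in a permutation test; `graph` and the 'label' key are assumptions:

permuted = shuffle_node_data(graph, 'label', percentage=0.5)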
def set_device_id(self, dev, id):
    if id < 0 or id > 255:
        raise ValueError("ID must be an unsigned byte!")
    com, code, ok = io.send_packet(
        CMDTYPE.SETID, 1, dev, self.baudrate, 5, id)
    if not ok:
        raise_error(code)
Set device ID to new value. :param str dev: Serial device address/path :param id: Device ID to set
def power_btn(self, interval=200):
    if self.__power_btn_port is None:
        cij.err("cij.usb.relay: Invalid USB_RELAY_POWER_BTN")
        return 1
    return self.__press(self.__power_btn_port, interval=interval)
Press the TARGET power button.
def manifest():
    prune = options.paved.dist.manifest.prune
    graft = set()
    if options.paved.dist.manifest.include_sphinx_docroot:
        docroot = options.get('docroot', 'docs')
        graft.update([docroot])
        if options.paved.dist.manifest.exclude_sphinx_builddir:
            builddir = docroot + '/' + options.get("builddir", ".build")
            prune.update([builddir])
    with open(options.paved.cwd / 'MANIFEST.in', 'w') as fo:
        for item in graft:
            fo.write('graft %s\n' % item)
        for item in options.paved.dist.manifest.include:
            fo.write('include %s\n' % item)
        for item in options.paved.dist.manifest.recursive_include:
            fo.write('recursive-include %s\n' % item)
        for item in prune:
            fo.write('prune %s\n' % item)
Guarantee the existence of a basic MANIFEST.in. manifest doc: http://docs.python.org/distutils/sourcedist.html#manifest `options.paved.dist.manifest.include`: set of files (or globs) to include with the `include` directive. `options.paved.dist.manifest.recursive_include`: set of files (or globs) to include with the `recursive-include` directive. `options.paved.dist.manifest.prune`: set of files (or globs) to exclude with the `prune` directive. `options.paved.dist.manifest.include_sphinx_docroot`: True -> sphinx docroot is added as `graft`. `options.paved.dist.manifest.exclude_sphinx_builddir`: True -> sphinx builddir is added as `prune`.
def serialize_rules(self, rules):
    serialized = []
    for rule in rules:
        direction = rule["direction"]
        source = ''
        destination = ''
        if rule.get("remote_ip_prefix"):
            prefix = rule["remote_ip_prefix"]
            if direction == "ingress":
                source = self._convert_remote_network(prefix)
            else:
                if (Capabilities.EGRESS not in
                        CONF.QUARK.environment_capabilities):
                    raise q_exc.EgressSecurityGroupRulesNotEnabled()
                else:
                    destination = self._convert_remote_network(prefix)
        optional_fields = {}
        protocol_map = protocols.PROTOCOL_MAP[rule["ethertype"]]
        if rule["protocol"] == protocol_map["icmp"]:
            optional_fields["icmp type"] = rule["port_range_min"]
            optional_fields["icmp code"] = rule["port_range_max"]
        else:
            optional_fields["port start"] = rule["port_range_min"]
            optional_fields["port end"] = rule["port_range_max"]
        payload = {"ethertype": rule["ethertype"],
                   "protocol": rule["protocol"],
                   "source network": source,
                   "destination network": destination,
                   "action": "allow",
                   "direction": direction}
        payload.update(optional_fields)
        serialized.append(payload)
    return serialized
Creates a payload for the redis server.
def has_degradation_increases_activity(data: Dict) -> bool: return part_has_modifier(data, SUBJECT, DEGRADATION) and part_has_modifier(data, OBJECT, ACTIVITY)
Check if the degradation of source causes activity of target.
def find_service(self, uuid):
    for service in self.list_services():
        if service.uuid == uuid:
            return service
    return None
Return the first child service found that has the specified UUID. Will return None if no service that matches is found.
def load(self):
    self.create_effect_classes()
    self._add_resource_descriptions_to_pools(self.create_external_resources())
    self._add_resource_descriptions_to_pools(self.create_resources())
    for meta, resource in resources.textures.load_pool():
        self._textures[meta.label] = resource
    for meta, resource in resources.programs.load_pool():
        self._programs[meta.label] = resource
    for meta, resource in resources.scenes.load_pool():
        self._scenes[meta.label] = resource
    for meta, resource in resources.data.load_pool():
        self._data[meta.label] = resource
    self.create_effect_instances()
    self.post_load()
Loads this project instance
def process_incoming_tuples(self):
    if self.output_helper.is_out_queue_available():
        self._read_tuples_and_execute()
        self.output_helper.send_out_tuples()
    else:
        self.bolt_metrics.update_out_queue_full_count()
Should be called when a tuple is buffered into the in_stream. This method is equivalent to ``addBoltTasks()`` but is designed for an event-driven single-thread bolt.
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
          allow_nan=True, cls=None, indent=None, separators=None,
          encoding='utf-8', default=None, **kw):
    if (skipkeys is False and ensure_ascii is True and
            check_circular is True and allow_nan is True and
            cls is None and indent is None and separators is None and
            encoding == 'utf-8' and default is None and not kw):
        # fast path: all defaults, reuse the module-level encoder
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        **kw).encode(obj)
Serialize ``obj`` to a JSON formatted ``str``. If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then the return value will be a ``unicode`` instance subject to normal Python ``str`` to ``unicode`` coercion rules instead of being escaped to an ASCII ``str``. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg.
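Two quick illustrations of the `separators` and `indent` behavior described above:

dumps([1, 2, 3], separators=(',', ':'))   # '[1,2,3]', the most compact form
dumps({'a': 1}, indent=2)                 # pretty-printed across multiple lines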
def pin_type(self, pin):
    if type(pin) is list:
        return [self.pin_type(p) for p in pin]
    pin_id = self._pin_mapping.get(pin, None)
    if pin_id:
        return self._pin_type(pin_id)
    else:
        raise KeyError('Requested pin is not mapped: %s' % pin)
Gets the `ahio.PortType` this pin was set to. If you're developing a driver, implement _pin_type(self, pin). @arg pin the pin whose mode you want to see @returns the `ahio.PortType` the pin is set to @throw KeyError if pin isn't mapped.
def mkclick(freq, sr=22050, duration=0.1):
    times = np.arange(int(sr * duration))
    click = np.sin(2 * np.pi * times * freq / float(sr))
    click *= np.exp(-times / (1e-2 * sr))
    return click
Generate a click sample. This replicates functionality from mir_eval.sonify.clicks, but exposes the target frequency and duration.
def _fourier_func(fourierparams, phase, mags):
    order = int(len(fourierparams)/2)
    f_amp = fourierparams[:order]
    f_pha = fourierparams[order:]
    f_orders = [f_amp[x]*npcos(2.0*pi_value*x*phase + f_pha[x])
                for x in range(order)]
    total_f = npmedian(mags)
    for fo in f_orders:
        total_f += fo
    return total_f
This returns a summed Fourier cosine series. Parameters ---------- fourierparams : list This MUST be a flat list of the following form: [amplitude_1, amplitude_2, ..., amplitude_X, phase_1, phase_2, ..., phase_X] where X is the Fourier order; the function splits the list in half, taking the first half as amplitudes and the second half as phases. phase,mags : np.array The input phase and magnitude arrays to use as the basis for the cosine series. The phases are used directly to generate the values of the function, while the mags array is used to generate the zeroth order amplitude coefficient. Returns ------- np.array The Fourier cosine series function evaluated over `phase`.
def reverse_dummies(self, X, mapping):
    out_cols = X.columns.values.tolist()
    mapped_columns = []
    for switch in mapping:
        col = switch.get('col')
        mod = switch.get('mapping')
        insert_at = out_cols.index(mod.columns[0])
        X.insert(insert_at, col, 0)
        positive_indexes = mod.index[mod.index > 0]
        for i in range(positive_indexes.shape[0]):
            existing_col = mod.columns[i]
            val = positive_indexes[i]
            X.loc[X[existing_col] == 1, col] = val
            mapped_columns.append(existing_col)
        X.drop(mod.columns, axis=1, inplace=True)
        out_cols = X.columns.values.tolist()
    return X
Convert dummy variables back into numerical variables. Parameters ---------- X : DataFrame mapping: list-like Contains mappings of each column to be transformed to its new columns and the values represented. Returns ------- numerical: DataFrame
def doStuff(ABFfolder, analyze=False, convert=False, index=True,
            overwrite=True, launch=True):
    IN = INDEX(ABFfolder)
    if analyze:
        IN.analyzeAll()
    if convert:
        IN.convertImages()
Inelegant for now, but lets you manually analyze every ABF in a folder.
def cache_func(prefix, method=False):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            cache_args = args
            if method:
                # skip `self` so the cache key is stable across instances
                cache_args = args[1:]
            cache_key = get_cache_key(prefix, *cache_args, **kwargs)
            cached_value = cache.get(cache_key)
            if cached_value is None:
                cached_value = func(*args, **kwargs)
                cache.set(cache_key, cached_value)
            return cached_value
        return wrapper
    return decorator
Cache result of function execution into the django cache backend. Calculate cache key based on `prefix`, `args` and `kwargs` of the function. For using like object method set `method=True`.
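A minimal usage sketch; `expensive_lookup` and `compute` are hypothetical names:

@cache_func('expensive')
def expensive_lookup(key):
    return compute(key)  # result is stored in the cache on first call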
def clean_channel_worker_username(self):
    channel_worker_username = self.cleaned_data['channel_worker_username'].strip()
    try:
        User.objects.get(username=channel_worker_username)
    except User.DoesNotExist:
        raise ValidationError(
            ValidationMessages.INVALID_CHANNEL_WORKER.format(
                channel_worker_username=channel_worker_username
            )
        )
    return channel_worker_username
Clean enterprise channel worker user form field Returns: str: the cleaned value of channel user username for transmitting courses metadata.
def save_photon_hdf5(self, identity=None, overwrite=True, path=None):
    filepath = self.filepath
    if path is not None:
        filepath = Path(path, filepath.name)
    self.merge_da()
    data = self._make_photon_hdf5(identity=identity)
    phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath), overwrite=overwrite)
Create a smFRET Photon-HDF5 file with current timestamps.
def channel_axis(self, batch):
    axis = self.__model.channel_axis()
    if not batch:
        # a single image has one dimension fewer than a batch
        axis = axis - 1
    return axis
Interface to model.channel_axis for attacks. Parameters ---------- batch : bool Controls whether the index of the axis for a batch of images (4 dimensions) or a single image (3 dimensions) should be returned.
def calculate_integral_over_T(self, T1, T2, method):
    return float(quad(lambda T: self.calculate(T, method)/T, T1, T2)[0])
Method to calculate the integral of a property over temperature with respect to temperature, using a specified method. Uses SciPy's `quad` function to perform the integral, with no options. This method can be overwritten by subclasses who may prefer to add analytical methods for some or all methods as this is much faster. If the calculation does not succeed, returns the actual error encountered. Parameters ---------- T1 : float Lower limit of integration, [K] T2 : float Upper limit of integration, [K] method : str Method for which to find the integral Returns ------- integral : float Calculated integral of the property over the given range, [`units`]
def node_get_args(node):
    obj = node[OBJ]
    key = node[KEY]
    boundargs = obj.formula.signature.bind(*key)
    boundargs.apply_defaults()
    return boundargs.arguments
Return an ordered mapping from params to args
def memoize(func):
    class Memodict(dict):
        def __getitem__(self, *key):
            return dict.__getitem__(self, key)

        def __missing__(self, key):
            # compute, store, and return the value on a cache miss
            ret = self[key] = func(*key)
            return ret
    return Memodict().__getitem__
Memoization decorator for a function taking one or more arguments.
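A classic demonstration: memoizing a recursive function. Because the decorated name is rebound to the memo dict's __getitem__, the recursive calls also hit the cache:

@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(100)  # completes quickly thanks to cached subresults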
def amount(self): return sum(self.get_compound_amount(c) for c in self.material.compounds)
Determine the sum of mole amounts of all the compounds. :returns: Amount. [kmol]
def start(self):
    start = time.time()
    self._kill_event = threading.Event()
    self.procs = {}
    for worker_id in range(self.worker_count):
        p = multiprocessing.Process(target=worker,
                                    args=(worker_id,
                                          self.uid,
                                          self.pending_task_queue,
                                          self.pending_result_queue,
                                          self.ready_worker_queue,
                                          ))
        p.start()
        self.procs[worker_id] = p
    logger.debug("Manager synced with workers")
    self._task_puller_thread = threading.Thread(target=self.pull_tasks,
                                                args=(self._kill_event,))
    self._result_pusher_thread = threading.Thread(target=self.push_results,
                                                  args=(self._kill_event,))
    self._task_puller_thread.start()
    self._result_pusher_thread.start()
    logger.info("Loop start")
    self._kill_event.wait()
    logger.critical("[MAIN] Received kill event, terminating worker processes")
    self._task_puller_thread.join()
    self._result_pusher_thread.join()
    for proc_id in self.procs:
        self.procs[proc_id].terminate()
        logger.critical("Terminating worker {}:{}".format(self.procs[proc_id],
                                                          self.procs[proc_id].is_alive()))
        self.procs[proc_id].join()
        logger.debug("Worker:{} joined successfully".format(self.procs[proc_id]))
    self.task_incoming.close()
    self.result_outgoing.close()
    self.context.term()
    delta = time.time() - start
    logger.info("process_worker_pool ran for {} seconds".format(delta))
    return
Start the worker processes. TODO: Move task receiving to a thread
def load(self):
    self._open_image()
    components, data = image_data(self.image)
    texture = self.ctx.texture(
        self.image.size,
        components,
        data,
    )
    texture.extra = {'meta': self.meta}
    if self.meta.mipmap:
        texture.build_mipmaps()
    self._close_image()
    return texture
Load a 2d texture
def from_der_data(cls, data):
    logger.debug("Decoding DER certificate: {0!r}".format(data))
    if cls._cert_asn1_type is None:
        cls._cert_asn1_type = Certificate()
    cert = der_decoder.decode(data, asn1Spec=cls._cert_asn1_type)[0]
    result = cls()
    tbs_cert = cert.getComponentByName('tbsCertificate')
    subject = tbs_cert.getComponentByName('subject')
    logger.debug("Subject: {0!r}".format(subject))
    result._decode_subject(subject)
    validity = tbs_cert.getComponentByName('validity')
    result._decode_validity(validity)
    extensions = tbs_cert.getComponentByName('extensions')
    if extensions:
        for extension in extensions:
            logger.debug("Extension: {0!r}".format(extension))
            oid = extension.getComponentByName('extnID')
            logger.debug("OID: {0!r}".format(oid))
            if oid != SUBJECT_ALT_NAME_OID:
                continue
            value = extension.getComponentByName('extnValue')
            logger.debug("Value: {0!r}".format(value))
            if isinstance(value, Any):
                value = der_decoder.decode(value, asn1Spec=OctetString())[0]
            alt_names = der_decoder.decode(value, asn1Spec=GeneralNames())[0]
            logger.debug("SubjectAltName: {0!r}".format(alt_names))
            result._decode_alt_names(alt_names)
    return result
Decode DER-encoded certificate. :Parameters: - `data`: the encoded certificate :Types: - `data`: `bytes` :Return: decoded certificate data :Returntype: ASN1CertificateData
async def add_user(self, add_user_request):
    response = hangouts_pb2.AddUserResponse()
    await self._pb_request('conversations/adduser', add_user_request, response)
    return response
Invite users to join an existing group conversation.
def deploy_code(self):
    assert self.genv.SITE, 'Site unspecified.'
    assert self.genv.ROLE, 'Role unspecified.'
    r = self.local_renderer
    if self.env.exclusions:
        r.env.exclusions_str = ' '.join(
            "--exclude='%s'" % _ for _ in self.env.exclusions)
    r.local(r.env.rsync_command)
    r.sudo('chown -R {rsync_chown_user}:{rsync_chown_group} {rsync_dst_dir}')
Generates a rsync of all deployable code.
def _slotnames(cls):
    names = cls.__dict__.get("__slotnames__")
    if names is not None:
        return names
    names = []
    if not hasattr(cls, "__slots__"):
        pass
    else:
        for c in cls.__mro__:
            if "__slots__" in c.__dict__:
                slots = c.__dict__['__slots__']
                if isinstance(slots, basestring):
                    slots = (slots,)
                for name in slots:
                    if name in ("__dict__", "__weakref__"):
                        continue
                    elif name.startswith('__') and not name.endswith('__'):
                        # mangle private slot names the way CPython does
                        names.append('_%s%s' % (c.__name__, name))
                    else:
                        names.append(name)
    try:
        cls.__slotnames__ = names
    except:
        pass
    return names
Return a list of slot names for a given class. This needs to find slots defined by the class and its bases, so we can't simply return the __slots__ attribute. We must walk down the Method Resolution Order and concatenate the __slots__ of each class found there. (This assumes classes don't modify their __slots__ attribute to misrepresent their slots after the class is defined.)
def transitions(self, return_matrix=True):
    if return_matrix:
        mat = np.zeros((self.nV, self.nV))
        for v in self.g.nodes():
            ind = [e[1] for e in sorted(self.g.out_edges(v))]
            mat[v, ind] = self._route_probs[v]
    else:
        mat = {
            k: {e[1]: p for e, p in zip(sorted(self.g.out_edges(k)), value)}
            for k, value in enumerate(self._route_probs)
        }
    return mat
Returns the routing probabilities for each vertex in the graph. Parameters ---------- return_matrix : bool (optional, the default is ``True``) Specifies whether an :class:`~numpy.ndarray` is returned. If ``False``, a dict is returned instead. Returns ------- out : a dict or :class:`~numpy.ndarray` The transition probabilities for each vertex in the graph. If ``out`` is an :class:`~numpy.ndarray`, then ``out[v, u]`` returns the probability of a transition from vertex ``v`` to vertex ``u``. If ``out`` is a dict then ``out_edge[v][u]`` is the probability of moving from vertex ``v`` to the vertex ``u``. Examples -------- Lets change the routing probabilities: >>> import queueing_tool as qt >>> import networkx as nx >>> g = nx.sedgewick_maze_graph() >>> net = qt.QueueNetwork(g) Below is an adjacency list for the graph ``g``. >>> ans = qt.graph2dict(g, False) >>> {k: sorted(v) for k, v in ans.items()} ... # doctest: +NORMALIZE_WHITESPACE {0: [2, 5, 7], 1: [7], 2: [0, 6], 3: [4, 5], 4: [3, 5, 6, 7], 5: [0, 3, 4], 6: [2, 4], 7: [0, 1, 4]} The default transition matrix is every out edge being equally likely: >>> net.transitions(False) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE {0: {2: 0.333..., 5: 0.333..., 7: 0.333...}, 1: {7: 1.0}, 2: {0: 0.5, 6: 0.5}, 3: {4: 0.5, 5: 0.5}, 4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25}, 5: {0: 0.333..., 3: 0.333..., 4: 0.333...}, 6: {2: 0.5, 4: 0.5}, 7: {0: 0.333..., 1: 0.333..., 4: 0.333...}} Now we will generate a random routing matrix: >>> mat = qt.generate_transition_matrix(g, seed=96) >>> net.set_transitions(mat) >>> net.transitions(False) # doctest: +ELLIPSIS ... # doctest: +NORMALIZE_WHITESPACE {0: {2: 0.112..., 5: 0.466..., 7: 0.420...}, 1: {7: 1.0}, 2: {0: 0.561..., 6: 0.438...}, 3: {4: 0.545..., 5: 0.454...}, 4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...}, 5: {0: 0.265..., 3: 0.460..., 4: 0.274...}, 6: {2: 0.673..., 4: 0.326...}, 7: {0: 0.033..., 1: 0.336..., 4: 0.630...}} What this shows is the following: when an :class:`.Agent` is at vertex ``2`` they will transition to vertex ``0`` with probability ``0.561`` and route to vertex ``6`` probability ``0.438``, when at vertex ``6`` they will transition back to vertex ``2`` with probability ``0.673`` and route vertex ``4`` probability ``0.326``, etc.
def shellcmd(repo, args):
    with cd(repo.rootdir):
        result = run(args)
        return result
Run a shell command within the repo's context Parameters ---------- repo: Repository object args: Shell command
def get_self(session, user_details=None):
    if user_details:
        user_details['compact'] = True
    response = make_get_request(session, 'self', params_data=user_details)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']
    else:
        raise SelfNotRetrievedException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
Get details about the currently authenticated user
def copy(self): return self.__class__( amount=self["amount"], asset=self["asset"].copy(), blockchain_instance=self.blockchain, )
Copy the instance and make sure not to use a reference
def node_theta(self, node):
    group = self.find_node_group_membership(node)
    return self.group_theta(group)
Convenience function to find the node's theta angle.
def validate_key(self, key):
    if not models.PasswordResetToken.valid_tokens.filter(key=key).exists():
        raise serializers.ValidationError(
            _("The provided reset token does not exist, or is expired.")
        )
    return key
Validate the provided reset key. Returns: The validated key. Raises: serializers.ValidationError: If the provided key does not exist.
def to_message(self):
    from .messages import ack
    return ack.Acknowledgement(self.code, self.args[0] if len(self.args) > 0 else '')
Creates an error Acknowledgement message. The message's code and message are taken from this exception. :return: the message representing this exception
def add_scalar_data(self, token, community_id, producer_display_name,
                    metric_name, producer_revision, submit_time, value,
                    **kwargs):
    parameters = dict()
    parameters['token'] = token
    parameters['communityId'] = community_id
    parameters['producerDisplayName'] = producer_display_name
    parameters['metricName'] = metric_name
    parameters['producerRevision'] = producer_revision
    parameters['submitTime'] = submit_time
    parameters['value'] = value
    optional_keys = [
        'config_item_id', 'test_dataset_id', 'truth_dataset_id', 'silent',
        'unofficial', 'build_results_url', 'branch', 'extra_urls', 'params',
        'submission_id', 'submission_uuid', 'unit', 'reproduction_command'
    ]
    for key in optional_keys:
        if key in kwargs:
            if key == 'config_item_id':
                parameters['configItemId'] = kwargs[key]
            elif key == 'test_dataset_id':
                parameters['testDatasetId'] = kwargs[key]
            elif key == 'truth_dataset_id':
                parameters['truthDatasetId'] = kwargs[key]
            elif key == 'build_results_url':
                parameters['buildResultsUrl'] = kwargs[key]
            elif key == 'extra_urls':
                parameters['extraUrls'] = json.dumps(kwargs[key])
            elif key == 'params':
                parameters[key] = json.dumps(kwargs[key])
            elif key == 'silent':
                if kwargs[key]:
                    parameters[key] = kwargs[key]
            elif key == 'unofficial':
                if kwargs[key]:
                    parameters[key] = kwargs[key]
            elif key == 'submission_id':
                parameters['submissionId'] = kwargs[key]
            elif key == 'submission_uuid':
                parameters['submissionUuid'] = kwargs[key]
            elif key == 'unit':
                parameters['unit'] = kwargs[key]
            elif key == 'reproduction_command':
                parameters['reproductionCommand'] = kwargs[key]
            else:
                parameters[key] = kwargs[key]
    response = self.request('midas.tracker.scalar.add', parameters)
    return response
Create a new scalar data point. :param token: A valid token for the user in question. :type token: string :param community_id: The id of the community that owns the producer. :type community_id: int | long :param producer_display_name: The display name of the producer. :type producer_display_name: string :param metric_name: The metric name that identifies which trend this point belongs to. :type metric_name: string :param producer_revision: The repository revision of the producer that produced this value. :type producer_revision: int | long | string :param submit_time: The submit timestamp. Must be parsable with PHP strtotime(). :type submit_time: string :param value: The value of the scalar. :type value: float :param config_item_id: (optional) If this value pertains to a specific configuration item, pass its id here. :type config_item_id: int | long :param test_dataset_id: (optional) If this value pertains to a specific test dataset, pass its id here. :type test_dataset_id: int | long :param truth_dataset_id: (optional) If this value pertains to a specific ground truth dataset, pass its id here. :type truth_dataset_id: int | long :param silent: (optional) If true, do not perform threshold-based email notifications for this scalar. :type silent: bool :param unofficial: (optional) If true, creates an unofficial scalar visible only to the user performing the submission. :type unofficial: bool :param build_results_url: (optional) A URL for linking to build results for this submission. :type build_results_url: string :param branch: (optional) The branch name in the source repository for this submission. :type branch: string :param submission_id: (optional) The id of the submission. :type submission_id: int | long :param submission_uuid: (optional) The uuid of the submission. If one does not exist, it will be created. :type submission_uuid: string :param params: (optional) Any key/value pairs that should be displayed with this scalar result. :type params: dict :param extra_urls: (optional) Other URL's that should be displayed with with this scalar result. Each element of the list should be a dict with the following keys: label, text, href :type extra_urls: list[dict] :param unit: (optional) The unit of the scalar value. :type unit: string :param reproduction_command: (optional) The command to reproduce this scalar. :type reproduction_command: string :returns: The scalar object that was created. :rtype: dict
def requireAnomalyModel(func):
    @wraps(func)
    def _decorator(self, *args, **kwargs):
        if not self.getInferenceType() == InferenceType.TemporalAnomaly:
            raise RuntimeError("Method required a TemporalAnomaly model.")
        if self._getAnomalyClassifier() is None:
            raise RuntimeError("Model does not support this command. Model must"
                               " be an active anomalyDetector model.")
        return func(self, *args, **kwargs)
    return _decorator
Decorator for functions that require anomaly models.
def paginate_link_tag(item):
    a_tag = Page.default_link_tag(item)
    if item['type'] == 'current_page':
        return make_html_tag('li', a_tag, **{'class': 'blue white-text'})
    return make_html_tag('li', a_tag)
Create an A-HREF tag that points to another page usable in paginate.
def read(cls, proto):
    tm = super(TemporalMemoryMonitorMixin, cls).read(proto)
    tm.mmName = None
    tm._mmTraces = None
    tm._mmData = None
    tm.mmClearHistory()
    tm._mmResetActive = True
    return tm
Intercepts TemporalMemory deserialization request in order to initialize `TemporalMemoryMonitorMixin` state @param proto (DynamicStructBuilder) Proto object @return (TemporalMemory) TemporalMemory shim instance
def _rindex(mylist: Sequence[T], x: T) -> int: return len(mylist) - mylist[::-1].index(x) - 1
Index of the last occurrence of x in the sequence.
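A quick worked example, following directly from the definition:

>>> _rindex([1, 2, 3, 2], 2)
3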
def get_form_field_class(model_field):
    FIELD_MAPPING = {
        IntField: forms.IntegerField,
        StringField: forms.CharField,
        FloatField: forms.FloatField,
        BooleanField: forms.BooleanField,
        DateTimeField: forms.DateTimeField,
        DecimalField: forms.DecimalField,
        URLField: forms.URLField,
        EmailField: forms.EmailField
    }
    return FIELD_MAPPING.get(model_field.__class__, forms.CharField)
Gets the default form field for a mongoengine field.
def load(self):
    data = self.get_data('floating_ips/%s' % self.ip, type=GET)
    floating_ip = data['floating_ip']
    for attr in floating_ip.keys():
        setattr(self, attr, floating_ip[attr])
    return self
Load the FloatingIP object from DigitalOcean. Requires self.ip to be set.
def disableTap(self):
    if self._tapFileIn is not None:
        self._tapFileIn.close()
        self._tapFileIn = None
    if self._tapFileOut is not None:
        self._tapFileOut.close()
        self._tapFileOut = None
Disable writing of output tap files.
async def await_event(self, event=None, timeout=30): return await self._protocol.await_event(event, timeout=timeout)
Wait for an event from QTM. :param event: A :class:`qtm.QRTEvent` to wait for a specific event. Otherwise wait for any event. :param timeout: Max time to wait for event. :rtype: A :class:`qtm.QRTEvent`
def _make_celery_app(config):
    config.registry.celery_app.conf['pyramid_config'] = config
    return config.registry.celery_app
This exposes the celery app. The app is actually created as part of the configuration. However, this does make the celery app functional as a stand-alone celery application. This puts the pyramid configuration object on the celery app to be used for making the registry available to tasks running inside the celery worker process pool. See ``CustomTask.__call__``.
def print_state(self):
    def tile_string(value):
        if value > 0:
            return '% 5d' % (2 ** value,)
        return "     "  # five spaces, matching the '% 5d' field width
    separator_line = '-' * 25
    print(separator_line)
    for row in range(4):
        print("|" + "|".join([tile_string(v) for v in self._state[row, :]]) + "|")
    print(separator_line)
Prints the current state.
def remove_extension(module, name, code):
    key = (module, name)
    if (_extension_registry.get(key) != code or
            _inverted_registry.get(code) != key):
        raise ValueError("key %s is not registered with code %s" % (key, code))
    del _extension_registry[key]
    del _inverted_registry[code]
    if code in _extension_cache:
        del _extension_cache[code]
Unregister an extension code. For testing only.
def parse_uri(self, raw_uri, recursive):
    if recursive:
        raw_uri = directory_fmt(raw_uri)
    file_provider = self.parse_file_provider(raw_uri)
    self._validate_paths_or_fail(raw_uri, recursive)
    uri, docker_uri = self.rewrite_uris(raw_uri, file_provider)
    uri_parts = job_model.UriParts(
        directory_fmt(os.path.dirname(uri)), os.path.basename(uri))
    return docker_uri, uri_parts, file_provider
Return a valid docker_path, uri, and file provider from a flag value.
def _MergeMessageField(self, tokenizer, message, field):
    is_map_entry = _IsMapEntry(field)
    if tokenizer.TryConsume('<'):
        end_token = '>'
    else:
        tokenizer.Consume('{')
        end_token = '}'
    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
        if field.is_extension:
            sub_message = message.Extensions[field].add()
        elif is_map_entry:
            sub_message = field.message_type._concrete_class()
        else:
            sub_message = getattr(message, field.name).add()
    else:
        if field.is_extension:
            sub_message = message.Extensions[field]
        else:
            sub_message = getattr(message, field.name)
        sub_message.SetInParent()
    while not tokenizer.TryConsume(end_token):
        if tokenizer.AtEnd():
            raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,))
        self._MergeField(tokenizer, sub_message)
    if is_map_entry:
        value_cpptype = field.message_type.fields_by_name['value'].cpp_type
        if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
            value = getattr(message, field.name)[sub_message.key]
            value.MergeFrom(sub_message.value)
        else:
            getattr(message, field.name)[sub_message.key] = sub_message.value
Merges a single message field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems.
def _c2x(self, c): return 0.5 * (self.window[0] + self.window[1] + c * (self.window[1] - self.window[0]))
Convert Chebyshev coordinates to window coordinates.
def indexOf(a, b):
    "Return the first index of b in a."
    for i, j in enumerate(a):
        if j == b:
            return i
    else:
        raise ValueError('sequence.index(x): x not in sequence')
Return the first index of b in a.
def pixel_wise_softmax(x, name='pixel_wise_softmax'):
    with tf.name_scope(name):
        return tf.nn.softmax(x)
Return the softmax outputs of images. Every pixel has multiple labels, and the values for a pixel sum to 1. Usually used for image segmentation. Parameters ---------- x : Tensor input. - For 2d image, 4D tensor (batch_size, height, width, channel), where channel >= 2. - For 3d image, 5D tensor (batch_size, depth, height, width, channel), where channel >= 2. name : str function name (optional) Returns ------- Tensor A ``Tensor`` in the same type as ``x``. Examples -------- >>> outputs = pixel_wise_softmax(network.outputs) >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5) References ---------- - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`__
def rename(self, new_name): return self.get_data( "images/%s" % self.id, type=PUT, params={"name": new_name} )
Rename an image
def extract_scheduler_location(self, topology):
    schedulerLocation = {
        "name": None,
        "http_endpoint": None,
        "job_page_link": None,
    }
    if topology.scheduler_location:
        schedulerLocation["name"] = topology.scheduler_location.topology_name
        schedulerLocation["http_endpoint"] = topology.scheduler_location.http_endpoint
        schedulerLocation["job_page_link"] = \
            topology.scheduler_location.job_page_link[0] \
            if len(topology.scheduler_location.job_page_link) > 0 else ""
    return schedulerLocation
Returns the representation of scheduler location that will be returned from Tracker.
def value_from_datadict(self, *args, **kwargs):
    value = super(RichTextWidget, self).value_from_datadict(*args, **kwargs)
    if value is not None:
        value = self.get_sanitizer()(value)
    return value
Pass the submitted value through the sanitizer before returning it.
def prune_unused_metabolites(cobra_model):
    output_model = cobra_model.copy()
    inactive_metabolites = [m for m in output_model.metabolites
                            if len(m.reactions) == 0]
    output_model.remove_metabolites(inactive_metabolites)
    return output_model, inactive_metabolites
Remove metabolites that are not involved in any reactions and returns pruned model Parameters ---------- cobra_model: class:`~cobra.core.Model.Model` object the model to remove unused metabolites from Returns ------- output_model: class:`~cobra.core.Model.Model` object input model with unused metabolites removed inactive_metabolites: list of class:`~cobra.core.metabolite.Metabolite` list of metabolites that were removed
def auth_properties(self):
    props = dict(self.settings["extra_auth_properties"])
    if self.transport:
        props.update(self.transport.auth_properties)
    props["local-jid"] = self.me
    props["service-type"] = "xmpp"
    return props
Authentication properties of the stream. Derived from the transport with 'local-jid' and 'service-type' added.
def is_partly_within_image(self, image): return not self.is_out_of_image(image, fully=True, partly=False)
Estimate whether the polygon is at least partially inside the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ndarray, its shape will be used. If a tuple, it is assumed to represent the image shape and must contain at least two integers. Returns ------- bool True if the polygon is at least partially inside the image area. False otherwise.
def send(self, str, end='\n'): return self._process.stdin.write(str+end)
Sends a line to std_in.
def get(self):
    if self.is_shutdown():
        return None
    while len(self._states) == 0:
        if self.running == 0:
            return None
        if self.is_shutdown():
            return None
        logger.debug("Waiting for available states")
        self._lock.wait()
    state_id = self._policy.choice(list(self._states))
    if state_id is None:
        return None
    del self._states[self._states.index(state_id)]
    return state_id
Dequeue a state with the max priority
def setup_ipython(self):
    if self.is_ipysetup:
        return
    from ipykernel.kernelapp import IPKernelApp
    self.shell = IPKernelApp.instance().shell
    if not self.shell and is_ipython():
        self.shell = get_ipython()
    if self.shell:
        shell_class = type(self.shell)
        shell_class.default_showtraceback = shell_class.showtraceback
        shell_class.showtraceback = custom_showtraceback
        self.is_ipysetup = True
    else:
        raise RuntimeError("IPython shell not found.")
Monkey patch the shell's error handler. This method monkey-patches the showtraceback method of IPython's InteractiveShell. __IPYTHON__ is not detected when starting an IPython kernel, so this method is called from start_kernel in spyder-modelx.
def install(self):
    if not self.is_valid:
        raise PolyaxonDeploymentConfigError(
            'Deployment type `{}` not supported'.format(self.deployment_type))
    if self.is_kubernetes:
        self.install_on_kubernetes()
    elif self.is_docker_compose:
        self.install_on_docker_compose()
    elif self.is_docker:
        self.install_on_docker()
    elif self.is_heroku:
        self.install_on_heroku()
Install polyaxon using the current config to the correct platform.
def do_run_1(self):
    while not self.check_terminate():
        self._has_run = True
        self._run1()
        self._num_iter += 1
        self._inner_run_counter += 1
LM run, evaluating 1 step at a time. Broyden or eigendirection updates replace full-J updates until a full-J update occurs. Does not run with the calculated J (no internal run).
def unmap_memory_callback(self, start, size):
    logger.info(f"Unmapping memory from {hex(start)} to {hex(start + size)}")
    mask = (1 << 12) - 1
    if (start & mask) != 0:
        logger.error("Memory to be unmapped is not aligned to a page")
    if (size & mask) != 0:
        size = ((size >> 12) + 1) << 12
        logger.warning("Forcing unmap size to align to a page")
    self._emu.mem_unmap(start, size)
Unmap Unicorn maps when Manticore unmaps them
def curl(self, url, post):
    try:
        req = urllib2.Request(url)
        req.add_header("Content-type", "application/xml")
        data = urllib2.urlopen(req, post.encode('utf-8')).read()
    except urllib2.URLError, v:  # Python 2 syntax, matching the urllib2 dependency
        raise AmbientSMSError(v)
    return dictFromXml(data)
Interface for sending web requests to the AmbientSMS API Server.
def get_role_name(region, account_id, role):
    prefix = ARN_PREFIXES.get(region, 'aws')
    return 'arn:{0}:iam::{1}:role/{2}'.format(prefix, account_id, role)
Shortcut to insert the `account_id` and `role` into the iam string.
def set_pkg_desc(self, doc, text):
    self.assert_package_exists()
    if not self.package_desc_set:
        self.package_desc_set = True
        doc.package.description = text
    else:
        raise CardinalityError('Package::Description')
Sets the package's description. Raises CardinalityError if description already set. Raises OrderError if no package previously defined.
def push(self, message):
    if self._ignore_event(message):
        return None, None
    args = self._parse_message(message)
    self.log.debug("Searching for command using chunks: %s", args)
    cmd, msg_args = self._find_longest_prefix_command(args)
    if cmd is not None:
        if message.user is None:
            self.log.debug("Discarded message with no originating user: %s", message)
            return None, None
        sender = message.user.username
        if message.channel is not None:
            # the original string literal was truncated in the source;
            # this channel-qualified form is a hypothetical reconstruction
            sender = "%s/%s" % (message.channel, sender)
        self.log.info("Received from %s: %s, args %s", sender, cmd, msg_args)
        f = self._get_command(cmd, message.user)
        if f:
            if self._is_channel_ignored(f, message.channel):
                self.log.info("Channel %s is ignored, discarding command %s",
                              message.channel, cmd)
                return '_ignored_', ""
            return cmd, f.execute(message, msg_args)
        return '_unauthorized_', "Sorry, you are not authorized to run %s" % cmd
    return None, None
Takes a SlackEvent, parses it for a command, and runs it against the registered plugins.
def show_image(setter, width, height, image_path='', image_obj=None,
               offset=(0, 0), bgcolor=COLORS.Off, brightness=255):
    bgcolor = color_scale(bgcolor, brightness)
    img = image_obj
    if image_path and not img:
        from PIL import Image
        img = Image.open(image_path)
    elif not img:
        raise ValueError('Must provide either image_path or image_obj')
    w = min(width - offset[0], img.size[0])
    h = min(height - offset[1], img.size[1])
    ox = offset[0]
    oy = offset[1]
    for x in range(ox, w + ox):
        for y in range(oy, h + oy):
            r, g, b, a = (0, 0, 0, 255)
            rgba = img.getpixel((x - ox, y - oy))
            if isinstance(rgba, int):
                raise ValueError('Image must be in RGB or RGBA format!')
            if len(rgba) == 3:
                r, g, b = rgba
            elif len(rgba) == 4:
                r, g, b, a = rgba
            else:
                raise ValueError('Image must be in RGB or RGBA format!')
            if a == 0:
                r, g, b = bgcolor
            else:
                r, g, b = color_scale((r, g, b), a)
            if brightness != 255:
                r, g, b = color_scale((r, g, b), brightness)
            setter(x, y, (r, g, b))
Display an image on a matrix.
def __validateExperimentControl(self, control):
    taskList = control.get('tasks', None)
    if taskList is not None:
        taskLabelsList = []
        for task in taskList:
            validateOpfJsonValue(task, "opfTaskSchema.json")
            validateOpfJsonValue(task['taskControl'], "opfTaskControlSchema.json")
            taskLabel = task['taskLabel']
            assert isinstance(taskLabel, types.StringTypes), \
                "taskLabel type: %r" % type(taskLabel)
            assert len(taskLabel) > 0, "empty string taskLabel is not allowed"
            taskLabelsList.append(taskLabel.lower())
        taskLabelDuplicates = filter(lambda x: taskLabelsList.count(x) > 1,
                                     taskLabelsList)
        assert len(taskLabelDuplicates) == 0, \
            "Duplicate task labels are not allowed: %s" % taskLabelDuplicates
    return
Validates control dictionary for the experiment context
def list_move_to_front(l, value='other'):
    l = list(l)
    if value in l:
        l.remove(value)
        l.insert(0, value)
    return l
If the value is in the list, move it to the front and return the list.
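A quick worked example with the default sentinel value:

>>> list_move_to_front(['a', 'b', 'other', 'c'])
['other', 'a', 'b', 'c']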
def lookup_stdout(self, pk=None, start_line=None, end_line=None, full=True):
    uj_res = get_resource('unified_job')
    query_params = (('unified_job_node__workflow_job', pk),
                    ('order_by', 'finished'),
                    ('status__in', 'successful,failed,error'))
    jobs_list = uj_res.list(all_pages=True, query=query_params)
    if jobs_list['count'] == 0:
        return ''
    return_content = ResSubcommand(uj_res)._format_human(jobs_list)
    lines = return_content.split('\n')
    if not full:
        lines = lines[:-1]
    N = len(lines)
    start_range = start_line
    if start_line is None:
        start_range = 0
    elif start_line > N:
        start_range = N
    end_range = end_line
    if end_line is None or end_line > N:
        end_range = N
    lines = lines[start_range:end_range]
    return_content = '\n'.join(lines)
    if len(lines) > 0:
        return_content += '\n'
    return return_content
Internal method that lies to our `monitor` method by returning a scorecard for the workflow job where the standard out would have been expected.
def getbalance(self, url='http://services.ambientmobile.co.za/credits'):
    postXMLList = []
    postXMLList.append("<api-key>%s</api-key>" % self.api_key)
    postXMLList.append("<password>%s</password>" % self.password)
    postXML = '<sms>%s</sms>' % "".join(postXMLList)
    result = self.curl(url, postXML)
    if result.get("credits", None):
        return result["credits"]
    else:
        raise AmbientSMSError(result["status"])
Get the number of credits remaining at AmbientSMS