def fit(self, X, y=None):
    n = X.shape[0]
    if X.shape != (n, n):
        raise TypeError("Input must be a square matrix.")
    memory = get_memory(self.memory)
    vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
        X, overwrite_a=not self.copy)
    vals = vals[:, None]
    self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)
    return self
Learn the linear transformation to flipped eigenvalues. Parameters ---------- X : array, shape [n, n] The *symmetric* input similarities. If X is asymmetric, it will be treated as if it were symmetric based on its lower-triangular part.
def build_twisted_request(self, method, url, extra_headers={}, body_producer=None, full_url=False): uri = url if full_url else self._url(url) raw_headers = self.get_headers() if extra_headers: raw_headers.update(extra_headers) headers = http_headers.Headers() for header in raw_headers: headers.addRawHeader(header, raw_headers[header]) agent = client.Agent(reactor) request = agent.request(method, uri, headers, body_producer) return (reactor, request)
Build a request for twisted Args: method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None url (str): Destination URL (full, or relative) Kwargs: extra_headers (dict): Headers (override default connection headers, if any) body_producer (:class:`twisted.web.iweb.IBodyProducer`): Object producing request body full_url (bool): If False, URL is relative Returns: tuple. Tuple with two elements: reactor, and request
def add_item(self, key, value, after=False, index=None, pos_key=None, replace=True): if self._validate_fn: self._validate_fn(value) if (index is not None) and (pos_key is not None): raise ValueError('Either specify index or pos_key, not both.') elif pos_key is not None: try: index = self.index(pos_key) except ValueError: raise KeyError('%r not found' % pos_key) if after and (index is not None): index += 1 if key in self._values: if not replace: raise KeyError('%r is duplicate' % key) if index is not None: del self[key] else: self._values[key] = value return if index is not None: self._order.insert(index, key) else: self._order.append(key) self._values[key] = value
Add an item at a specific location, possibly replacing the existing item. If after is True, we insert *after* the given index, otherwise we insert before. The position is specified using either index or pos_key, the former specifies the position from the start of the array (base 0). pos_key specifies the name of another key, and positions the new key relative to that key. When replacing, the position will be left un-changed unless a location is specified explicitly.
def put_multiple(self, task_args_kwargs_list): if not self.isopen: logger = logging.getLogger(__name__) logger.warning('the drop box is not open') return packages = [ ] for t in task_args_kwargs_list: try: task = t['task'] args = t.get('args', ()) kwargs = t.get('kwargs', {}) package = TaskPackage(task=task, args=args, kwargs=kwargs) except TypeError: package = TaskPackage(task=t, args=(), kwargs={}) packages.append(package) return self.dropbox.put_multiple(packages)
put a list of tasks and their arguments This method can be used to put multiple tasks at once. Calling this method once with multiple tasks can be much faster than calling `put()` multiple times. Parameters ---------- task_args_kwargs_list : list A list of dicts, each with the keys `task`, `args`, and `kwargs` (the parameters of `put()`); a bare task may also be given in place of a dict. Returns ------- list A list of task IDs.
def cmd_add(opts):
    config = load_config(opts.config)
    b = get_blockade(config, opts)
    b.add_container(opts.containers)
Add one or more existing Docker containers to a Blockade group
def print_exception(etype, value, tb, limit=None, file=None):
    if file is None:
        # write to the process stderr rather than reopening /dev/stderr,
        # which is non-portable and leaks a file handle
        file = sys.stderr
    if tb:
        _print(file, 'Traceback (most recent call last):')
        print_tb(tb, limit, file)
    lines = format_exception_only(etype, value)
    for line in lines:
        _print(file, line, '')
Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if traceback is not None, it prints a header "Traceback (most recent call last):"; (2) it prints the exception type and value after the stack trace; (3) if type is SyntaxError and value has the appropriate format, it prints the line where the syntax error occurred with a caret on the next line indicating the approximate position of the error.
def calculate_concordance_by_annotation(graph, annotation, key, cutoff=None): return { value: calculate_concordance(subgraph, key, cutoff=cutoff) for value, subgraph in get_subgraphs_by_annotation(graph, annotation).items() }
Returns the concordance scores for each stratified graph based on the given annotation :param pybel.BELGraph graph: A BEL graph :param str annotation: The annotation to group by. :param str key: The node data dictionary key storing the logFC :param float cutoff: The optional logFC cutoff for significance :rtype: dict[str,tuple]
def Newline(loc=None):
    @llrule(loc, lambda parser: ["newline"])
    def rule(parser):
        result = parser._accept("newline")
        if result is unmatched:
            return result
        return []
    return rule
A rule that accepts a token of kind ``newline`` and returns an empty list.
def FPE(N, rho, k=None):
    fpe = rho * (N + k + 1.) / (N - k - 1.)
    return fpe
Final prediction error criterion. .. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k :validation: double checked versus octave.
def set_sim_params(self, nparams, attr_params):
    for name, value in nparams.items():
        val = value[0] if value[0] is not None else 'none'
        self.h5file.create_array('/parameters', name, obj=val, title=value[1])
    for name, value in attr_params.items():
        self.h5file.set_node_attr('/parameters', name, value)
Store the parameters in `nparams` and `attr_params` in `h5file.root.parameters`. `nparams` (dict) A dict as returned by `get_params()` in `ParticlesSimulation()` The format is: keys: used as parameter name values: (2-element tuple) first element is the parameter value second element is a string used as "title" (description) `attr_params` (dict) A dict whose items are stored as attributes in '/parameters'
def tf_loss(self, states, internals, reward, update, reference=None):
    prediction = self.predict(states=states, internals=internals, update=update)
    return tf.nn.l2_loss(t=(prediction - reward))
Creates the TensorFlow operations for calculating the L2 loss between predicted state values and actual rewards. Args: states: Dict of state tensors. internals: List of prior internal state tensors. reward: Reward tensor. update: Boolean tensor indicating whether this call happens during an update. reference: Optional reference tensor(s), in case of a comparative loss. Returns: Loss tensor
def apt_key_exists(keyid):
    gpg_cmd = 'gpg --ignore-time-conflict --no-options --no-default-keyring --keyring /etc/apt/trusted.gpg'
    with settings(hide('everything'), warn_only=True):
        res = run('%(gpg_cmd)s --fingerprint %(keyid)s' % locals())
    return res.succeeded
Check if the given key id exists in apt keyring.
def add_missing_particles(st, rad='calc', tries=50, **kwargs):
    if rad == 'calc':
        rad = guess_add_radii(st)
    guess, npart = feature_guess(st, rad, **kwargs)
    tries = np.min([tries, npart])
    accepts, new_poses = check_add_particles(
        st, guess[:tries], rad=rad, **kwargs)
    return accepts, new_poses
Attempts to add missing particles to the state. Operates by: (1) featuring the difference image using feature_guess, (2) attempting to add the featured positions using check_add_particles. Parameters ---------- st : :class:`peri.states.State` The state to check adding particles to. rad : Float or 'calc', optional The radius of the newly-added particles and of the feature size for featuring. Default is 'calc', which uses the median of the state's current radii. tries : Int, optional How many particles to attempt to add. Only tries to add the first ``tries`` particles, in order of mass. Default is 50. Other Parameters ---------------- invert : Bool, optional Whether to invert the image. Default is ``True``, i.e. dark particles minmass : Float or None, optional The minimum mass/masscut of a particle. Default is ``None`` = calculated by ``feature_guess``. use_tp : Bool, optional Whether to use trackpy in feature_guess. Default is False, since trackpy cuts out particles at the edge. do_opt : Bool, optional Whether to optimize the particle position before checking if it should be kept. Default is True (optimizes position). im_change_frac : Float, optional How good the change in error needs to be relative to the change in the difference image. Default is 0.2; i.e. if the error does not decrease by 20% of the change in the difference image, do not add the particle. min_derr : Float or '3sig', optional The minimal improvement in error to add a particle. Default is ``'3sig' = 3*st.sigma``. Returns ------- accepts : Int The number of added particles new_poses : [N,3] list List of the positions of the added particles. If ``do_opt==True``, then these positions will differ from the input 'guess'.
def reviews(self, packageName, filterByDevice=False, sort=2, nb_results=None, offset=None): path = REVIEWS_URL + "?doc={}&sort={}".format(requests.utils.quote(packageName), sort) if nb_results is not None: path += "&n={}".format(nb_results) if offset is not None: path += "&o={}".format(offset) if filterByDevice: path += "&dfil=1" data = self.executeRequestApi2(path) output = [] for review in data.payload.reviewResponse.getResponse.review: output.append(utils.parseProtobufObj(review)) return output
Browse reviews for an application Args: packageName (str): app unique ID. filterByDevice (bool): filter results for current device sort (int): sorting criteria (values are unknown) nb_results (int): max number of reviews to return offset (int): return reviews starting from an offset value Returns: dict object containing all the protobuf data returned from the api
def gce_list_aggregated(service=None, key_name='name', **kwargs):
    resp_list = []
    req = service.aggregatedList(**kwargs)
    while req is not None:
        resp = req.execute()
        for location, item in resp['items'].items():
            if key_name in item:
                resp_list.extend(item[key_name])
        req = service.aggregatedList_next(previous_request=req, previous_response=resp)
    return resp_list
General aggregated list function for the GCE service.
def _epd_residual2(coeffs, times, mags, errs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):
    f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd)
    residual = mags - f
    return residual
This is the residual function to minimize using scipy.optimize.least_squares. This variant is for :py:func:`.epd_magseries_extparams`.
def ddel_tasks(provider, user_ids=None, job_ids=None, task_ids=None, labels=None,
               create_time_min=None, create_time_max=None):
    deleted_tasks, error_messages = provider.delete_jobs(
        user_ids, job_ids, task_ids, labels, create_time_min, create_time_max)
    for msg in error_messages:
        print(msg)
    return deleted_tasks
Kill jobs or job tasks. This function separates ddel logic from flag parsing and user output. Users of ddel who intend to access the data programmatically should use this. Args: provider: an instantiated dsub provider. user_ids: a set of user ids who "own" the job(s) to delete. job_ids: a set of job ids to delete. task_ids: a set of task ids to delete. labels: a set of LabelParam, each must match the job(s) to be cancelled. create_time_min: a timezone-aware datetime value for the earliest create time of a task, inclusive. create_time_max: a timezone-aware datetime value for the most recent create time of a task, inclusive. Returns: list of job ids which were deleted.
def clique(graph, id):
    clique = [id]
    for n in graph.nodes:
        friend = True
        for id in clique:
            if n.id == id or graph.edge(n.id, id) == None:
                friend = False
                break
        if friend:
            clique.append(n.id)
    return clique
Returns the largest possible clique for the node with given id.
def save(self): repoInfoPath = os.path.join(self.__path, ".pyrepinfo") try: fdinfo = open(repoInfoPath, 'wb') except Exception as e: raise Exception("unable to open repository info for saving (%s)"%e) try: pickle.dump( self, fdinfo, protocol=2 ) except Exception as e: fdinfo.flush() os.fsync(fdinfo.fileno()) fdinfo.close() raise Exception( "Unable to save repository info (%s)"%e ) finally: fdinfo.flush() os.fsync(fdinfo.fileno()) fdinfo.close() repoTimePath = os.path.join(self.__path, ".pyrepstate") try: self.__state = ("%.6f"%time.time()).encode() with open(repoTimePath, 'wb') as fdtime: fdtime.write( self.__state ) fdtime.flush() os.fsync(fdtime.fileno()) except Exception as e: raise Exception("unable to open repository time stamp for saving (%s)"%e)
Save repository .pyrepinfo to disk.
def grab(bbox=None, childprocess=None, backend=None):
    if childprocess is None:
        childprocess = childprocess_default_value()
    return _grab(
        to_file=False, childprocess=childprocess, backend=backend, bbox=bbox)
Copy the contents of the screen to PIL image memory. :param bbox: optional bounding box (x1,y1,x2,y2) :param childprocess: pyscreenshot can cause an error if it is used on multiple different virtual displays and the back-end does not run in a separate process. Some back-ends always run in a separate process: scrot, imagemagick. The default is False if the program was started inside IDLE, otherwise it is True. :param backend: back-end can be forced if set (examples: scrot, wx, ..), otherwise the back-end is chosen automatically
def search_point(self, lat, lng, filters=None, startDate=None, endDate=None, types=None, type=None): searchAreaWkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (lng, lat,lng,lat,lng,lat,lng,lat,lng,lat) return self.search(searchAreaWkt=searchAreaWkt, filters=filters, startDate=startDate, endDate=endDate, types=types)
Perform a catalog search over a specific point, specified by lat,lng Args: lat: latitude lng: longitude filters: Array of filters. Optional. Example: [ "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')", "cloudCover < 10", "offNadirAngle < 10" ] startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" types: Array of types to search for. Optional. Example (and default): ["Acquisition"] Returns: catalog search resultset
def _get_key_internal(self, *args, **kwargs): if args[1] is not None and 'force' in args[1]: key, res = super(Bucket, self)._get_key_internal(*args, **kwargs) if key: mimicdb.backend.sadd(tpl.bucket % self.name, key.name) mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"'))) return key, res key = None if mimicdb.backend.sismember(tpl.bucket % self.name, args[0]): key = Key(self) key.name = args[0] return key, None
Return None if key is not in the bucket set. Pass 'force' in the headers to check S3 for the key, and after fetching the key from S3, save the metadata and key to the bucket set.
def is_lower(self):
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if len(self.val) == 0:
        raise ValueError('val is empty')
    if self.val != self.val.lower():
        self._err('Expected <%s> to contain only lowercase chars, but did not.' % self.val)
    return self
Asserts that val is non-empty string and all characters are lowercase.
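A minimal usage sketch, assuming this method lives on an assertpy-style fluent assertion object produced by an `assert_that()` factory (that factory is not shown in this snippet):
>>> assert_that('foo').is_lower()     # passes and returns the assertion object for chaining
>>> assert_that('FOO').is_lower()     # reports the failure via self._err(...)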
def copy(src, dst): (szip, dzip) = (src.endswith(".zip"), dst.endswith(".zip")) logging.info("Copy: %s => %s"%(src, dst)) if szip and dzip: shutil.copy2(src, dst) elif szip: with zipfile.ZipFile(src, mode='r') as z: tmpdir = tempfile.mkdtemp() try: z.extractall(tmpdir) if len(z.namelist()) != 1: raise RuntimeError("The zip file '%s' should only have one "\ "compressed file"%src) tmpfile = join(tmpdir,z.namelist()[0]) try: os.remove(dst) except OSError: pass shutil.move(tmpfile, dst) finally: shutil.rmtree(tmpdir, ignore_errors=True) elif dzip: with zipfile.ZipFile(dst, mode='w', compression=ZIP_DEFLATED) as z: z.write(src, arcname=basename(src)) else: shutil.copy2(src, dst)
File copy that supports compressing and decompressing zip files
async def receive_json(self, content, **kwargs):
    if isinstance(content, dict) and "stream" in content and "payload" in content:
        stream_name = content["stream"]
        payload = content["payload"]
        if stream_name not in self.applications_accepting_frames:
            raise ValueError("Invalid multiplexed frame received (stream not mapped)")
        await self.send_upstream(
            message={
                "type": "websocket.receive",
                "text": await self.encode_json(payload),
            },
            stream_name=stream_name,
        )
        return
    else:
        raise ValueError("Invalid multiplexed frame received (no channel/payload key)")
Route the message down the correct stream.
def leave(self):
    if self.joined:
        p = MucPresence(to_jid=self.room_jid, stanza_type="unavailable")
        self.manager.stream.send(p)
Send a leave request for the room.
def pretty_eta(seconds_left):
    minutes_left = seconds_left // 60
    seconds_left %= 60
    hours_left = minutes_left // 60
    minutes_left %= 60
    days_left = hours_left // 24
    hours_left %= 24

    def helper(cnt, name):
        return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))

    if days_left > 0:
        msg = helper(days_left, 'day')
        if hours_left > 0:
            msg += ' and ' + helper(hours_left, 'hour')
        return msg
    if hours_left > 0:
        msg = helper(hours_left, 'hour')
        if minutes_left > 0:
            msg += ' and ' + helper(minutes_left, 'minute')
        return msg
    if minutes_left > 0:
        return helper(minutes_left, 'minute')
    return 'less than a minute'
Print the number of seconds in human readable format. Examples: 2 days 2 hours and 37 minutes less than a minute Parameters ---------- seconds_left: int Number of seconds to be converted to the ETA Returns ------- eta: str String representing the pretty ETA.
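A short example of the formatting behaviour; the outputs follow directly from the integer arithmetic in the code above:
>>> pretty_eta(90061)      # 1 day, 1 hour, 1 minute, 1 second left
'1 day and 1 hour'
>>> pretty_eta(59)
'less than a minute'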
def backends(cls):
    allowed = (
        keyring
        for keyring in filter(backend._limit, backend.get_all_keyring())
        if not isinstance(keyring, ChainerBackend) and keyring.priority > 0
    )
    return sorted(allowed, key=backend.by_priority, reverse=True)
Discover all keyrings for chaining.
def neighbors_from_pixelization(self, pixels, ridge_points): return pixelization_util.voronoi_neighbors_from_pixels_and_ridge_points(pixels=pixels, ridge_points=np.asarray(ridge_points))
Compute the neighbors of every Voronoi pixel, as an ndarray of the pixel indexes that each pixel shares a vertex with. The ridge points of the Voronoi grid are used to derive this. Parameters ---------- ridge_points : scipy.spatial.Voronoi.ridge_points Each Voronoi-ridge (two indexes representing a pixel mapping_matrix).
def verify_authority(self):
    try:
        if not self.blockchain.rpc.verify_authority(self.json()):
            raise InsufficientAuthorityError
    except Exception as e:
        raise e
Verify the authority of the signed transaction
def _prompt_for_values(d):
    for key, value in d.items():
        if isinstance(value, CommentedMap):
            _prompt_for_values(value)
        elif isinstance(value, list):
            for item in value:
                _prompt_for_values(item)
        else:
            typ = type(value)
            if isinstance(value, ScalarFloat):
                typ = float
            new_value = click.prompt(key, type=typ, default=value)
            d[key] = new_value
    return d
Update the descriptive metadata interactively. Uses values entered by the user. Note that the function keeps recursing whenever a value is another ``CommentedMap`` or a ``list``. The function works as passing dictionaries and lists into a function edits the values in place.
def echo(self, gain_in=0.8, gain_out=0.9, n_echos=1, delays=[60], decays=[0.4]): if not is_number(gain_in) or gain_in <= 0 or gain_in > 1: raise ValueError("gain_in must be a number between 0 and 1.") if not is_number(gain_out) or gain_out <= 0 or gain_out > 1: raise ValueError("gain_out must be a number between 0 and 1.") if not isinstance(n_echos, int) or n_echos <= 0: raise ValueError("n_echos must be a positive integer.") if not isinstance(delays, list): raise ValueError("delays must be a list") if len(delays) != n_echos: raise ValueError("the length of delays must equal n_echos") if any((not is_number(p) or p <= 0) for p in delays): raise ValueError("the elements of delays must be numbers > 0") if not isinstance(decays, list): raise ValueError("decays must be a list") if len(decays) != n_echos: raise ValueError("the length of decays must equal n_echos") if any((not is_number(p) or p <= 0 or p > 1) for p in decays): raise ValueError( "the elements of decays must be between 0 and 1" ) effect_args = ['echo', '{:f}'.format(gain_in), '{:f}'.format(gain_out)] for i in range(n_echos): effect_args.extend([ '{}'.format(delays[i]), '{}'.format(decays[i]) ]) self.effects.extend(effect_args) self.effects_log.append('echo') return self
Add echoing to the audio. Echoes are reflected sound and can occur naturally amongst mountains (and sometimes large buildings) when talking or shouting; digital echo effects emulate this behaviour and are often used to help fill out the sound of a single instrument or vocal. The time difference between the original signal and the reflection is the 'delay' (time), and the loudness of the reflected signal is the 'decay'. Multiple echoes can have different delays and decays. Parameters ---------- gain_in : float, default=0.8 Input volume, between 0 and 1 gain_out : float, default=0.9 Output volume, between 0 and 1 n_echos : int, default=1 Number of reflections delays : list, default=[60] List of delays in milliseconds decays : list, default=[0.4] List of decays, relative to gain_in, between 0 and 1 See Also -------- echos, reverb, chorus
def list(self, all_pages=False, **kwargs):
    self._separate(kwargs)
    return super(Resource, self).list(all_pages=all_pages, **kwargs)
Return a list of notification templates. Note here configuration-related fields like 'notification_configuration' and 'channels' will not be used even provided. If one or more filters are provided through keyword arguments, filter the results accordingly. If no filters are provided, return all results. =====API DOCS===== Retrieve a list of objects. :param all_pages: Flag that if set, collect all pages of content from the API when returning results. :type all_pages: bool :param page: The page to show. Ignored if all_pages is set. :type page: int :param query: Contains 2-tuples used as query parameters to filter resulting resource objects. :type query: list :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects. :returns: A JSON object containing details of all resource objects returned by Tower backend. :rtype: dict =====API DOCS=====
def load_npy_to_any(path='', name='file.npy'):
    file_path = os.path.join(path, name)
    try:
        return np.load(file_path).item()
    except Exception:
        # fall back to a plain array load; raise only if both attempts fail
        try:
            return np.load(file_path)
        except Exception:
            raise Exception("[!] Fail to load %s" % file_path)
Load `.npy` file. Parameters ------------ path : str Path to the file (optional). name : str File name. Examples --------- - see tl.files.save_any_to_npy()
def _saliency_map(self, a, image, target, labels, mask, fast=False):
    alphas = a.gradient(image, target) * mask
    if fast:
        betas = -np.ones_like(alphas)
    else:
        betas = np.sum([
            a.gradient(image, label) * mask - alphas
            for label in labels], 0)
    salmap = np.abs(alphas) * np.abs(betas) * np.sign(alphas * betas)
    idx = np.argmin(salmap)
    idx = np.unravel_index(idx, mask.shape)
    pix_sign = np.sign(alphas)[idx]
    return idx, pix_sign
Implements Algorithm 3 in manuscript
@contextlib.contextmanager  # generator-based context manager, per the docstring
def atomic_write(filename):
    f = _tempfile(os.fsencode(filename))
    try:
        yield f
    finally:
        f.close()
        os.replace(f.name, filename)
Open a NamedTemporaryFile handle in a context manager
def list_trilegal_filtersystems():
    print('%-40s %s' % ('FILTER SYSTEM NAME', 'DESCRIPTION'))
    print('%-40s %s' % ('------------------', '-----------'))
    for key in sorted(TRILEGAL_FILTER_SYSTEMS.keys()):
        print('%-40s %s' % (key, TRILEGAL_FILTER_SYSTEMS[key]['desc']))
This just lists all the filter systems available for TRILEGAL.
def convert_concat(params, w_name, scope_name, inputs, layers, weights, names): print('Converting concat ...') concat_nodes = [layers[i] for i in inputs] if len(concat_nodes) == 1: layers[scope_name] = concat_nodes[0] return if names == 'short': tf_name = 'CAT' + random_string(5) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) cat = keras.layers.Concatenate(name=tf_name, axis=params['axis']) layers[scope_name] = cat(concat_nodes)
Convert concatenation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def free(self):
    if not self.borrowed:
        self.xmlnode.unlinkNode()
        self.xmlnode.freeNode()
        self.xmlnode = None
Unlink and free the XML node owned by `self`.
def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False):
    hsv = rgb_to_hsv(im)
    if is_random:
        hout = np.random.uniform(-hout, hout)
    if is_offset:
        hsv[..., 0] += hout
    else:
        hsv[..., 0] = hout
    if is_clip:
        hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf)
    rgb = hsv_to_rgb(hsv)
    return rgb
Adjust hue of an RGB image. This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type. For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__. Parameters ----------- im : numpy.array An image with values between 0 and 255. hout : float The scale value for adjusting hue. - If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue. - If is_offset is True, add this value as the offset to the hue channel. is_offset : boolean Whether `hout` is added on HSV as offset or not. Default is True. is_clip : boolean If HSV value smaller than 0, set to 0. Default is True. is_random : boolean If True, randomly change hue. Default is False. Returns ------- numpy.array A processed image. Examples --------- Random, add a random value between -0.2 and 0.2 as the offset to every hue values. >>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False) Non-random, make all hue to green. >>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False) References ----------- - `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__. - `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__. - `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
def intersection(self, other, default=None):
    x1_i = max(self.x1, other.x1)
    y1_i = max(self.y1, other.y1)
    x2_i = min(self.x2, other.x2)
    y2_i = min(self.y2, other.y2)
    if x1_i > x2_i or y1_i > y2_i:
        return default
    else:
        return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
Compute the intersection bounding box of this bounding box and another one. Note that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box will exist, but then also has a height and width of zero. Parameters ---------- other : imgaug.BoundingBox Other bounding box with which to generate the intersection. default : any, optional Default value to return if there is no intersection. Returns ------- imgaug.BoundingBox or any Intersection bounding box of the two bounding boxes if there is an intersection. If there is no intersection, the default value will be returned, which can by anything.
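A brief sketch of the expected behaviour, assuming the imgaug-style `BoundingBox(x1, y1, x2, y2)` constructor used by this class:
>>> a = BoundingBox(x1=0, y1=0, x2=4, y2=4)
>>> b = BoundingBox(x1=2, y1=2, x2=6, y2=6)
>>> inter = a.intersection(b)
>>> (inter.x1, inter.y1, inter.x2, inter.y2)
(2, 2, 4, 4)
>>> a.intersection(BoundingBox(x1=10, y1=10, x2=12, y2=12)) is None   # no overlap -> default
True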
def pickByDistribution(distribution, r=None):
    if r is None:
        r = random
    x = r.uniform(0, sum(distribution))
    for i, d in enumerate(distribution):
        if x <= d:
            return i
        x -= d
Pick a value according to the provided distribution. Example: :: pickByDistribution([.2, .1]) Returns 0 two thirds of the time and 1 one third of the time. :param distribution: Probability distribution. Need not be normalized. :param r: Instance of random.Random. Uses the system instance if one is not provided.
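A small sanity-check sketch; the counts are probabilistic, but with 3000 draws the index with twice the weight is drawn roughly twice as often:
>>> counts = [0, 0]
>>> for _ in range(3000):
...     counts[pickByDistribution([.2, .1])] += 1
>>> counts[0] > counts[1]
True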
def prep(ctx, commit=True):
    cfg = config.load()
    scm = scm_provider(cfg.project_root, commit=commit, ctx=ctx)
    if not scm.workdir_is_clean():
        notify.failure("You have uncommitted changes, please commit or stash them!")
    setup_cfg = cfg.rootjoin('setup.cfg')
    if os.path.exists(setup_cfg):
        with io.open(setup_cfg, encoding='utf-8') as handle:
            data = handle.readlines()
        changed = False
        for i, line in enumerate(data):
            if any(line.startswith(prefix) for prefix in ('tag_build', 'tag_date')):
                # comment out build-tag lines so the release is not marked as a dev build
                data[i] = '#' + line
                changed = True
        if changed and commit:
            notify.info("Rewriting 'setup.cfg'...")
            with io.open(setup_cfg, 'w', encoding='utf-8') as handle:
                handle.write(''.join(data))
            scm.add_file('setup.cfg')
        elif changed:
            notify.warning("WOULD rewrite 'setup.cfg', but --no-commit was passed")
    else:
        notify.warning("Cannot rewrite 'setup.cfg', none found!")
    ctx.run('python setup.py -q develop -U')
    version = capture('python setup.py --version')
    ctx.run('invoke clean --all build --docs release.dist')
    for distfile in os.listdir('dist'):
        trailer = distfile.split('-' + version)[1]
        trailer, _ = os.path.splitext(trailer)
        if trailer and trailer[0] not in '.-':
            notify.failure("The version found in 'dist' seems to be"
                           " a pre-release one! [{}{}]".format(version, trailer))
    scm.commit(ctx.rituals.release.commit.message.format(version=version))
    scm.tag(ctx.rituals.release.tag.name.format(version=version),
            ctx.rituals.release.tag.message.format(version=version))
Prepare for a release.
def getTemporalDelay(inferenceElement, key=None):
    if inferenceElement in (InferenceElement.prediction,
                            InferenceElement.encodings):
        return 1
    if inferenceElement in (InferenceElement.anomalyScore,
                            InferenceElement.anomalyLabel,
                            InferenceElement.classification,
                            InferenceElement.classConfidences):
        return 0
    if inferenceElement in (InferenceElement.multiStepPredictions,
                            InferenceElement.multiStepBestPredictions):
        return int(key)
    return 0
Returns the number of records that elapse between when an inference is made and when the corresponding input record will appear. For example, a multistep prediction for 3 timesteps out will have a delay of 3 Parameters: ----------------------------------------------------------------------- inferenceElement: The InferenceElement value being delayed key: If the inference is a dictionary type, this specifies key for the sub-inference that is being delayed
def flip(self, axis=HORIZONTAL):
    if axis == HORIZONTAL:
        self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT)
    if axis == VERTICAL:
        self.img = self.img.transpose(Image.FLIP_TOP_BOTTOM)
Flips the layer, either HORIZONTAL or VERTICAL.
def launchapp(self, cmd, args=[], delay=0, env=1, lang="C"): try: atomac.NativeUIElement.launchAppByBundleId(cmd) return 1 except RuntimeError: if atomac.NativeUIElement.launchAppByBundlePath(cmd, args): try: time.sleep(int(delay)) except ValueError: time.sleep(5) return 1 else: raise LdtpServerException(u"Unable to find app '%s'" % cmd)
Launch application. @param cmd: Command line string to execute. @type cmd: string @param args: Arguments to the application @type args: list @param delay: Delay after the application is launched @type delay: int @param env: GNOME accessibility environment to be set or not @type env: int @param lang: Application language to be used @type lang: string @return: 1 on success @rtype: integer @raise LdtpServerException: When command fails
def from_spec(spec):
    exploration = util.get_object(
        obj=spec,
        predefined_objects=tensorforce.core.explorations.explorations
    )
    assert isinstance(exploration, Exploration)
    return exploration
Creates an exploration object from a specification dict.
def set_physical_plan(self, physical_plan):
    if not physical_plan:
        self.physical_plan = None
        self.id = None
    else:
        self.physical_plan = physical_plan
        self.id = physical_plan.topology.id
    self.trigger_watches()
Set the physical plan and trigger the registered watches.
def tempo(self, factor, audio_type=None, quick=False): if not is_number(factor) or factor <= 0: raise ValueError("factor must be a positive number") if factor < 0.5 or factor > 2: logger.warning( "Using an extreme time stretching factor. " "Quality of results will be poor" ) if abs(factor - 1.0) <= 0.1: logger.warning( "For this stretch factor, " "the stretch effect has better performance." ) if audio_type not in [None, 'm', 's', 'l']: raise ValueError( "audio_type must be one of None, 'm', 's', or 'l'." ) if not isinstance(quick, bool): raise ValueError("quick must be a boolean.") effect_args = ['tempo'] if quick: effect_args.append('-q') if audio_type is not None: effect_args.append('-{}'.format(audio_type)) effect_args.append('{:f}'.format(factor)) self.effects.extend(effect_args) self.effects_log.append('tempo') return self
Time stretch audio without changing pitch. This effect uses the WSOLA algorithm. The audio is chopped up into segments which are then shifted in the time domain and overlapped (cross-faded) at points where their waveforms are most similar as determined by measurement of least squares. Parameters ---------- factor : float The ratio of new tempo to the old tempo. For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%. audio_type : str Type of audio, which optimizes algorithm parameters. One of: * m : Music, * s : Speech, * l : Linear (useful when factor is close to 1), quick : bool, default=False If True, this effect will run faster but with lower sound quality. See Also -------- stretch, speed, pitch
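A hedged usage sketch, assuming the usual pysox workflow where this method lives on a Transformer; the file names are placeholders:
>>> import sox
>>> tfm = sox.Transformer()
>>> tfm.tempo(1.25, audio_type='m')   # speed music up by 25% without changing pitch
>>> tfm.build('input.wav', 'faster.wav')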
def load(self, reload=False, require_load=False): if reload: self.config = None if self.config: self._log.debug('Returning cached config instance. Use ' '``reload=True`` to avoid caching!') return path = self._effective_path() config_filename = self._effective_filename() self._active_path = [join(_, config_filename) for _ in path] for dirname in path: conf_name = join(dirname, config_filename) readable = self.check_file(conf_name) if readable: action = 'Updating' if self._loaded_files else 'Loading initial' self._log.info('%s config from %s', action, conf_name) self.read(conf_name) if conf_name == expanduser("~/.%s/%s/%s" % ( self.group_name, self.app_name, self.filename)): self._log.warning( "DEPRECATION WARNING: The file " "'%s/.%s/%s/app.ini' was loaded. The XDG " "Basedir standard requires this file to be in " "'%s/.config/%s/%s/app.ini'! This location " "will no longer be parsed in a future version of " "config_resolver! You can already (and should) move " "the file!", expanduser("~"), self.group_name, self.app_name, expanduser("~"), self.group_name, self.app_name) self._loaded_files.append(conf_name) if not self._loaded_files and not require_load: self._log.warning( "No config file named %s found! Search path was %r", config_filename, path) elif not self._loaded_files and require_load: raise IOError("No config file named %s found! Search path " "was %r" % (config_filename, path))
Searches for an appropriate config file. If found, loads the file into the current instance. This method can also be used to reload a configuration. Note that you may want to set ``reload`` to ``True`` to clear the configuration before loading in that case. Without doing that, values will remain available even if they have been removed from the config files. :param reload: if set to ``True``, the existing values are cleared before reloading. :param require_load: If set to ``True`` this will raise a :py:exc:`IOError` if no config file has been found to load.
def mock_request():
    current_site = Site.objects.get_current()
    request = HttpRequest()
    request.META['SERVER_NAME'] = current_site.domain
    return request
Generate a fake request object to allow oEmbeds to use context processors.
def wait_for( self, timeout=10000, interval=1000, asserter=lambda x: x): if not callable(asserter): raise TypeError('Asserter must be callable.') @retry( retry_on_exception=lambda ex: isinstance(ex, WebDriverException), stop_max_delay=timeout, wait_fixed=interval ) def _wait_for(driver): asserter(driver) return driver return _wait_for(self)
Wait for the driver until the given condition is satisfied. Support: Android iOS Web(WebView) Args: timeout(int): How long we should be retrying stuff. interval(int): How long between retries. asserter(callable): The asserter func to determine the result. Returns: Return the driver. Raises: WebDriverException.
def _raw_at_zoom(config, zooms):
    params_per_zoom = {}
    for zoom in zooms:
        params = {}
        for name, element in config.items():
            if name not in _RESERVED_PARAMETERS:
                out_element = _element_at_zoom(name, element, zoom)
                if out_element is not None:
                    params[name] = out_element
        params_per_zoom[zoom] = params
    return params_per_zoom
Return parameter dictionary per zoom level.
def _add_uninstall(self, context):
    contents = self._render_template('uninstall.sh', context)
    self.config.setdefault('files', [])
    self._add_unique_file({
        "path": "/uninstall.sh",
        "contents": contents,
        "mode": "755"
    })
generates uninstall.sh and adds it to included files
def embed_code_links(app, exception): if exception is not None: return if not app.builder.config.plot_gallery: return if app.builder.name not in ['html', 'readthedocs']: return print('Embedding documentation hyperlinks in examples..') gallery_conf = app.config.sphinx_gallery_conf gallery_dirs = gallery_conf['gallery_dirs'] if not isinstance(gallery_dirs, list): gallery_dirs = [gallery_dirs] for gallery_dir in gallery_dirs: _embed_code_links(app, gallery_conf, gallery_dir)
Embed hyperlinks to documentation into example code
def SETGE(cpu, dest): dest.write(Operators.ITEBV(dest.size, cpu.SF == cpu.OF, 1, 0))
Sets byte if greater or equal. :param cpu: current CPU. :param dest: destination operand.
def _data_analysis(self, data_view_id): failure_message = "Error while retrieving data analysis for data view {}".format(data_view_id) return self._get_success_json(self._get(routes.data_analysis(data_view_id), failure_message=failure_message))
Data analysis endpoint. :param data_view_id: The model identifier (id number for data views) :type data_view_id: str :return: dictionary containing information about the data, e.g. dCorr and tsne
def call_method_with_acl(self, method_name, packet, *args):
    if not self.is_method_allowed(method_name):
        self.error('method_access_denied',
                   'You do not have access to method "%s"' % method_name)
        return
    return self.call_method(method_name, packet, *args)
You should always use this function to call the methods, as it checks if the user is allowed according to the ACLs. If you override :meth:`process_packet` or :meth:`process_event`, you should definitely want to use this instead of ``getattr(self, 'my_method')()``
def flag_inner_classes(obj):
    for tup in class_members(obj):
        tup[1]._parent = obj
        tup[1]._parent_inst = None
        tup[1].__getattr__ = my_getattr
        flag_inner_classes(tup[1])
Mutates any attributes on ``obj`` which are classes, with link to ``obj``. Adds a convenience accessor which instantiates ``obj`` and then calls its ``setup`` method. Recurses on those objects as well.
def proto_02_03_IVfast(abf=exampleABF):
    av1, sd1 = swhlab.plot.IV(abf, .6, .9, True)
    swhlab.plot.save(abf, tag='iv1')
    Xs = abf.clampValues(.6)
    abf.saveThing([Xs, av1], 'iv')
fast sweeps, 1 step per sweep, for clean IV without fast currents.
def calc_inbag(n_samples, forest):
    if not forest.bootstrap:
        e_s = "Cannot calculate the inbag from a forest that has "
        e_s += "bootstrap=False"
        raise ValueError(e_s)
    n_trees = forest.n_estimators
    inbag = np.zeros((n_samples, n_trees))
    sample_idx = []
    for t_idx in range(n_trees):
        sample_idx.append(
            _generate_sample_indices(forest.estimators_[t_idx].random_state,
                                     n_samples))
        inbag[:, t_idx] = np.bincount(sample_idx[-1], minlength=n_samples)
    return inbag
Derive samples used to create trees in scikit-learn RandomForest objects. Recovers the samples in each tree from the random state of that tree using :func:`forest._generate_sample_indices`. Parameters ---------- n_samples : int The number of samples used to fit the scikit-learn RandomForest object. forest : RandomForest Regressor or Classifier object that is already fit by scikit-learn. Returns ------- Array that records how many times a data point was placed in a tree. Columns are individual trees. Rows are the number of times a sample was used in a tree.
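A minimal sketch of calling this on a fitted, bootstrap-enabled scikit-learn forest; the shape follows from the `np.zeros((n_samples, n_trees))` allocation in the code above:
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=100, random_state=0)
>>> rf = RandomForestRegressor(n_estimators=10, bootstrap=True, random_state=0).fit(X, y)
>>> calc_inbag(X.shape[0], rf).shape
(100, 10)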
def take_shas_of_all_files(G, settings): global ERROR_FN sprint = settings["sprint"] error = settings["error"] ERROR_FN = error sha_dict = {} all_files = [] for target in G.nodes(data=True): sprint("About to take shas of files in target '{}'".format(target[0]), level="verbose") if 'dependencies' in target[1]: sprint("It has dependencies", level="verbose") deplist = [] for dep in target[1]['dependencies']: glist = glob.glob(dep) if glist: for oneglob in glist: deplist.append(oneglob) else: deplist.append(dep) target[1]['dependencies'] = list(deplist) for dep in target[1]['dependencies']: sprint(" - {}".format(dep), level="verbose") all_files.append(dep) if 'output' in target[1]: sprint("It has outputs", level="verbose") for out in acts.get_all_outputs(target[1]): sprint(" - {}".format(out), level="verbose") all_files.append(out) if len(all_files): sha_dict['files'] = {} extant_files = [] for item in all_files: if item not in extant_files and os.path.isfile(item): extant_files.append(item) pool = Pool() results = pool.map(get_sha, extant_files) pool.close() pool.join() for fn, sha in zip(extant_files, results): sha_dict['files'][fn] = {'sha': sha} return sha_dict sprint("No dependencies", level="verbose")
Takes sha1 hash of all dependencies and outputs of all targets Args: The graph we are going to build The settings dictionary Returns: A dictionary where the keys are the filenames and the value is the sha1 hash
def pruneUI(dupeList, mainPos=1, mainLen=1): dupeList = sorted(dupeList) print for pos, val in enumerate(dupeList): print "%d) %s" % (pos + 1, val) while True: choice = raw_input("[%s/%s] Keepers: " % (mainPos, mainLen)).strip() if not choice: print ("Please enter a space/comma-separated list of numbers or " "'all'.") continue elif choice.lower() == 'all': return [] try: out = [int(x) - 1 for x in choice.replace(',', ' ').split()] return [val for pos, val in enumerate(dupeList) if pos not in out] except ValueError: print("Invalid choice. Please enter a space/comma-separated list" "of numbers or 'all'.")
Display a list of files and prompt for ones to be kept. The user may enter ``all`` or one or more numbers separated by spaces and/or commas. .. note:: It is impossible to accidentally choose to keep none of the displayed files. :param dupeList: A list of duplicate file paths :param mainPos: Used to display "set X of Y" :param mainLen: Used to display "set X of Y" :type dupeList: :class:`~__builtins__.list` :type mainPos: :class:`~__builtins__.int` :type mainLen: :class:`~__builtins__.int` :returns: A list of files to be deleted. :rtype: :class:`~__builtins__.list`
def add_droplets(self, droplet):
    droplets = droplet
    if not isinstance(droplets, list):
        droplets = [droplet]
    resources = self.__extract_resources_from_droplets(droplets)
    if len(resources) > 0:
        return self.__add_resources(resources)
    return False
Add the Tag to a Droplet. Attributes accepted at creation time: droplet: array of string or array of int, or array of Droplets.
def type(self):
    robot_tables = [table for table in self.tables
                    if not isinstance(table, UnknownTable)]
    if len(robot_tables) == 0:
        return None
    for table in self.tables:
        if isinstance(table, TestcaseTable):
            return "suite"
    return "resource"
Return 'suite' or 'resource' or None This will return 'suite' if a testcase table is found; It will return 'resource' if at least one robot table is found. If no tables are found it will return None
def rdkitmol_Hs(self):
    if self.__rdkitmol_Hs:
        return self.__rdkitmol_Hs
    else:
        try:
            self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol)
            return self.__rdkitmol_Hs
        except:
            return None
RDKit object of the chemical, with hydrogen. If RDKit is not available, holds None. For examples of what can be done with RDKit, see `their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
def record_conflict(self, assignment, var, val, delta):
    "Record conflicts caused by addition or deletion of a Queen."
    n = len(self.vars)
    self.rows[val] += delta
    self.downs[var + val] += delta
    self.ups[var - val + n - 1] += delta
Record conflicts caused by addition or deletion of a Queen.
def missing_particle(separation=0.0, radius=RADIUS, SNR=20):
    s = init.create_two_particle_state(imsize=6*radius+4, axis='x', sigma=1.0/SNR,
                                       delta=separation, radius=radius,
                                       stateargs={'varyn': True}, psfargs={'error': 1e-6})
    s.obj.typ[1] = 0.
    s.reset()
    return s, s.obj.pos.copy()
create a two particle state and compare it to featuring using a single particle guess
def _create_element_list_(self):
    element_set = stoich.elements(self.compounds)
    return sorted(list(element_set))
Extract an alphabetically sorted list of elements from the compounds of the material. :returns: An alphabetically sorted list of elements.
def div_img(img1, div2):
    if is_img(div2):
        return img1.get_data() / div2.get_data()
    elif isinstance(div2, (float, int)):
        return img1.get_data() / div2
    else:
        raise NotImplementedError('Cannot divide {}({}) by '
                                  '{}({})'.format(type(img1), img1,
                                                  type(div2), div2))
Pixelwise division or divide by a number
def create(self):
    data = {
        "name": self.name,
        "ip_address": self.ip_address,
    }
    domain = self.get_data("domains", type=POST, params=data)
    return domain
Create a new domain
def present(self, results):
    "Present the results as a list."
    for (score, d) in results:
        doc = self.documents[d]
        print("%5.2f|%25s | %s" % (100 * score, doc.url, doc.title[:45].expandtabs()))
Present the results as a list.
def mmPrettyPrintConnections(self):
    text = ""
    # header line describing the per-segment format below
    text += ("Segments: (format => "
             "(segment) [(presynaptic cell=permanence), ...])\n")
    text += "------------------------------------\n"
    columns = range(self.numberOfColumns())
    for column in columns:
        cells = self.cellsForColumn(column)
        for cell in cells:
            segmentDict = dict()
            for seg in self.connections.segmentsForCell(cell):
                synapseList = []
                for synapse in self.connections.synapsesForSegment(seg):
                    synapseData = self.connections.dataForSynapse(synapse)
                    synapseList.append(
                        (synapseData.presynapticCell, synapseData.permanence))
                synapseList.sort()
                synapseStringList = ["{0:3}={1:.2f}".format(sourceCell, permanence)
                                     for sourceCell, permanence in synapseList]
                segmentDict[seg] = "({0})".format(" ".join(synapseStringList))
            text += ("Column {0:3} / Cell {1:3}:\t({2}) {3}\n".format(
                column, cell, len(segmentDict.values()),
                "[{0}]".format(", ".join(segmentDict.values()))))
        if column < len(columns) - 1:
            text += "\n"
    text += "------------------------------------\n"
    return text
Pretty print the connections in the temporal memory. TODO: Use PrettyTable. @return (string) Pretty-printed text
def sim(self, src, tar):
    if src == tar:
        return 1.0
    if not src or not tar:
        return 0.0
    return (
        len(src) / len(tar) if len(src) < len(tar) else len(tar) / len(src)
    )
Return the length similarity of two strings. Length similarity is the ratio of the length of the shorter string to the longer. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Length similarity Examples -------- >>> cmp = Length() >>> cmp.sim('cat', 'hat') 1.0 >>> cmp.sim('Niall', 'Neil') 0.8 >>> cmp.sim('aluminum', 'Catalan') 0.875 >>> cmp.sim('ATCG', 'TAGC') 1.0
def predict_proba(self, a, b, **kwargs): return self.b_fit_score(b, a) - self.b_fit_score(a, b)
Infer causal relationships between 2 variables using the RECI statistic :param a: Input variable 1 :param b: Input variable 2 :return: Causation coefficient (Value : 1 if a->b and -1 if b->a) :rtype: float
def add(self, constraint, check=False): if isinstance(constraint, bool): constraint = BoolConstant(constraint) assert isinstance(constraint, Bool) constraint = simplify(constraint) if self._child is not None: raise Exception('ConstraintSet is frozen') if isinstance(constraint, BoolConstant): if not constraint.value: logger.info("Adding an impossible constant constraint") self._constraints = [constraint] else: return self._constraints.append(constraint) if check: from ...core.smtlib import solver if not solver.check(self): raise ValueError("Added an impossible constraint")
Add a constraint to the set :param constraint: The constraint to add to the set. :param check: Currently unused. :return:
def detail_view(self, request): context = { 'preview': self, } kwargs = {} if self.form_class: if request.GET: form = self.form_class(data=request.GET) else: form = self.form_class() context['form'] = form if not form.is_bound or not form.is_valid(): return render(request, 'mailviews/previews/detail.html', context) kwargs.update(form.get_message_view_kwargs()) message_view = self.get_message_view(request, **kwargs) message = message_view.render_to_message() raw = message.message() headers = OrderedDict((header, maybe_decode_header(raw[header])) for header in self.headers) context.update({ 'message': message, 'subject': message.subject, 'body': message.body, 'headers': headers, 'raw': raw.as_string(), }) alternatives = getattr(message, 'alternatives', []) try: html = next(alternative[0] for alternative in alternatives if alternative[1] == 'text/html') context.update({ 'html': html, 'escaped_html': b64encode(html.encode('utf-8')), }) except StopIteration: pass return render(request, self.template_name, context)
Renders the message view to a response.
def calculate(self, T, P, zs, ws, method):
    if method == SIMPLE:
        ks = [i(T, P) for i in self.ThermalConductivityLiquids]
        return mixing_simple(zs, ks)
    elif method == DIPPR_9H:
        ks = [i(T, P) for i in self.ThermalConductivityLiquids]
        return DIPPR9H(ws, ks)
    elif method == FILIPPOV:
        ks = [i(T, P) for i in self.ThermalConductivityLiquids]
        return Filippov(ws, ks)
    elif method == MAGOMEDOV:
        k_w = self.ThermalConductivityLiquids[self.index_w](T, P)
        ws = list(ws)
        ws.pop(self.index_w)
        return thermal_conductivity_Magomedov(T, P, ws, self.wCASs, k_w)
    else:
        raise Exception('Method not valid')
Method to calculate thermal conductivity of a liquid mixture at temperature `T`, pressure `P`, mole fractions `zs` and weight fractions `ws` with a given method. This method has no exception handling; see `mixture_property` for that. Parameters ---------- T : float Temperature at which to calculate the property, [K] P : float Pressure at which to calculate the property, [Pa] zs : list[float] Mole fractions of all species in the mixture, [-] ws : list[float] Weight fractions of all species in the mixture, [-] method : str Name of the method to use Returns ------- k : float Thermal conductivity of the liquid mixture, [W/m/K]
def allow(self, role, method, resource, with_children=True): if with_children: for r in role.get_children(): permission = (r.get_name(), method, resource) if permission not in self._allowed: self._allowed.append(permission) if role == 'anonymous': permission = (role, method, resource) else: permission = (role.get_name(), method, resource) if permission not in self._allowed: self._allowed.append(permission)
Add allowing rules. :param role: Role of this rule. :param method: Method to allow in rule, include GET, POST, PUT etc. :param resource: Resource also view function. :param with_children: Allow role's children in rule as well if with_children is `True`
def save(self, sess, save_path, timestep=None): if self._saver is None: raise TensorForceError("register_saver_ops should be called before save") return self._saver.save( sess=sess, save_path=save_path, global_step=timestep, write_meta_graph=False, write_state=True, )
Saves this component's managed variables. Args: sess: The session for which to save the managed variables. save_path: The path to save data to. timestep: Optional, the timestep to append to the file name. Returns: Checkpoint path where the model was saved.
def trun_to_file(trun, fpath=None):
    if fpath is None:
        fpath = yml_fpath(trun["conf"]["OUTPUT"])
    with open(fpath, 'w') as yml_file:
        data = yaml.dump(trun, explicit_start=True, default_flow_style=False)
        yml_file.write(data)
Dump the given trun to file
def get_homogeneous(package_descriptors, targets, repos_data): homogeneous = {} for package_descriptor in package_descriptors.values(): pkg_name = package_descriptor.pkg_name debian_pkg_name = package_descriptor.debian_pkg_name versions = [] for repo_data in repos_data: versions.append(set([])) for target in targets: version = _strip_version_suffix( repo_data.get(target, {}).get(debian_pkg_name, None)) versions[-1].add(version) homogeneous[pkg_name] = max([len(v) for v in versions]) == 1 return homogeneous
For each package check if the version in one repo is equal for all targets. The version could be different in different repos though. :return: a dict indexed by package names containing a boolean flag
def copyto(self, new_abspath=None, new_dirpath=None, new_dirname=None, new_basename=None, new_fname=None, new_ext=None, overwrite=False, makedirs=False): self.assert_exists() p = self.change( new_abspath=new_abspath, new_dirpath=new_dirpath, new_dirname=new_dirname, new_basename=new_basename, new_fname=new_fname, new_ext=new_ext, ) if p.is_not_exist_or_allow_overwrite(overwrite=overwrite): if self.abspath != p.abspath: try: shutil.copy(self.abspath, p.abspath) except IOError as e: if makedirs: os.makedirs(p.parent.abspath) shutil.copy(self.abspath, p.abspath) else: raise e return p
Copy this file to other place.
def apt(self, package_names, raise_on_error=False): if isinstance(package_names, basestring): package_names = [package_names] cmd = "apt-get install -y %s" % (' '.join(package_names)) return self.wait(cmd, raise_on_error=raise_on_error)
Install specified packages using apt-get. -y options are automatically used. Waits for command to finish. Parameters ---------- package_names: list-like of str raise_on_error: bool, default False If True then raise ValueError if stderr is not empty debconf often gives tty error
def close(self): self.require_not_closed() if not self.streaming or self.asynchronous: if 'Content-Length' not in self.headers: self.headers['Content-Length'] = self.tell() self.flush() self._closed = True
Flush and close the stream. This is called automatically by the base resource on resources unless the resource is operating asynchronously; in that case, this method MUST be called in order to signal the end of the request. If not the request will simply hang as it is waiting for some thread to tell it to return to the client.
def fail(message=None, exit_status=None):
    print('Error:', message, file=sys.stderr)
    sys.exit(exit_status or 1)
Prints the specified message and exits the program with the specified exit status.
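A small illustration of the intended call pattern (the message and status code are arbitrary examples):
>>> fail('could not read the config file', exit_status=2)   # prints "Error: could not read the config file" to stderr, then exits with status 2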
def _generateRangeDescription(self, ranges):
    desc = ""
    numRanges = len(ranges)
    for i in xrange(numRanges):
        if ranges[i][0] != ranges[i][1]:
            desc += "%.2f-%.2f" % (ranges[i][0], ranges[i][1])
        else:
            desc += "%.2f" % (ranges[i][0])
        if i < numRanges - 1:
            desc += ", "
    return desc
Generate a text description of the given ranges.
def get(self, name):
    for c in self.comps:
        if c.category == name:
            return c
    return None
Return component by category name
def get_unique_pathname(path, root=''):
    path = os.path.join(root, path)
    potentialPaths = itertools.chain((path,), __get_numbered_paths(path))
    potentialPaths = six.moves.filterfalse(os.path.exists, potentialPaths)
    return next(potentialPaths)
Return a pathname possibly with a number appended to it so that it is unique in the directory.
def compare_signature(expected: Union[str, bytes], actual: Union[str, bytes]) -> bool:
    expected = util.to_bytes(expected)
    actual = util.to_bytes(actual)
    return hmac.compare_digest(expected, actual)
Compares the given signatures. :param expected: The expected signature. :type expected: Union[str, bytes] :param actual: The actual signature. :type actual: Union[str, bytes] :return: Do the signatures match? :rtype: bool
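A brief sketch, assuming `util.to_bytes` simply encodes str input to bytes so str and bytes arguments can be mixed:
>>> compare_signature('abc123', b'abc123')
True
>>> compare_signature('abc123', 'abc124')
False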
def unhex(s):
    bits = 0
    for c in s:
        if '0' <= c <= '9':
            i = ord('0')
        elif 'a' <= c <= 'f':
            i = ord('a') - 10
        elif 'A' <= c <= 'F':
            i = ord('A') - 10
        else:
            break
        bits = bits * 16 + (ord(c) - i)
    return bits
Get the integer value of a hexadecimal number.
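Worked examples implied directly by the digit-by-digit accumulation above:
>>> unhex('ff')
255
>>> unhex('1A2b')
6699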
def env_key(key, default):
    env = key.upper().replace('.', '_')
    return os.environ.get(env, default)
Try to get `key` from the environment. This mutates `key` to replace dots with underscores and makes it all uppercase. my.database.host => MY_DATABASE_HOST
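A short sketch of the lookup behaviour (the environment variable name and defaults are illustrative):
>>> import os
>>> os.environ['MY_DATABASE_HOST'] = 'db.example.com'
>>> env_key('my.database.host', 'localhost')
'db.example.com'
>>> env_key('my.database.port', 5432)   # not set, so the default comes back
5432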
def simplified_edges(self):
    for group, edgelist in self.edges.items():
        for u, v, d in edgelist:
            yield (u, v)
A generator for getting all of the edges without consuming extra memory.
def pop(self, key, default=_sentinel):
    if default is not _sentinel:
        tup = self._data.pop(key.lower(), default)
    else:
        tup = self._data.pop(key.lower())
    if tup is not default:
        return tup[1]
    else:
        return default
Removes the specified key and returns the corresponding value. If key is not found, the default is returned if given, otherwise KeyError is raised. :param key: The key :param default: The default value :return: The value
def create_field(subfields=None, ind1=' ', ind2=' ', controlfield_value='', global_position=-1):
    if subfields is None:
        subfields = []
    ind1, ind2 = _wash_indicators(ind1, ind2)
    field = (subfields, ind1, ind2, controlfield_value, global_position)
    _check_field_validity(field)
    return field
Return a field created with the provided elements. Global position is set arbitrary to -1.
def update(x, **entries):
    if isinstance(x, dict):
        x.update(entries)
    else:
        x.__dict__.update(entries)
    return x
Update a dict, or an object with slots, according to `entries` dict. >>> update({'a': 1}, a=10, b=20) {'a': 10, 'b': 20} >>> update(Struct(a=1), a=10, b=20) Struct(a=10, b=20)
def save(self, *args, **kwargs): current_activable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME) is_active_changed = self.id is None or self.__original_activatable_value != current_activable_value self.__original_activatable_value = current_activable_value ret_val = super(BaseActivatableModel, self).save(*args, **kwargs) if is_active_changed: model_activations_changed.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value) if self.activatable_field_updated: model_activations_updated.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value) return ret_val
A custom save method that handles figuring out when something is activated or deactivated.
def expire_password(self, username):
    r = self.local_renderer
    r.env.username = username
    r.sudo('chage -d 0 {username}')
Forces the user to change their password the next time they login.
def config2(self): config = [] data = {} self.cnxn.xfer([0x3D]) sleep(10e-3) for i in range(9): resp = self.cnxn.xfer([0x00])[0] config.append(resp) data["AMSamplingInterval"] = self._16bit_unsigned(config[0], config[1]) data["AMIdleIntervalCount"] = self._16bit_unsigned(config[2], config[3]) data['AMFanOnIdle'] = config[4] data['AMLaserOnIdle'] = config[5] data['AMMaxDataArraysInFile'] = self._16bit_unsigned(config[6], config[7]) data['AMOnlySavePMData'] = config[8] sleep(0.1) return data
Read the second set of configuration variables and return as a dictionary. **NOTE: This method is supported by firmware v18+.** :rtype: dictionary :Example: >>> a.config2() { 'AMFanOnIdle': 0, 'AMIdleIntervalCount': 0, 'AMMaxDataArraysInFile': 61798, 'AMSamplingInterval': 1, 'AMOnlySavePMData': 0, 'AMLaserOnIdle': 0 }
def filter(names, pat): import os result=[] try: re_pat = _cache[pat] except KeyError: res = translate(pat) if len(_cache) >= _MAXCACHE: globals()['_cache'] = {} _cache[pat] = re_pat = re.compile(res) match = re_pat.match if 1: for name in names: if match(name): result.append(name) else: for name in names: if match(os.path.normcase(name)): result.append(name) return result
Return the subset of the list NAMES that match PAT