Columns: code (string, 59 to 4.4k characters); docstring (string, 5 to 7.69k characters)
def _encode_ids(*args):
    # Python 2 helper: percent-encode each id and join them with ';'.
    ids = []
    for v in args:
        if isinstance(v, basestring):
            qv = v.encode('utf-8') if isinstance(v, unicode) else v
            ids.append(urllib.quote(qv))
        else:
            qv = str(v)
            ids.append(urllib.quote(qv))
    return ';'.join(ids)
URL-encode resource ids.
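A minimal sketch of the same idea for Python 3 (the snippet above targets Python 2's basestring/unicode/urllib.quote); the helper name and sample ids below are illustrative only:

from urllib.parse import quote

def encode_ids(*args):
    # Percent-encode each id (anything str()-able) and join with ';'
    return ';'.join(quote(str(v)) for v in args)

print(encode_ids('my project', 42))   # -> my%20project;42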
def drain_rois(img):
    img_data = get_img_data(img)
    out = np.zeros(img_data.shape, dtype=img_data.dtype)

    krn_dim = [3] * img_data.ndim
    kernel = np.ones(krn_dim, dtype=int)

    vals = np.unique(img_data)
    vals = vals[vals != 0]

    for i in vals:
        roi = img_data == i
        hits = scn.binary_hit_or_miss(roi, kernel)
        roi[hits] = 0
        out[roi > 0] = i

    return out
Find all the ROIs in img and return a similar volume with the ROIs emptied, keeping only their border voxels. This is useful for DTI tractography.

Parameters
----------
img: img-like object or str
    Can either be:
    - a file path to a Nifti image
    - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
    If img is a string, it is treated as a path to a Nifti image and nibabel.load is called on it. If it is an object, the presence of get_data() and get_affine() methods is checked; a TypeError is raised otherwise.

Returns
-------
np.ndarray
    An array of the same shape as img_data.
def is_local_ip(ip_address):
    try:
        ip = ipaddress.ip_address(u'' + ip_address)
        return ip.is_loopback
    except ValueError:
        return None
Check if IP is local
def get_bibliography(lsst_bib_names=None, bibtex=None): bibtex_data = get_lsst_bibtex(bibtex_filenames=lsst_bib_names) pybtex_data = [pybtex.database.parse_string(_bibtex, 'bibtex') for _bibtex in bibtex_data.values()] if bibtex is not None: pybtex_data.append(pybtex.database.parse_string(bibtex, 'bibtex')) bib = pybtex_data[0] if len(pybtex_data) > 1: for other_bib in pybtex_data[1:]: for key, entry in other_bib.entries.items(): bib.add_entry(key, entry) return bib
Make a pybtex BibliographyData instance from standard lsst-texmf bibliography files and user-supplied bibtex content.

Parameters
----------
lsst_bib_names : sequence of `str`, optional
    Names of lsst-texmf BibTeX files to include. For example:

    .. code-block:: python

       ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']

    Default is `None`, which includes all lsst-texmf bibtex files.
bibtex : `str`
    BibTeX source content not included in lsst-texmf. This can be content from a ``local.bib`` file, for example.

Returns
-------
bibliography : `pybtex.database.BibliographyData`
    A pybtex bibliography database that includes all given sources: lsst-texmf bibliographies and ``bibtex``.
def SETNO(cpu, dest): dest.write(Operators.ITEBV(dest.size, cpu.OF == False, 1, 0))
Sets byte if not overflow. :param cpu: current CPU. :param dest: destination operand.
def __checkMaturity(self): if self._currentRecordIndex+1 < self._MIN_RECORDS_TO_BE_BEST: return if self._isMature: return metric = self._getMetrics()[self._optimizedMetricLabel] self._metricRegression.addPoint(x=self._currentRecordIndex, y=metric) pctChange, absPctChange = self._metricRegression.getPctChanges() if pctChange is not None and absPctChange <= self._MATURITY_MAX_CHANGE: self._jobsDAO.modelSetFields(self._modelID, {'engMatured':True}) self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED self._isMature = True self._logger.info("Model %d has matured (pctChange=%s, n=%d). \n"\ "Scores = %s\n"\ "Stopping execution",self._modelID, pctChange, self._MATURITY_NUM_POINTS, self._metricRegression._window)
Save the current metric value and check whether the model's performance has 'leveled off.' We do this by looking at the metric values over some number of previous records.
def parse_requirements_alt(fname='requirements.txt'): import requirements from os.path import dirname, join, exists require_fpath = join(dirname(__file__), fname) if exists(require_fpath): with open(require_fpath, 'r') as file: requires = list(requirements.parse(file)) packages = [r.name for r in requires] return packages return []
Alternative requirements parser; requires ``pip install requirements-parser``. fname='requirements.txt'
def mfbe(a, b): return 2 * bias(a, b) / (a.mean() + b.mean())
Returns the mean fractionalized bias error
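A short worked sketch of that formula, assuming ``bias(a, b)`` is the mean difference ``a.mean() - b.mean()`` (the helper itself is not shown above):

import numpy as np

a = np.array([2.0, 4.0, 6.0])   # e.g. modelled values
b = np.array([1.0, 3.0, 5.0])   # e.g. observations

bias_ab = a.mean() - b.mean()               # 1.0
mfbe = 2 * bias_ab / (a.mean() + b.mean())  # 2 * 1.0 / 7.0 ≈ 0.286
print(mfbe)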
def dist_mlipns(src, tar, threshold=0.25, max_mismatches=2): return MLIPNS().dist(src, tar, threshold, max_mismatches)
Return the MLIPNS distance between two strings. This is a wrapper for :py:meth:`MLIPNS.dist`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison threshold : float A number [0, 1] indicating the maximum similarity score, below which the strings are considered 'similar' (0.25 by default) max_mismatches : int A number indicating the allowable number of mismatches to remove before declaring two strings not similar (2 by default) Returns ------- float MLIPNS distance Examples -------- >>> dist_mlipns('cat', 'hat') 0.0 >>> dist_mlipns('Niall', 'Neil') 1.0 >>> dist_mlipns('aluminum', 'Catalan') 1.0 >>> dist_mlipns('ATCG', 'TAGC') 1.0
def argsort(indexable, key=None, reverse=False): if isinstance(indexable, collections_abc.Mapping): vk_iter = ((v, k) for k, v in indexable.items()) else: vk_iter = ((v, k) for k, v in enumerate(indexable)) if key is None: indices = [k for v, k in sorted(vk_iter, reverse=reverse)] else: indices = [k for v, k in sorted(vk_iter, key=lambda vk: key(vk[0]), reverse=reverse)] return indices
Returns the indices that would sort an indexable object.

This is similar to `numpy.argsort`, but it is written in pure python and works on both lists and dictionaries.

Args:
    indexable (Iterable or Mapping): indexable to sort by
    key (Callable, optional): customizes the ordering of the indexable
    reverse (bool, optional): if True returns in descending order

Returns:
    list: indices: a list of indices that sorts the indexable

Example:
    >>> import ubelt as ub
    >>> # argsort works on dicts by returning keys
    >>> dict_ = {'a': 3, 'b': 2, 'c': 100}
    >>> indices = ub.argsort(dict_)
    >>> assert list(ub.take(dict_, indices)) == sorted(dict_.values())
    >>> # argsort works on lists by returning indices
    >>> indexable = [100, 2, 432, 10]
    >>> indices = ub.argsort(indexable)
    >>> assert list(ub.take(indexable, indices)) == sorted(indexable)
    >>> # Can use iterators, but be careful. It exhausts them.
    >>> indexable = reversed(range(100))
    >>> indices = ub.argsort(indexable)
    >>> assert indices[0] == 99
    >>> # Can use key just like sorted
    >>> indexable = [[0, 1, 2], [3, 4], [5]]
    >>> indices = ub.argsort(indexable, key=len)
    >>> assert indices == [2, 1, 0]
    >>> # Can use reverse just like sorted
    >>> indexable = [0, 2, 1]
    >>> indices = ub.argsort(indexable, reverse=True)
    >>> assert indices == [1, 2, 0]
def activatewindow(self, window_name): window_handle = self._get_window_handle(window_name) self._grabfocus(window_handle) return 1
Activate window. @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @return: 1 on success. @rtype: integer
def _configure_io_handler(self, handler): if self.check_events(): return if handler in self._unprepared_handlers: old_fileno = self._unprepared_handlers[handler] prepared = self._prepare_io_handler(handler) else: old_fileno = None prepared = True fileno = handler.fileno() if old_fileno is not None and fileno != old_fileno: tag = self._io_sources.pop(handler, None) if tag is not None: glib.source_remove(tag) if not prepared: self._unprepared_handlers[handler] = fileno if fileno is None: logger.debug(" {0!r}.fileno() is None, not polling" .format(handler)) return events = 0 if handler.is_readable(): logger.debug(" {0!r} readable".format(handler)) events |= glib.IO_IN | glib.IO_ERR if handler.is_writable(): logger.debug(" {0!r} writable".format(handler)) events |= glib.IO_OUT | glib.IO_HUP | glib.IO_ERR if events: logger.debug(" registering {0!r} handler fileno {1} for" " events {2}".format(handler, fileno, events)) glib.io_add_watch(fileno, events, self._io_callback, handler)
Register an io-handler with the glib main loop.
def _predict(self, my_task, seen=None, looked_ahead=0): if my_task._is_finished(): return if seen is None: seen = [] elif self in seen: return if not my_task._is_finished(): self._predict_hook(my_task) if not my_task._is_definite(): if looked_ahead + 1 >= self.lookahead: return seen.append(self) for child in my_task.children: child.task_spec._predict(child, seen[:], looked_ahead + 1)
Updates the branch such that all possible future routes are added. Should NOT be overwritten! Instead, overwrite _predict_hook(). :type my_task: Task :param my_task: The associated task in the task tree. :type seen: list[taskspec] :param seen: A list of already visited tasks. :type looked_ahead: integer :param looked_ahead: The depth of the predicted path so far.
def shapely_formatter(_, vertices, codes=None): elements = [] if codes is None: for vertices_ in vertices: if np.all(vertices_[0, :] == vertices_[-1, :]): if len(vertices) < 3: elements.append(Point(vertices_[0, :])) else: elements.append(LinearRing(vertices_)) else: elements.append(LineString(vertices_)) else: for vertices_, codes_ in zip(vertices, codes): starts = np.nonzero(codes_ == MPLPATHCODE.MOVETO)[0] stops = np.nonzero(codes_ == MPLPATHCODE.CLOSEPOLY)[0] try: rings = [LinearRing(vertices_[start:stop+1, :]) for start, stop in zip(starts, stops)] elements.append(Polygon(rings[0], rings[1:])) except ValueError as err: if np.any(stop - start - 1 == 0): if stops[0] < starts[0]+2: pass else: rings = [ LinearRing(vertices_[start:stop+1, :]) for start, stop in zip(starts, stops) if stop >= start+2] elements.append(Polygon(rings[0], rings[1:])) else: raise(err) return elements
`Shapely`_ style contour formatter. Contours are returned as a list of :class:`shapely.geometry.LineString`, :class:`shapely.geometry.LinearRing`, and :class:`shapely.geometry.Point` geometry elements. Filled contours return a list of :class:`shapely.geometry.Polygon` elements instead. .. note:: If possible, `Shapely speedups`_ will be enabled. .. _Shapely: http://toblerity.org/shapely/manual.html .. _Shapely speedups: http://toblerity.org/shapely/manual.html#performance See Also -------- `descartes <https://bitbucket.org/sgillies/descartes/>`_ : Use `Shapely`_ or GeoJSON-like geometric objects as matplotlib paths and patches.
def assert_operations(self, *args): if not set(args).issubset(self.allowed_operations): raise http.exceptions.Forbidden()
Asserts that the requested operations are allowed in this context.
def _transaction_end(self): self._command.append('\x87') self._ft232h._write(''.join(self._command)) return bytearray(self._ft232h._poll_read(self._expected))
End I2C transaction and get response bytes, including ACKs.
def write(self, album): page = self.template.render(**self.generate_context(album)) output_file = os.path.join(album.dst_path, album.output_file) with open(output_file, 'w', encoding='utf-8') as f: f.write(page)
Generate the HTML page and save it.
async def get_group_conversation_url(self, get_group_conversation_url_request): response = hangouts_pb2.GetGroupConversationUrlResponse() await self._pb_request('conversations/getgroupconversationurl', get_group_conversation_url_request, response) return response
Get URL to allow others to join a group conversation.
def api_subclass_factory(name, docstring, remove_methods, base=SlackApi): methods = deepcopy(base.API_METHODS) for parent, to_remove in remove_methods.items(): if to_remove is ALL: del methods[parent] else: for method in to_remove: del methods[parent][method] return type(name, (base,), dict(API_METHODS=methods, __doc__=docstring))
Create an API subclass with fewer methods than its base class.

Arguments:
    name (:py:class:`str`): The name of the new class.
    docstring (:py:class:`str`): The docstring for the new class.
    remove_methods (:py:class:`dict`): The methods to remove from the base class's :py:attr:`API_METHODS` for the subclass. The key is the name of the root method (e.g. ``'auth'`` for ``'auth.test'``); the value is either a tuple of child method names (e.g. ``('test',)``) or, if all children should be removed, the special value :py:const:`ALL`.
    base (:py:class:`type`, optional): The base class (defaults to :py:class:`SlackApi`).

Returns:
    :py:class:`type`: The new subclass.

Raises:
    :py:class:`KeyError`: If the method wasn't in the superclass.
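A hypothetical usage sketch; the method names removed below are illustrative and may not exist in the real API_METHODS tree, and ``ALL`` comes from the surrounding module:

ReadOnlyApi = api_subclass_factory(
    'ReadOnlyApi',
    docstring='Slack API wrapper with mutating endpoints removed.',
    remove_methods={'chat': ALL, 'auth': ('revoke',)},  # hypothetical keys
)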
def parse_operand(self, buf): buf = iter(buf) try: operand = 0 for _ in range(self.operand_size): operand <<= 8 operand |= next(buf) self._operand = operand except StopIteration: raise ParseError("Not enough data for decoding")
Parses an operand from buf :param buf: a buffer :type buf: iterator/generator/string
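A standalone illustration of the big-endian accumulation the loop performs (the 2-byte operand size is assumed for the demo):

buf = iter(b'\x01\xf4\xff')   # only the first two bytes are consumed
operand = 0
for _ in range(2):            # pretend operand_size == 2
    operand = (operand << 8) | next(buf)
print(hex(operand))           # 0x1f4 == 500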
def set(self, path, value=None, filename=None): if filename is None: config = self._firstConfig()[1] else: config = self.configs[filename] path = _splitPath(path) for el in path[:-1]: if el in config: config = config[el] else: config[el] = OrderedDict() config = config[el] config[path[-1]] = value
Set a configuration value. If no filename is specified, the property is set in the first configuration file. Note that if a filename is specified and the property path is present in an earlier file, the newly set property will be hidden by the earlier one.

usage: set('section.property', value='somevalue')

Note that currently array indexes are not supported. You must set the whole array.
def fetch_course_organizations(course_key): queryset = internal.OrganizationCourse.objects.filter( course_id=text_type(course_key), active=True ).select_related('organization') return [serializers.serialize_organization_with_course(organization) for organization in queryset]
Retrieves the organizations linked to the specified course
def write_review(review, out):
    # The string literal on the next line was truncated in the source snippet.
    out.write('
    write_value('Reviewer', review.reviewer, out)
    write_value('ReviewDate', review.review_date_iso_format, out)
    if review.has_comment:
        write_text_value('ReviewComment', review.comment, out)
Write the fields of a single review to out.
def _delete_transmissions(self, content_metadata_item_ids): ContentMetadataItemTransmission = apps.get_model( 'integrated_channel', 'ContentMetadataItemTransmission' ) ContentMetadataItemTransmission.objects.filter( enterprise_customer=self.enterprise_configuration.enterprise_customer, integrated_channel_code=self.enterprise_configuration.channel_code(), content_id__in=content_metadata_item_ids ).delete()
Delete ContentMetadataItemTransmission models associated with the given content metadata items.
def write_config(self, initialize_indices=False): if not os.path.exists(self.config_dir): os.mkdir(self.config_dir) with open(self.config_file, 'w') as configfile: self.config.write(configfile) if initialize_indices: index = self.get('jackal', 'index') from jackal import Host, Range, Service, User, Credential, Log from jackal.core import create_connection create_connection(self) Host.init(index="{}-hosts".format(index)) Range.init(index="{}-ranges".format(index)) Service.init(index="{}-services".format(index)) User.init(index="{}-users".format(index)) Credential.init(index="{}-creds".format(index)) Log.init(index="{}-log".format(index))
Write the current config to disk to store them.
def get_geoip(ip): reader = geolite2.reader() ip_data = reader.get(ip) or {} return ip_data.get('country', {}).get('iso_code')
Lookup country for IP address.
def elliptical_annular(cls, shape, pixel_scale,inner_major_axis_radius_arcsec, inner_axis_ratio, inner_phi, outer_major_axis_radius_arcsec, outer_axis_ratio, outer_phi, centre=(0.0, 0.0), invert=False): mask = mask_util.mask_elliptical_annular_from_shape_pixel_scale_and_radius(shape, pixel_scale, inner_major_axis_radius_arcsec, inner_axis_ratio, inner_phi, outer_major_axis_radius_arcsec, outer_axis_ratio, outer_phi, centre) if invert: mask = np.invert(mask) return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
Setup a mask where unmasked pixels are within an elliptical annulus of input inner and outer arc second \ major-axis and centre. Parameters ---------- shape: (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel. inner_major_axis_radius_arcsec : float The major-axis (in arc seconds) of the inner ellipse within which pixels are masked. inner_axis_ratio : float The axis-ratio of the inner ellipse within which pixels are masked. inner_phi : float The rotation angle of the inner ellipse within which pixels are masked, (counter-clockwise from the \ positive x-axis). outer_major_axis_radius_arcsec : float The major-axis (in arc seconds) of the outer ellipse within which pixels are unmasked. outer_axis_ratio : float The axis-ratio of the outer ellipse within which pixels are unmasked. outer_phi : float The rotation angle of the outer ellipse within which pixels are unmasked, (counter-clockwise from the \ positive x-axis). centre: (float, float) The centre of the elliptical annuli used to mask pixels.
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False): arr_0to1_padded, pad_amounts = ia.pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode, cval=cval, return_pad_amounts=True) heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value, max_value=self.max_value) if return_pad_amounts: return heatmaps, pad_amounts else: return heatmaps
Pad the heatmaps on their sides so that they match a target aspect ratio. Depending on which dimension is smaller (height or width), only the corresponding sides (left/right or top/bottom) will be padded. In each case, both of the sides will be padded equally. Parameters ---------- aspect_ratio : float Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice as much width as height. mode : str, optional Padding mode to use. See :func:`numpy.pad` for details. cval : number, optional Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details. return_pad_amounts : bool, optional If False, then only the padded image will be returned. If True, a tuple with two entries will be returned, where the first entry is the padded image and the second entry are the amounts by which each image side was padded. These amounts are again a tuple of the form (top, right, bottom, left), with each value being an integer. Returns ------- heatmaps : imgaug.HeatmapsOnImage Padded heatmaps as HeatmapsOnImage object. pad_amounts : tuple of int Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``. This tuple is only returned if `return_pad_amounts` was set to True.
def subseq(self, start, end): return Fasta(self.id, self.seq[start:end])
Returns Fasta object with the same name, of the bases from start to end, but not including end
def get_db_from_db(db_string): server = get_server_from_db(db_string) local_match = PLAIN_RE.match(db_string) remote_match = URL_RE.match(db_string) if local_match: return server[local_match.groupdict()['database']] elif remote_match: return server[remote_match.groupdict()['database']] raise ValueError('Invalid database string: %r' % (db_string,))
Return a CouchDB database instance from a database string.
def calculate(self, T, P, zs, ws, method):
    if method == MIXING_LOG_MOLAR:
        mus = [i(T, P) for i in self.ViscosityLiquids]
        return mixing_logarithmic(zs, mus)
    elif method == MIXING_LOG_MASS:
        mus = [i(T, P) for i in self.ViscosityLiquids]
        return mixing_logarithmic(ws, mus)
    elif method == LALIBERTE_MU:
        ws = list(ws)
        ws.pop(self.index_w)
        return Laliberte_viscosity(T, ws, self.wCASs)
    else:
        raise Exception('Method not valid')
r'''Method to calculate viscosity of a liquid mixture at temperature `T`, pressure `P`, mole fractions `zs` and weight fractions `ws` with a given method. This method has no exception handling; see `mixture_property` for that. Parameters ---------- T : float Temperature at which to calculate the property, [K] P : float Pressure at which to calculate the property, [Pa] zs : list[float] Mole fractions of all species in the mixture, [-] ws : list[float] Weight fractions of all species in the mixture, [-] method : str Name of the method to use Returns ------- mu : float Viscosity of the liquid mixture, [Pa*s]
def create_link(self, token, folder_id, url, **kwargs): parameters = dict() parameters['token'] = token parameters['folderid'] = folder_id parameters['url'] = url optional_keys = ['item_name', 'length', 'checksum'] for key in optional_keys: if key in kwargs: if key == 'item_name': parameters['itemname'] = kwargs[key] continue parameters[key] = kwargs[key] response = self.request('midas.link.create', parameters) return response
Create a link bitstream. :param token: A valid token for the user in question. :type token: string :param folder_id: The id of the folder in which to create a new item that will contain the link. The new item will have the same name as the URL unless an item name is supplied. :type folder_id: int | long :param url: The URL of the link you will create, will be used as the name of the bitstream and of the item unless an item name is supplied. :type url: string :param item_name: (optional) The name of the newly created item, if not supplied, the item will have the same name as the URL. :type item_name: string :param length: (optional) The length in bytes of the file to which the link points. :type length: int | long :param checksum: (optional) The MD5 checksum of the file to which the link points. :type checksum: string :returns: The item information of the item created. :rtype: dict
def extract_from_image(self, image): ia.do_assert(image.ndim in [2, 3]) if len(self.exterior) <= 2: raise Exception("Polygon must be made up of at least 3 points to extract its area from an image.") bb = self.to_bounding_box() bb_area = bb.extract_from_image(image) if self.is_out_of_image(image, fully=True, partly=False): return bb_area xx = self.xx_int yy = self.yy_int xx_mask = xx - np.min(xx) yy_mask = yy - np.min(yy) height_mask = np.max(yy_mask) width_mask = np.max(xx_mask) rr_face, cc_face = skimage.draw.polygon(yy_mask, xx_mask, shape=(height_mask, width_mask)) mask = np.zeros((height_mask, width_mask), dtype=np.bool) mask[rr_face, cc_face] = True if image.ndim == 3: mask = np.tile(mask[:, :, np.newaxis], (1, 1, image.shape[2])) return bb_area * mask
Extract the image pixels within the polygon. This function will zero-pad the image if the polygon is partially/fully outside of the image. Parameters ---------- image : (H,W) ndarray or (H,W,C) ndarray The image from which to extract the pixels within the polygon. Returns ------- result : (H',W') ndarray or (H',W',C) ndarray Pixels within the polygon. Zero-padded if the polygon is partially/fully outside of the image.
def check_add_particles(st, guess, rad='calc', do_opt=True, im_change_frac=0.2, min_derr='3sig', **kwargs): if min_derr == '3sig': min_derr = 3 * st.sigma accepts = 0 new_poses = [] if rad == 'calc': rad = guess_add_radii(st) message = ('-'*30 + 'ADDING' + '-'*30 + '\n Z\t Y\t X\t R\t|\t ERR0\t\t ERR1') with log.noformat(): CLOG.info(message) for a in range(guess.shape[0]): p0 = guess[a] absent_err = st.error absent_d = st.residuals.copy() ind = st.obj_add_particle(p0, rad) if do_opt: opt.do_levmarq_particles( st, ind, damping=1.0, max_iter=1, run_length=3, eig_update=False, include_rad=False) present_err = st.error present_d = st.residuals.copy() dont_kill = should_particle_exist( absent_err, present_err, absent_d, present_d, im_change_frac=im_change_frac, min_derr=min_derr) if dont_kill: accepts += 1 p = tuple(st.obj_get_positions()[ind].ravel()) r = tuple(st.obj_get_radii()[ind].ravel()) new_poses.append(p) part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % ( p + r + (absent_err, st.error)) with log.noformat(): CLOG.info(part_msg) else: st.obj_remove_particle(ind) if np.abs(absent_err - st.error) > 1e-4: raise RuntimeError('updates not exact?') return accepts, new_poses
Checks whether to add particles at a given position by seeing if adding the particle improves the fit of the state. Parameters ---------- st : :class:`peri.states.State` The state to check adding particles to. guess : [N,3] list-like The positions of particles to check to add. rad : {Float, ``'calc'``}, optional. The radius of the newly-added particles. Default is ``'calc'``, which uses the states current radii's median. do_opt : Bool, optional Whether to optimize the particle position before checking if it should be kept. Default is True (optimizes position). im_change_frac : Float How good the change in error needs to be relative to the change in the difference image. Default is 0.2; i.e. if the error does not decrease by 20% of the change in the difference image, do not add the particle. min_derr : Float or '3sig' The minimal improvement in error to add a particle. Default is ``'3sig' = 3*st.sigma``. Returns ------- accepts : Int The number of added particles new_poses : [N,3] list List of the positions of the added particles. If ``do_opt==True``, then these positions will differ from the input 'guess'.
def get_nginx_config(self): if os.path.exists(self._nginx_config): return open(self._nginx_config, 'r').read() else: return None
Gets the Nginx config for the project
def _warn_if_not_finite(X): X = np.asanyarray(X) if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum()) and not np.isfinite(X).all()): warnings.warn("Result contains NaN, infinity" " or a value too large for %r." % X.dtype, category=UserWarning)
UserWarning if array contains non-finite elements
def stop(self=None): if not self: instance = getattr(Runner.instance(), 'builder', None) self = instance and instance() if not self: return self._runner.stop() if self.project: self.project.stop() self.project = None
Stop the builder if it's running.
def molar_mass(compound=''): result = 0.0 if compound is None or len(compound) == 0: return result compound = compound.strip() parsed = parse_compound(compound) return parsed.molar_mass()
Determine the molar mass of a chemical compound. The molar mass is usually the mass of one mole of the substance, but here it is the mass of 1000 moles, since the mass unit used in auxi is kg. :param compound: Formula of a chemical compound, e.g. 'Fe2O3'. :returns: Molar mass. [kg/kmol]
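Usage sketch; this assumes ``parse_compound`` resolves the element masses from the library's own data, and the printed value is approximate:

mm = molar_mass('Fe2O3')
print(mm)   # ~159.69 kg/kmol  (2*55.845 + 3*15.999)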
async def connect(self, channel_id: int): ws = self._lavalink.bot._connection._get_websocket(int(self.guild_id)) await ws.voice_state(self.guild_id, str(channel_id))
Connects to a voice channel.
def _get_parser(extra_args): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) dirs = appdirs.AppDirs('hangups', 'hangups') default_token_path = os.path.join(dirs.user_cache_dir, 'refresh_token.txt') parser.add_argument( '--token-path', default=default_token_path, help='path used to store OAuth refresh token' ) parser.add_argument( '-d', '--debug', action='store_true', help='log detailed debugging messages' ) for extra_arg in extra_args: parser.add_argument(extra_arg, required=True) return parser
Return ArgumentParser with any extra arguments.
def to_decimal(text): if not isinstance(text, string_type): raise TypeError("expected str or unicode, %s given" % type(text)) if findall(r"[\x00-\x20\x7c-\xff]", text): raise ValueError("invalid character in sequence") text = text.lstrip('!') decimal = 0 length = len(text) - 1 for i, char in enumerate(text): decimal += (ord(char) - 33) * (91 ** (length - i)) return decimal if text != '' else 0
Takes a base91 char string and returns decimal
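A worked example of the positional base-91 arithmetic (each character contributes ``ord(char) - 33`` times a power of 91):

text = '"_'                               # ord('"') == 34, ord('_') == 95
value = (34 - 33) * 91**1 + (95 - 33) * 91**0
print(value)                              # 153, i.e. to_decimal('"_') == 153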
def parse_args(parser, provider_required_args, argv): epilog = 'Provider-required arguments:\n' for provider in provider_required_args: epilog += ' %s: %s\n' % (provider, provider_required_args[provider]) parser.epilog = epilog args = parser.parse_args(argv) for arg in provider_required_args[args.provider]: if not args.__getattribute__(arg): parser.error('argument --%s is required' % arg) return args
Add provider required arguments epilog message, parse, and validate.
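A hypothetical usage sketch; the provider names and required flags are made up for illustration:

import argparse

provider_required_args = {'cloud': ['project', 'zone'], 'local': []}

parser = argparse.ArgumentParser()
parser.add_argument('--provider', default='local')
parser.add_argument('--project')
parser.add_argument('--zone')

args = parse_args(parser, provider_required_args, ['--provider', 'local'])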
def delete_milestone_request(session, milestone_request_id): params_data = { 'action': 'delete', } endpoint = 'milestone_requests/{}'.format(milestone_request_id) response = make_put_request(session, endpoint, params_data=params_data) json_data = response.json() if response.status_code == 200: return json_data['status'] else: raise MilestoneRequestNotDeletedException( message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id'])
Delete a milestone request
def get_binom(base1, base2, estE, estH): prior_homo = (1. - estH) / 2. prior_hete = estH bsum = base1 + base2 hetprob = scipy.misc.comb(bsum, base1)/(2. **(bsum)) homoa = scipy.stats.binom.pmf(base2, bsum, estE) homob = scipy.stats.binom.pmf(base1, bsum, estE) hetprob *= prior_hete homoa *= prior_homo homob *= prior_homo probabilities = [homoa, homob, hetprob] bestprob = max(probabilities)/float(sum(probabilities)) if hetprob > homoa: return True, bestprob else: return False, bestprob
return probability of base call
def set_algorithms(self, signature=None, encryption=None, serialization=None, compression=None): self.signature_algorithms = \ self._update_dict(signature, self.DEFAULT_SIGNATURE) self.encryption_algorithms = \ self._update_dict(encryption, self.DEFAULT_ENCRYPTION) self.serialization_algorithms = \ self._update_dict(serialization, self.DEFAULT_SERIALIZATION) self.compression_algorithms = \ self._update_dict(compression, self.DEFAULT_COMPRESSION)
Set algorithms used for sealing. Defaults can not be overridden.
def calculate_slope_aspect(elevation, xres, yres, z=1.0, scale=1.0): z = float(z) scale = float(scale) height, width = elevation.shape[0] - 2, elevation.shape[1] - 2 window = [ z * elevation[row:(row + height), col:(col + width)] for (row, col) in product(range(3), range(3)) ] x = ( (window[0] + window[3] + window[3] + window[6]) - (window[2] + window[5] + window[5] + window[8]) ) / (8.0 * xres * scale) y = ( (window[6] + window[7] + window[7] + window[8]) - (window[0] + window[1] + window[1] + window[2]) ) / (8.0 * yres * scale) slope = math.pi/2 - np.arctan(np.sqrt(x*x + y*y)) aspect = np.arctan2(x, y) return slope, aspect
Calculate slope and aspect map.

Return a pair of arrays 2 pixels smaller than the input elevation array.

Slope is returned in radians, from 0 for sheer face to pi/2 for flat ground. Aspect is returned in radians, counterclockwise from -pi at north around to pi.

Logic here is borrowed from hillshade.cpp:
http://www.perrygeo.net/wordpress/?p=7

Parameters
----------
elevation : array
    input elevation data
xres : float
    column width
yres : float
    row height
z : float
    vertical exaggeration factor
scale : float
    scale factor of pixel size units versus height units (insert 112000 when having elevation values in meters in a geodetic projection)

Returns
-------
slope, aspect : array
    slope and aspect arrays, each two pixels smaller than the input
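Usage sketch on a tiny synthetic grid (a ramp that rises one unit per column):

import numpy as np

elevation = np.tile(np.arange(5, dtype=float), (5, 1))   # 5x5 ramp along x
slope, aspect = calculate_slope_aspect(elevation, xres=1.0, yres=1.0)
print(slope.shape, aspect.shape)   # both (3, 3), two pixels smaller than input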
def _conversion(target, source): def register(func): __CONVERSION__[target][source] = func return func return register
A decorator to register namespace conversions. Usage ----- >>> @conversion('tag_open', 'tag_.*') ... def tag_to_open(annotation): ... annotation.namespace = 'tag_open' ... return annotation
def sync(self): for key in mimicdb.backend.smembers(tpl.bucket % self.name): mimicdb.backend.delete(tpl.key % (self.name, key)) mimicdb.backend.delete(tpl.bucket % self.name) mimicdb.backend.sadd(tpl.connection, self.name) for key in self.list(force=True): mimicdb.backend.sadd(tpl.bucket % self.name, key.name) mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"')))
Sync a bucket. Force all API calls to S3 and populate the database with the current state of S3.
def parse_only_extr_license(self, extr_lic): ident = self.get_extr_license_ident(extr_lic) text = self.get_extr_license_text(extr_lic) comment = self.get_extr_lics_comment(extr_lic) xrefs = self.get_extr_lics_xref(extr_lic) name = self.get_extr_lic_name(extr_lic) if not ident: return lic = document.ExtractedLicense(ident) if text is not None: lic.text = text if name is not None: lic.full_name = name if comment is not None: lic.comment = comment lic.cross_ref = map(lambda x: six.text_type(x), xrefs) return lic
Return an ExtractedLicense object to represent a license object. But does not add it to the SPDXDocument model. Return None if failed.
def custom_serialized(cls, serialized, is_java=True): if not isinstance(serialized, bytes): raise TypeError("Argument to custom_serialized() must be " "a serialized Python class as bytes, given: %s" % str(serialized)) if not is_java: return cls.CUSTOM(gtype=topology_pb2.Grouping.Value("CUSTOM"), python_serialized=serialized) else: raise NotImplementedError("Custom grouping implemented in Java for Python topology" "is not yet supported.")
Custom grouping from a given serialized string This class is created for compatibility with ``custom_serialized(cls, java_serialized)`` method of StreamParse API, although its functionality is not yet implemented (Java-serialized). Currently only custom grouping implemented in Python is supported, and ``custom()`` method should be used to indicate its classpath, rather than directly to use this method. In the future, users can directly specify Java-serialized object with ``is_java=True`` in order to use a custom grouping implemented in Java for python topology. :param serialized: serialized classpath to custom grouping class to use (if python) :param is_java: indicate whether this is Java serialized, or python serialized
def init(src, minimal=False): templates_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'project_templates', ) for filename in os.listdir(templates_path): if (minimal and filename == 'event.json') or filename.endswith('.pyc'): continue dest_path = os.path.join(templates_path, filename) if not os.path.isdir(dest_path): copy(dest_path, src)
Copies template files to a given directory. :param str src: The path to output the template lambda project files. :param bool minimal: Minimal possible template files (excludes event.json).
def collect(self): if self.symlink and not self.local: raise CommandError("Can't symlink to a remote destination.") if self.clear: self.clear_dir('') if self.symlink: handler = self.link_file else: handler = self.copy_file found_files = OrderedDict() for finder in get_finders(): for path, storage in finder.list(self.ignore_patterns): if getattr(storage, 'prefix', None): prefixed_path = os.path.join(storage.prefix, path) else: prefixed_path = path if prefixed_path not in found_files: found_files[prefixed_path] = (storage, path) handler(path, prefixed_path, storage) if self.post_process and hasattr(self.storage, 'post_process'): processor = self.storage.post_process(found_files, dry_run=self.dry_run) for original_path, processed_path, processed in processor: if isinstance(processed, Exception): self.stderr.write("Post-processing '%s' failed!" % original_path) self.stderr.write("") raise processed if processed: self.log("Post-processed '%s' as '%s'" % (original_path, processed_path), level=1) self.post_processed_files.append(original_path) else: self.log("Skipped post-processing '%s'" % original_path) return { 'modified': self.copied_files + self.symlinked_files, 'unmodified': self.unmodified_files, 'post_processed': self.post_processed_files, }
Perform the bulk of the work of collectmedia. Split off from handle() to facilitate testing.
def sim(self, src, tar): if src == tar: return 1.0 if not src or not tar: return 0.0 min_word, max_word = (src, tar) if len(src) < len(tar) else (tar, src) min_len = len(min_word) for i in range(min_len, 0, -1): if min_word[:i] == max_word[:i]: return i / min_len return 0.0
Return the prefix similarity of two strings. Prefix similarity is the ratio of the length of the shorter term that exactly matches the longer term to the length of the shorter term, beginning at the start of both terms. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Prefix similarity Examples -------- >>> cmp = Prefix() >>> cmp.sim('cat', 'hat') 0.0 >>> cmp.sim('Niall', 'Neil') 0.25 >>> cmp.sim('aluminum', 'Catalan') 0.0 >>> cmp.sim('ATCG', 'TAGC') 0.0
def get_volume_steps(self): if not self.__volume_steps: self.__volume_steps = yield from self.handle_int( self.API.get('volume_steps')) return self.__volume_steps
Read the maximum volume level of the device.
def iter_complete_graphs(start, stop, factory=None): _, nodes = start nodes = list(nodes) if factory is None: factory = count() while len(nodes) < stop: G = nx.complete_graph(nodes) yield G v = next(factory) while v in G: v = next(factory) nodes.append(v)
Iterate over complete graphs. Args: start (int/iterable): Define the size of the starting graph. If an int, the nodes will be index-labeled, otherwise should be an iterable of node labels. stop (int): Stops yielding graphs when the size equals stop. factory (iterator, optional): If provided, nodes added will be labeled according to the values returned by factory. Otherwise the extra nodes will be index-labeled. Yields: :class:`nx.Graph`
def has_edit_permission(self, request): return request.user.is_authenticated and request.user.is_active and request.user.is_staff
Can edit this object
def _expires_in(self): if self.token_expiry: now = _UTCNOW() if self.token_expiry > now: time_delta = self.token_expiry - now return time_delta.days * 86400 + time_delta.seconds else: return 0
Return the number of seconds until this token expires. If token_expiry is in the past, this method will return 0, meaning the token has already expired. If token_expiry is None, this method will return None. Note that returning 0 in such a case would not be fair: the token may still be valid; we just don't know anything about it.
def to_normalized_batch(self): assert all([ attr is None for attr_name, attr in self.__dict__.items() if attr_name.endswith("_aug")]), \ "Expected UnnormalizedBatch to not contain any augmented data " \ "before normalization, but at least one '*_aug' attribute was " \ "already set." images_unaug = nlib.normalize_images(self.images_unaug) shapes = None if images_unaug is not None: shapes = [image.shape for image in images_unaug] return Batch( images=images_unaug, heatmaps=nlib.normalize_heatmaps( self.heatmaps_unaug, shapes), segmentation_maps=nlib.normalize_segmentation_maps( self.segmentation_maps_unaug, shapes), keypoints=nlib.normalize_keypoints( self.keypoints_unaug, shapes), bounding_boxes=nlib.normalize_bounding_boxes( self.bounding_boxes_unaug, shapes), polygons=nlib.normalize_polygons( self.polygons_unaug, shapes), line_strings=nlib.normalize_line_strings( self.line_strings_unaug, shapes), data=self.data )
Convert this unnormalized batch to an instance of Batch. As this method is intended to be called before augmentation, it assumes that none of the ``*_aug`` attributes is yet set. It will produce an AssertionError otherwise. The newly created Batch's ``*_unaug`` attributes will match the ones in this batch, just in normalized form. Returns ------- imgaug.augmentables.batches.Batch The batch, with ``*_unaug`` attributes being normalized.
def sum_out(self, var, bn): "Make a factor eliminating var by summing over its values." vars = [X for X in self.vars if X != var] cpt = dict((event_values(e, vars), sum(self.p(extend(e, var, val)) for val in bn.variable_values(var))) for e in all_events(vars, bn, {})) return Factor(vars, cpt)
Make a factor eliminating var by summing over its values.
def file_download_event_builder(event, sender_app, obj=None, **kwargs): event.update(dict( timestamp=datetime.datetime.utcnow().isoformat(), bucket_id=str(obj.bucket_id), file_id=str(obj.file_id), file_key=obj.key, size=obj.file.size, referrer=request.referrer, **get_user() )) return event
Build a file-download event.
def load_roster(self, source): try: tree = ElementTree.parse(source) except ElementTree.ParseError, err: raise ValueError("Invalid roster format: {0}".format(err)) roster = Roster.from_xml(tree.getroot()) for item in roster: item.verify_roster_result(True) self.roster = roster
Load roster from an XML file. Can be used before the connection is started to load saved roster copy, for efficient retrieval of versioned roster. :Parameters: - `source`: file name or a file object :Types: - `source`: `str` or file-like object
def getPredictionResults(network, clRegionName): classifierRegion = network.regions[clRegionName] actualValues = classifierRegion.getOutputData("actualValues") probabilities = classifierRegion.getOutputData("probabilities") steps = classifierRegion.getSelf().stepsList N = classifierRegion.getSelf().maxCategoryCount results = {step: {} for step in steps} for i in range(len(steps)): stepProbabilities = probabilities[i * N:(i + 1) * N - 1] mostLikelyCategoryIdx = stepProbabilities.argmax() predictedValue = actualValues[mostLikelyCategoryIdx] predictionConfidence = stepProbabilities[mostLikelyCategoryIdx] results[steps[i]]["predictedValue"] = predictedValue results[steps[i]]["predictionConfidence"] = predictionConfidence return results
Get prediction results for all prediction steps.
def shutdown_check_handler(): url = 'http://169.254.169.254/latest/meta-data/spot/instance-action' try: resp = requests.get(url, timeout=1.0) resp.raise_for_status() stopinfo = resp.json() if 'action' in stopinfo and stopinfo['action'] in ('stop', 'terminate', 'hibernate'): stoptime = stopinfo['time'] LOGWARNING('instance is going to %s at %s' % (stopinfo['action'], stoptime)) resp.close() return True else: resp.close() return False except HTTPError as e: resp.close() return False except Exception as e: resp.close() return False
This checks the AWS instance data URL to see if there's a pending shutdown for the instance. This is useful for AWS spot instances. If there is a pending shutdown posted to the instance data URL, we'll use the result of this function break out of the processing loop and shut everything down ASAP before the instance dies. Returns ------- bool - True if the instance is going to die soon. - False if the instance is still safe.
def getrowcount(self, window_name, object_name): object_handle = self._get_object_handle(window_name, object_name) if not object_handle.AXEnabled: raise LdtpServerException(u"Object %s state disabled" % object_name) return len(object_handle.AXRows)
Get count of rows in table object. @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to look for, either full name, LDTP's name convention, or a Unix glob. Or menu heirarchy @type object_name: string @return: Number of rows. @rtype: integer
def mark_read(user, message): BackendClass = stored_messages_settings.STORAGE_BACKEND backend = BackendClass() backend.inbox_delete(user, message)
Mark message instance as read for user. Returns True if the message was `unread` and thus actually marked as `read` or False in case it is already `read` or it does not exist at all. :param user: user instance for the recipient :param message: a Message instance to mark as read
def _update_transmissions(self, content_metadata_item_map, transmission_map): for content_id, channel_metadata in content_metadata_item_map.items(): transmission = transmission_map[content_id] transmission.channel_metadata = channel_metadata transmission.save()
Update ContentMetadataItemTransmission models for the given content metadata items.
def add_review_date(self, doc, reviewed): if len(doc.reviews) != 0: if not self.review_date_set: self.review_date_set = True date = utils.datetime_from_iso_format(reviewed) if date is not None: doc.reviews[-1].review_date = date return True else: raise SPDXValueError('Review::ReviewDate') else: raise CardinalityError('Review::ReviewDate') else: raise OrderError('Review::ReviewDate')
Sets the review date. Raises CardinalityError if already set. OrderError if no reviewer defined before. Raises SPDXValueError if invalid reviewed value.
def _discover_via_entrypoints(self): emgr = extension.ExtensionManager(PLUGIN_EP, invoke_on_load=False) return ((ext.name, ext.plugin) for ext in emgr)
Looks for modules with matching entry points.
def default(self, obj): if isinstance(obj, np.ndarray): return obj.tolist() elif isinstance(obj, bytes): return obj.decode() elif isinstance(obj, complex): return (obj.real, obj.imag) elif (isinstance(obj, (float, np.float64, np.float_)) and not np.isfinite(obj)): return None elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)): return int(obj) else: return json.JSONEncoder.default(self, obj)
Overrides the default serializer for `JSONEncoder`. This can serialize the following objects in addition to what `JSONEncoder` can already do. - `np.array` - `bytes` - `complex` - `np.float64` and other `np.dtype` objects Parameters ---------- obj : object A Python object to serialize to JSON. Returns ------- str A JSON encoded representation of the input object.
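Usage sketch; the encoder's class name is not shown above, so ``NumpyAwareEncoder`` is a stand-in for whatever class defines this ``default`` method:

import json
import numpy as np

payload = {'weights': np.arange(3), 'flag': b'ok', 'z': 1 + 2j}
print(json.dumps(payload, cls=NumpyAwareEncoder))
# -> {"weights": [0, 1, 2], "flag": "ok", "z": [1.0, 2.0]}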
def ping(self): self.__validate_ping_param() ping_proc = subprocrunner.SubprocessRunner(self.__get_ping_command()) ping_proc.run() return PingResult(ping_proc.stdout, ping_proc.stderr, ping_proc.returncode)
Sending ICMP packets. :return: ``ping`` command execution result. :rtype: :py:class:`.PingResult` :raises ValueError: If parameters not valid.
def distrib_id():
    with settings(hide('running', 'stdout')):
        kernel = (run('uname -s') or '').strip().lower()
        if kernel == LINUX:
            if is_file('/usr/bin/lsb_release'):
                id_ = run('lsb_release --id --short').strip().lower()
                if id_ in ['arch', 'archlinux']:
                    id_ = ARCH
                return id_
            else:
                if is_file('/etc/debian_version'):
                    return DEBIAN
                elif is_file('/etc/fedora-release'):
                    return FEDORA
                elif is_file('/etc/arch-release'):
                    return ARCH
                elif is_file('/etc/redhat-release'):
                    release = run('cat /etc/redhat-release')
                    if release.startswith('Red Hat Enterprise Linux'):
                        return REDHAT
                    elif release.startswith('CentOS'):
                        return CENTOS
                    elif release.startswith('Scientific Linux'):
                        return SLES
                elif is_file('/etc/gentoo-release'):
                    return GENTOO
        elif kernel == SUNOS:
            return SUNOS
Get the OS distribution ID. Example:: from burlap.system import distrib_id if distrib_id() != 'Debian': abort(u"Distribution is not supported")
def close(self): try: self.conn.close() self.logger.debug("Close connect succeed.") except pymssql.Error as e: self.unknown("Close connect error: %s" % e)
Close the connection.
def is_allowed(self, role, method, resource): return (role, method, resource) in self._allowed
Check whether role is allowed to access resource :param role: Role to be checked. :param method: Method to be checked. :param resource: View function to be checked.
def get_changeset(changeset): url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format( changeset ) return ET.fromstring(requests.get(url).content)
Get the changeset using the OSM API and return the content as a XML ElementTree. Args: changeset: the id of the changeset.
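Usage sketch; the changeset id is arbitrary and the call goes to the live OSM API:

changeset = get_changeset(31450443)              # hypothetical changeset id
for action in changeset:                         # e.g. create/modify/delete blocks
    print(action.tag, len(list(action)))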
def disable_FTDI_driver(): logger.debug('Disabling FTDI driver.') if sys.platform == 'darwin': logger.debug('Detected Mac OSX') _check_running_as_root() subprocess.call('kextunload -b com.apple.driver.AppleUSBFTDI', shell=True) subprocess.call('kextunload /System/Library/Extensions/FTDIUSBSerialDriver.kext', shell=True) elif sys.platform.startswith('linux'): logger.debug('Detected Linux') _check_running_as_root() subprocess.call('modprobe -r -q ftdi_sio', shell=True) subprocess.call('modprobe -r -q usbserial', shell=True)
Disable the FTDI drivers for the current platform. This is necessary because they will conflict with libftdi and accessing the FT232H. Note you can enable the FTDI drivers again by calling enable_FTDI_driver.
def _ConvertValueMessage(value, message): if isinstance(value, dict): _ConvertStructMessage(value, message.struct_value) elif isinstance(value, list): _ConvertListValueMessage(value, message.list_value) elif value is None: message.null_value = 0 elif isinstance(value, bool): message.bool_value = value elif isinstance(value, six.string_types): message.string_value = value elif isinstance(value, _INT_OR_FLOAT): message.number_value = value else: raise ParseError('Unexpected type for Value message.')
Convert a JSON representation into Value message.
def enbw(data):
    N = len(data)
    return N * np.sum(data**2) / np.sum(data)**2
r"""Computes the equivalent noise bandwidth .. math:: ENBW = N \frac{\sum_{n=1}^{N} w_n^2}{\left(\sum_{n=1}^{N} w_n \right)^2} .. doctest:: >>> from spectrum import create_window, enbw >>> w = create_window(64, 'rectangular') >>> enbw(w) 1.0 The following table contains the ENBW values for some of the implemented windows in this module (with N=16384). They have been double checked against litterature (Source: [Harris]_, [Marple]_). If not present, it means that it has not been checked. =================== ============ ============= name ENBW litterature =================== ============ ============= rectangular 1. 1. triangle 1.3334 1.33 Hann 1.5001 1.5 Hamming 1.3629 1.36 blackman 1.7268 1.73 kaiser 1.7 blackmanharris,4 2.004 2. riesz 1.2000 1.2 riemann 1.32 1.3 parzen 1.917 1.92 tukey 0.25 1.102 1.1 bohman 1.7858 1.79 poisson 2 1.3130 1.3 hanningpoisson 0.5 1.609 1.61 cauchy 1.489 1.48 lanczos 1.3 =================== ============ =============
def get_docker_client(): env = get_docker_env() host, cert_path, tls_verify = env['DOCKER_HOST'], env['DOCKER_CERT_PATH'], env['DOCKER_TLS_VERIFY'] params = {'base_url': host.replace('tcp://', 'https://'), 'timeout': None, 'version': 'auto'} if tls_verify and cert_path: params['tls'] = docker.tls.TLSConfig( client_cert=(os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')), ca_cert=os.path.join(cert_path, 'ca.pem'), verify=True, ssl_version=None, assert_hostname=False) return docker.Client(**params)
Ripped off and slightly modified based on docker-py's kwargs_from_env utility function.
def merge_ndx(*args): ndxs = [] struct = None for fname in args: if fname.endswith('.ndx'): ndxs.append(fname) else: if struct is not None: raise ValueError("only one structure file supported") struct = fname fd, multi_ndx = tempfile.mkstemp(suffix='.ndx', prefix='multi_') os.close(fd) atexit.register(os.unlink, multi_ndx) if struct: make_ndx = registry['Make_ndx'](f=struct, n=ndxs, o=multi_ndx) else: make_ndx = registry['Make_ndx'](n=ndxs, o=multi_ndx) _, _, _ = make_ndx(input=['q'], stdout=False, stderr=False) return multi_ndx
Takes one or more index files and optionally one structure file and returns a path for a new merged index file. :param args: index files and zero or one structure file :return: path for the new merged index file
def CanonicalPath(path): path = os.path.normpath(path) path = os.path.abspath(path) path = os.path.normcase(path) return path
Returns a version of a path that is unique. Given two paths path1 and path2: CanonicalPath(path1) == CanonicalPath(path2) if and only if they represent the same file on the host OS. Takes account of case, slashes and relative paths. :param unicode path: The original path. :rtype: unicode :returns: The unique path.
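A quick illustration: two spellings of the same relative location canonicalise to the same string (case folding additionally applies on case-insensitive filesystems):

a = CanonicalPath('some/dir/../dir/file.txt')
b = CanonicalPath('./some/dir/file.txt')
assert a == b   # both resolve to <cwd>/some/dir/file.txt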
def _sasl_authenticate(self, stream, username, authzid): if not stream.initiator: raise SASLAuthenticationFailed("Only initiating entity start" " SASL authentication") if stream.features is None or not self.peer_sasl_mechanisms: raise SASLNotAvailable("Peer doesn't support SASL") props = dict(stream.auth_properties) if not props.get("service-domain") and ( stream.peer and stream.peer.domain): props["service-domain"] = stream.peer.domain if username is not None: props["username"] = username if authzid is not None: props["authzid"] = authzid if "password" in self.settings: props["password"] = self.settings["password"] props["available_mechanisms"] = self.peer_sasl_mechanisms enabled = sasl.filter_mechanism_list( self.settings['sasl_mechanisms'], props, self.settings['insecure_auth']) if not enabled: raise SASLNotAvailable( "None of SASL mechanism selected can be used") props["enabled_mechanisms"] = enabled mechanism = None for mech in enabled: if mech in self.peer_sasl_mechanisms: mechanism = mech break if not mechanism: raise SASLMechanismNotAvailable("Peer doesn't support any of" " our SASL mechanisms") logger.debug("Our mechanism: {0!r}".format(mechanism)) stream.auth_method_used = mechanism self.authenticator = sasl.client_authenticator_factory(mechanism) initial_response = self.authenticator.start(props) if not isinstance(initial_response, sasl.Response): raise SASLAuthenticationFailed("SASL initiation failed") element = ElementTree.Element(AUTH_TAG) element.set("mechanism", mechanism) if initial_response.data: if initial_response.encode: element.text = initial_response.encode() else: element.text = initial_response.data stream.write_element(element)
Start SASL authentication process. [initiating entity only] :Parameters: - `username`: user name. - `authzid`: authorization ID. - `mechanism`: SASL mechanism to use.
def color_from_hex(value):
    # The original condition was garbled in this snippet; stripping a leading
    # '#' from the HTML hex code is the most plausible reading.
    if value.startswith('#'):
        value = value[1:]
    try:
        unhexed = bytes.fromhex(value)
    except:
        unhexed = binascii.unhexlify(value)
    return color_from_rgb(*struct.unpack('BBB', unhexed))
Takes an HTML hex code and converts it to a proper hue value
def dump(data, file=sys.stdout, use_yaml=None, **kwds): if use_yaml is None: use_yaml = ALWAYS_DUMP_YAML def dump(fp): if use_yaml: yaml.safe_dump(data, stream=fp, **kwds) else: json.dump(data, fp, indent=4, sort_keys=True, **kwds) if not isinstance(file, str): return dump(file) if os.path.isabs(file): parent = os.path.dirname(file) if not os.path.exists(parent): os.makedirs(parent, exist_ok=True) with open(file, 'w') as fp: return dump(fp)
Dumps data as nicely formatted JSON string to a file or file handle :param dict data: a dictionary to dump :param file: a filename or file handle to write to :param kwds: keywords to pass to json.dump
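Usage sketch; writes JSON unless ``use_yaml`` (or the module-level ``ALWAYS_DUMP_YAML`` default) selects YAML:

import io

buf = io.StringIO()
dump({'b': 2, 'a': 1}, file=buf, use_yaml=False)
print(buf.getvalue())   # keys sorted, indented by 4 spaces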
def oneleft(self, window_name, object_name, iterations): if not self.verifyscrollbarhorizontal(window_name, object_name): raise LdtpServerException('Object not horizontal scrollbar') object_handle = self._get_object_handle(window_name, object_name) i = 0 minValue = 1.0 / 8 flag = False while i < iterations: if object_handle.AXValue <= 0: raise LdtpServerException('Minimum limit reached') object_handle.AXValue -= minValue time.sleep(1.0 / 100) flag = True i += 1 if flag: return 1 else: raise LdtpServerException('Unable to decrease scrollbar')
Press scrollbar left with number of iterations @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param interations: iterations to perform on slider increase @type iterations: integer @return: 1 on success. @rtype: integer
def transform_courserun_description(self, content_metadata_item): description_with_locales = [] content_metadata_language_code = transform_language_code(content_metadata_item.get('content_language', '')) for locale in self.enterprise_configuration.get_locales(default_locale=content_metadata_language_code): description_with_locales.append({ 'locale': locale, 'value': ( content_metadata_item['full_description'] or content_metadata_item['short_description'] or content_metadata_item['title'] or '' ) }) return description_with_locales
Return the description of the courserun content item.
def directory(cls, directory, root=None, extension=None, **kwargs): root = os.getcwd() if root is None else root suffix = '' if extension is None else '.' + extension.rsplit('.')[-1] pattern = directory + os.sep + '*' + suffix key = os.path.join(root, directory,'*').rsplit(os.sep)[-2] format_parse = list(string.Formatter().parse(key)) if not all([el is None for el in zip(*format_parse)[1]]): raise Exception('Directory cannot contain format field specifications') return cls(key, pattern, root, **kwargs)
Load all the files in a given directory selecting only files with the given extension if specified. The given kwargs are passed through to the normal constructor.
def move_to(self, thing, destination): "Move a thing to a new location." thing.bump = self.some_things_at(destination, Obstacle) if not thing.bump: thing.location = destination for o in self.observers: o.thing_moved(thing)
Move a thing to a new location.
def _run_paste(app, config, mode): from paste import httpserver version = "WsgiDAV/{} {} Python {}".format( __version__, httpserver.WSGIHandler.server_version, util.PYTHON_VERSION ) _logger.info("Running {}...".format(version)) server = httpserver.serve( app, host=config["host"], port=config["port"], server_version=version, protocol_version="HTTP/1.1", start_loop=False, ) if config["verbose"] >= 5: __handle_one_request = server.RequestHandlerClass.handle_one_request def handle_one_request(self): __handle_one_request(self) if self.close_connection == 1: _logger.debug("HTTP Connection : close") else: _logger.debug("HTTP Connection : continue") server.RequestHandlerClass.handle_one_request = handle_one_request server.RequestHandlerClass.handle_one_request = handle_one_request host, port = server.server_address if host == "0.0.0.0": _logger.info( "Serving on 0.0.0.0:{} view at {}://127.0.0.1:{}".format(port, "http", port) ) else: _logger.info("Serving on {}://{}:{}".format("http", host, port)) try: server.serve_forever() except KeyboardInterrupt: _logger.warning("Caught Ctrl-C, shutting down...") return
Run WsgiDAV using paste.httpserver, if Paste is installed. See http://pythonpaste.org/modules/httpserver.html for more options
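A minimal sketch of how this helper might be driven; the config keys shown are the ones _run_paste actually reads, and the trivial WSGI app below stands in for a real WsgiDAV application:

def app(environ, start_response):
    # Stand-in WSGI app; in WsgiDAV this would be the configured application.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

config = {"host": "127.0.0.1", "port": 8080, "verbose": 3}
_run_paste(app, config, mode=None)  # mode is unused by the body shown above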
def transform(self, jam):
    for state in self.states(jam):
        yield self._transform(jam, state)
Iterative transformation generator.

Applies the deformation to an input jams object. This generates
a sequence of deformed output JAMS.

Parameters
----------
jam : jams.JAMS
    The jam to transform

Examples
--------
>>> for jam_out in deformer.transform(jam_in):
...     process(jam_out)
def have_same_affine(one_img, another_img, only_check_3d=False):
    img1 = check_img(one_img)
    img2 = check_img(another_img)

    ndim1 = len(img1.shape)
    ndim2 = len(img2.shape)

    if ndim1 < 3:
        raise ValueError('Image {} has only {} dimensions, at least 3 dimensions is expected.'.format(repr_imgs(img1), ndim1))

    if ndim2 < 3:
        raise ValueError('Image {} has only {} dimensions, at least 3 dimensions is expected.'.format(repr_imgs(img2), ndim2))

    affine1 = img1.get_affine()
    affine2 = img2.get_affine()

    if only_check_3d:
        affine1 = affine1[:3, :3]
        affine2 = affine2[:3, :3]

    try:
        return np.allclose(affine1, affine2)
    except ValueError:
        return False
    except:
        raise
Return True if the affine matrix of one_img is close to the affine matrix of another_img.
False otherwise.

Parameters
----------
one_img: nibabel.Nifti1Image

another_img: nibabel.Nifti1Image

only_check_3d: bool
    If True will extract only the 3D part of the affine matrices when they have more dimensions.

Returns
-------
bool

Raises
------
ValueError
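A short usage sketch, assuming have_same_affine is importable from its module and that the two NIfTI files below (file names are hypothetical) exist on disk:

import nibabel as nib

img_a = nib.load('subject01_T1.nii.gz')    # hypothetical file
img_b = nib.load('subject01_mask.nii.gz')  # hypothetical file

# Compare only the 3x3 rotation/zoom block, ignoring translation and extra dims.
if have_same_affine(img_a, img_b, only_check_3d=True):
    print('Affines match; images share the same grid orientation.')
else:
    print('Affines differ; resample one image before combining them.')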
def establish_ssh_tunnel(self):
    localportlist = []
    for (host, port) in self.hostportlist:
        localport = self.pick_unused_port()
        self.tunnel.append(subprocess.Popen(
            ('ssh', self.tunnelhost, '-NL127.0.0.1:%d:%s:%d' % (localport, host, port))))
        localportlist.append(('127.0.0.1', localport))
    return localportlist
Establish an ssh tunnel for each local host and port that can be used to communicate with the state host.
def add_new_pattern(self, id_, name=None):
    if name is None:
        name = id_
    pattern = self._parser.new_pattern(id_, name)
    self._patterns.append(pattern)
    return pattern
Add a new, empty knitting pattern to the set.

:param id_: the id of the pattern
:param name: the name of the pattern to add or if :obj:`None`,
    the :paramref:`id_` is used
:return: a new, empty knitting pattern
:rtype: knittingpattern.KnittingPattern.KnittingPattern
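A hedged usage sketch; pattern_set stands in for whatever knitting-pattern-set object exposes this method, and the ids are illustrative:

# Add a named pattern, then one whose name defaults to its id.
sleeve = pattern_set.add_new_pattern('sleeve-left', name='Left Sleeve')
swatch = pattern_set.add_new_pattern('swatch')  # name falls back to 'swatch'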
def _leastUsedCell(cls, random, cells, connections):
    leastUsedCells = []
    minNumSegments = float("inf")

    for cell in cells:
        numSegments = connections.numSegments(cell)

        if numSegments < minNumSegments:
            minNumSegments = numSegments
            leastUsedCells = []

        if numSegments == minNumSegments:
            leastUsedCells.append(cell)

    i = random.getUInt32(len(leastUsedCells))
    return leastUsedCells[i]
Gets the cell with the smallest number of segments.
Break ties randomly.

:param random: (Object)
    Random number generator. Gets mutated.

:param cells: (list)
    Indices of cells.

:param connections: (Object)
    Connections instance for the TM.

:returns: (int) Cell index.
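An illustrative sketch of the tie-breaking behaviour with stand-in objects; TemporalMemory is an assumed name for the enclosing class, and the fakes only mimic the two calls the helper makes:

class FakeConnections:
    def __init__(self, counts):
        self._counts = counts

    def numSegments(self, cell):
        return self._counts[cell]

class FakeRandom:
    def getUInt32(self, n):
        return 0  # deterministic: always pick the first tied candidate

conns = FakeConnections({10: 2, 11: 0, 12: 0})
# Cells 11 and 12 tie with zero segments; the fake RNG picks cell 11.
print(TemporalMemory._leastUsedCell(FakeRandom(), [10, 11, 12], conns))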
def appendSigner(self, accounts, permission):
    assert permission in self.permission_types, "Invalid permission"

    if self.blockchain.wallet.locked():
        raise WalletLocked()
    if not isinstance(accounts, (list, tuple, set)):
        accounts = [accounts]

    for account in accounts:
        if account not in self.signing_accounts:
            if isinstance(account, self.publickey_class):
                self.appendWif(
                    self.blockchain.wallet.getPrivateKeyForPublicKey(str(account))
                )
            else:
                accountObj = self.account_class(
                    account, blockchain_instance=self.blockchain
                )
                required_treshold = accountObj[permission]["weight_threshold"]
                keys = self._fetchkeys(
                    accountObj, permission, required_treshold=required_treshold
                )
                if not keys and permission != "owner":
                    keys.extend(
                        self._fetchkeys(
                            accountObj, "owner", required_treshold=required_treshold
                        )
                    )
                for x in keys:
                    self.appendWif(x[0])

            self.signing_accounts.append(account)
Try to obtain the WIF keys from the wallet by specifying which account and permission are supposed to sign the transaction.
def load_wmt_en_fr_dataset(path='data'):
    path = os.path.join(path, 'wmt_en_fr')
    _WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/"
    _WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/"

    def gunzip_file(gz_path, new_path):
        logging.info("Unpacking %s to %s" % (gz_path, new_path))
        with gzip.open(gz_path, "rb") as gz_file:
            with open(new_path, "wb") as new_file:
                for line in gz_file:
                    new_file.write(line)

    def get_wmt_enfr_train_set(path):
        filename = "training-giga-fren.tar"
        maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)
        train_path = os.path.join(path, "giga-fren.release2.fixed")
        gunzip_file(train_path + ".fr.gz", train_path + ".fr")
        gunzip_file(train_path + ".en.gz", train_path + ".en")
        return train_path

    def get_wmt_enfr_dev_set(path):
        filename = "dev-v2.tgz"
        dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)
        dev_name = "newstest2013"
        dev_path = os.path.join(path, "newstest2013")
        if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
            logging.info("Extracting tgz file %s" % dev_file)
            with tarfile.open(dev_file, "r:gz") as dev_tar:
                fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
                en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
                fr_dev_file.name = dev_name + ".fr"
                en_dev_file.name = dev_name + ".en"
                dev_tar.extract(fr_dev_file, path)
                dev_tar.extract(en_dev_file, path)
        return dev_path

    logging.info("Load or Download WMT English-to-French translation > {}".format(path))

    train_path = get_wmt_enfr_train_set(path)
    dev_path = get_wmt_enfr_dev_set(path)
    return train_path, dev_path
Load WMT'15 English-to-French translation dataset.

It will download the data from the WMT'15 Website (10^9-French-English corpus), and
the 2013 news test from the same site as development set.
Returns the directories of training data and test data.

Parameters
----------
path : str
    The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``.

References
----------
- Code modified from /tensorflow/models/rnn/translation/data_utils.py

Notes
-----
Usually, it will take a long time to download this dataset.
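A brief usage sketch, assuming the function is imported from its data-loading module; the first call downloads several gigabytes, while later calls reuse the cached files:

train_path, dev_path = load_wmt_en_fr_dataset(path='data')
# train_path points at data/wmt_en_fr/giga-fren.release2.fixed (add .en / .fr suffixes)
# dev_path points at data/wmt_en_fr/newstest2013 (add .en / .fr suffixes)
print(train_path + ".en", dev_path + ".fr")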
def get_obj_cols(df):
    obj_cols = []
    for idx, dt in enumerate(df.dtypes):
        if dt == 'object' or is_category(dt):
            obj_cols.append(df.columns.values[idx])

    return obj_cols
Returns names of 'object' columns in the DataFrame.
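A quick usage sketch with pandas; is_category (from the same module) is assumed to flag pandas categorical dtypes, as the check above suggests:

import pandas as pd

df = pd.DataFrame({
    'city': ['Oslo', 'Lima'],             # object dtype
    'grade': pd.Categorical(['A', 'B']),  # categorical dtype
    'score': [0.7, 0.9],                  # float64, ignored
})

print(get_obj_cols(df))  # expected: ['city', 'grade']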
def cp(resume, quiet, dataset_uri, dest_base_uri):
    _copy(resume, quiet, dataset_uri, dest_base_uri)
Copy a dataset to a different location.
def find_characteristic(self, uuid):
    for char in self.list_characteristics():
        if char.uuid == uuid:
            return char
    return None
Return the first child characteristic found that has the specified UUID. Will return None if no characteristic that matches is found.
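A hedged usage sketch; service stands in for the object that exposes find_characteristic, and the standard 16-bit Heart Rate Measurement UUID (0x2A37) is expanded to its full form for illustration:

import uuid

HR_MEASUREMENT = uuid.UUID('00002a37-0000-1000-8000-00805f9b34fb')

char = service.find_characteristic(HR_MEASUREMENT)
if char is None:
    print('Characteristic not present on this service.')
else:
    print('Found characteristic with UUID:', char.uuid)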
def validate_tpa_user_id(self, value):
    enterprise_customer = self.context.get('enterprise_customer')

    try:
        tpa_client = ThirdPartyAuthApiClient()
        username = tpa_client.get_username_from_remote_id(
            enterprise_customer.identity_provider, value
        )
        user = User.objects.get(username=username)
        return models.EnterpriseCustomerUser.objects.get(
            user_id=user.id,
            enterprise_customer=enterprise_customer
        )
    except (models.EnterpriseCustomerUser.DoesNotExist, User.DoesNotExist):
        pass

    return None
Validates the tpa_user_id, if it is given, to see if there is an existing EnterpriseCustomerUser for it.

It first uses the third party auth api to find the associated username to do the lookup.
def id_by_addr(self, addr):
    if self._databaseType in (const.PROXY_EDITION, const.NETSPEED_EDITION_REV1, const.NETSPEED_EDITION_REV1_V6):
        raise GeoIPError('Invalid database type; this database is not supported')
    ipv = 6 if addr.find(':') >= 0 else 4
    if ipv == 4 and self._databaseType not in (const.COUNTRY_EDITION, const.NETSPEED_EDITION):
        raise GeoIPError('Invalid database type; this database supports IPv6 addresses, not IPv4')
    if ipv == 6 and self._databaseType != const.COUNTRY_EDITION_V6:
        raise GeoIPError('Invalid database type; this database supports IPv4 addresses, not IPv6')

    ipnum = util.ip2long(addr)
    return self._seek_country(ipnum) - const.COUNTRY_BEGIN
Returns the database ID for specified address.
The ID might be useful as array index. 0 is unknown.

:arg addr: IPv4 or IPv6 address (eg. 203.0.113.30)
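A short usage sketch with the pygeoip-style API this method appears to come from; the database path is hypothetical and must point at a Country edition file:

import pygeoip  # assumed to be the library providing this method

gi = pygeoip.GeoIP('/usr/share/GeoIP/GeoIP.dat')  # hypothetical path, Country edition
country_id = gi.id_by_addr('203.0.113.30')
print(country_id)  # 0 means the country is unknown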