diff --git "a/train_code_cleaned.json" "b/train_code_cleaned.json" deleted file mode 100644--- "a/train_code_cleaned.json" +++ /dev/null @@ -1,43154 +0,0 @@ -[ - { - "code": "def run(self):\n for fn in glob_all(self.args.random_data_folder, '*.wav'):\n if fn in self.trained_fns:\n print('Skipping ' + fn + '...')\n continue\n print('Starting file ' + fn + '...')\n self.train_on_audio(fn)\n print('\\r100% ')\n self.trained_fns.append(fn)\n save_trained_fns(self.trained_fns, self.args.model)", - "docstring": "Begin reading through audio files, saving false\n activations and retraining when necessary" - }, - { - "code": "def canonical_headers(self, headers_to_sign):\n l = ['%s:%s'%(n.lower().strip(),\n headers_to_sign[n].strip()) for n in headers_to_sign]\n l.sort()\n return '\\n'.join(l)", - "docstring": "Return the headers that need to be included in the StringToSign\n in their canonical form by converting all header keys to lower\n case, sorting them in alphabetical order and then joining\n them into a string, separated by newlines." - }, - { - "code": "def files_list(self, **kwargs) -> SlackResponse:\n self._validate_xoxp_token()\n return self.api_call(\"files.list\", http_verb=\"GET\", params=kwargs)", - "docstring": "Lists & filters team files." - }, - { - "code": "def acquire(self, lock):\n unit = hookenv.local_unit()\n ts = self.requests[unit].get(lock)\n if not ts:\n self.requests.setdefault(lock, {})\n self.requests[unit][lock] = _timestamp()\n self.msg('Requested {}'.format(lock))\n if self.granted(lock):\n self.msg('Acquired {}'.format(lock))\n return True\n if hookenv.is_leader():\n return self.grant(lock, unit)\n return False", - "docstring": "Acquire the named lock, non-blocking.\n\n The lock may be granted immediately, or in a future hook.\n\n Returns True if the lock has been granted. The lock will be\n automatically released at the end of the hook in which it is\n granted.\n\n Do not mindlessly call this method, as it triggers a cascade of\n hooks. For example, if you call acquire() every time in your\n peers relation-changed hook you will end up with an infinite loop\n of hooks. It should almost always be guarded by some condition." 
- }, - { - "code": "def success_count(self):\n return len([i for i, result in enumerate(self.data) if result.success])", - "docstring": "Amount of passed test cases in this list.\n\n :return: integer" - }, - { - "code": "def cursor(self, autocommit=True):\n with self.connection(autocommit) as conn:\n with conn.cursor() as cursor:\n yield cursor", - "docstring": "When a connection exits the with block,\n - the tx is committed if no errors were encountered\n - the tx is rolled back if errors\n\n When a cursor exits its with block it is closed, without affecting\n the state of the transaction.\n\n http://initd.org/psycopg/docs/usage.html" - }, - { - "code": "def _cleanlogs(silent=False, log_location=\"log\"):\n try:\n print(\"cleaning up Icetea log directory.\")\n shutil.rmtree(log_location, ignore_errors=silent,\n onerror=None if silent else _clean_onerror)\n except OSError as error:\n print(error)", - "docstring": "Cleans up Mbed-test default log directory.\n\n :param silent: Defaults to False\n :param log_location: Location of log files, defaults to \"log\"\n :return: Nothing" - }, - { - "code": "def context(self):\n ret = libxml2mod.xmlXPathParserGetContext(self._o)\n if ret is None:raise xpathError('xmlXPathParserGetContext() failed')\n __tmp = xpathContext(_obj=ret)\n return __tmp", - "docstring": "Get the xpathContext from an xpathParserContext" - }, - { - "code": "def getmerge(self, path, dst, newline=False, check_crc=False):\n if not path:\n raise InvalidInputException(\"getmerge: no path given\")\n if not dst:\n raise InvalidInputException(\"getmerge: no destination given\")\n temporary_target = \"%s._COPYING_\" % dst\n f = open(temporary_target, 'w')\n processor = lambda path, node, dst=dst, check_crc=check_crc: self._handle_getmerge(path, node, dst, check_crc)\n try:\n for item in self._find_items([path], processor, include_toplevel=True, recurse=False, include_children=True):\n for load in item:\n if load['result']:\n f.write(load['response'])\n elif not load['error'] is '':\n if os.path.isfile(temporary_target):\n os.remove(temporary_target)\n raise FatalException(load['error'])\n if newline and load['response']:\n f.write(\"\\n\")\n yield {\"path\": dst, \"response\": '', \"result\": True, \"error\": load['error'], \"source_path\": path}\n finally:\n if os.path.isfile(temporary_target):\n f.close()\n os.rename(temporary_target, dst)", - "docstring": "Get all the files in the directories that\n match the source file pattern and merge and sort them to only\n one file on local fs.\n\n :param paths: Directory containing files that will be merged\n :type paths: string\n :param dst: Path of file that will be written\n :type dst: string\n :param nl: Add a newline character at the end of each file.\n :type nl: boolean\n :returns: string content of the merged file at dst" - }, - { - "code": "def insert_file(self, f, namespace, timestamp):\n doc = f.get_metadata()\n doc[\"content\"] = f.read()\n self.doc_dict[f._id] = Entry(doc=doc, ns=namespace, ts=timestamp)", - "docstring": "Inserts a file to the doc dict." 
- }, - { - "code": "def inodeusage(args=None):\n flags = _clean_flags(args, 'disk.inodeusage')\n if __grains__['kernel'] == 'AIX':\n cmd = 'df -i'\n else:\n cmd = 'df -iP'\n if flags:\n cmd += ' -{0}'.format(flags)\n ret = {}\n out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()\n for line in out:\n if line.startswith('Filesystem'):\n continue\n comps = line.split()\n if not comps:\n continue\n try:\n if __grains__['kernel'] == 'OpenBSD':\n ret[comps[8]] = {\n 'inodes': int(comps[5]) + int(comps[6]),\n 'used': comps[5],\n 'free': comps[6],\n 'use': comps[7],\n 'filesystem': comps[0],\n }\n elif __grains__['kernel'] == 'AIX':\n ret[comps[6]] = {\n 'inodes': comps[4],\n 'used': comps[5],\n 'free': comps[2],\n 'use': comps[5],\n 'filesystem': comps[0],\n }\n else:\n ret[comps[5]] = {\n 'inodes': comps[1],\n 'used': comps[2],\n 'free': comps[3],\n 'use': comps[4],\n 'filesystem': comps[0],\n }\n except (IndexError, ValueError):\n log.error('Problem parsing inode usage information')\n ret = {}\n return ret", - "docstring": "Return inode usage information for volumes mounted on this minion\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' disk.inodeusage" - }, - { - "code": "def node(self, nodeid):\n _assert_valid_nodeid(nodeid)\n return Node(hub=Hub(nodeid=nodeid))", - "docstring": "Creates a new node with the specified name, with `MockSocket` instances as incoming and outgoing sockets.\n\n Returns the implementation object created for the node from the cls, args and address specified, and the sockets.\n `cls` must be a callable that takes the insock and outsock, and the specified args and kwargs." - }, - { - "code": "def collapse(self, remove=False):\n descendants = iter(self)\n assert next(descendants) is self\n for descendant in descendants:\n self.sequence_ids.update(descendant.sequence_ids)\n descendant.sequence_ids.clear()\n if remove:\n for node in self.children:\n self.remove_child(node)", - "docstring": "Move all ``sequence_ids`` in the subtree below this node to this node.\n\n If ``remove`` is True, nodes below this one are deleted from the\n taxonomy." - }, - { - "code": "def add_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5,\n gwAmp=1e-20, alpha=-0.66, logspacing=True):\n gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)\n gwb.add_gwb(psr,dist)\n return gwb", - "docstring": "Add a stochastic background from inspiraling binaries, using the tempo2\n code that underlies the GWbkgrd plugin.\n\n Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries,\n 'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator,\n 'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha'\n determine its amplitude and exponent, and setting 'logspacing' to False\n will use linear spacing for the individual sources.\n\n It is also possible to create a background object with\n\n gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing)\n\n then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a\n consistent background for multiple pulsars.\n\n Returns the GWB object" - }, - { - "code": "def _get_ui_content(self, cli, width, height):\n def get_content():\n return self.content.create_content(cli, width=width, height=height)\n key = (cli.render_counter, width, height)\n return self._ui_content_cache.get(key, get_content)", - "docstring": "Create a `UIContent` instance." 
- }, - { - "code": "def start(st_reg_number):\n weights = [9, 8, 7, 6, 5, 4, 3, 2]\n digit_state_registration = st_reg_number[-1]\n if len(st_reg_number) != 9:\n return False\n sum_total = 0\n for i in range(0, 8):\n sum_total = sum_total + weights[i] * int(st_reg_number[i])\n if sum_total % 11 == 0:\n return digit_state_registration[-1] == '0'\n digit_check = 11 - sum_total % 11\n return str(digit_check) == digit_state_registration", - "docstring": "Checks the number valiaty for the Paraiba state" - }, - { - "code": "def set(self, key, val, bucket):\n if bucket not in self._cache:\n self._cache[bucket] = {}\n self._cache[bucket][key] = val", - "docstring": "Set a cached item by key\n\n WARN: Regardless if the item is already in the cache,\n it will be udpated with the new value." - }, - { - "code": "def lstr_lsnode(self, astr_path=\"\"):\n self.sCore.reset()\n str_cwd = self.cwd()\n if len(astr_path): self.cdnode(astr_path)\n lst = self.snode_current.d_nodes.keys()\n if len(astr_path): self.cdnode(str_cwd)\n return lst", - "docstring": "Return the string names of the set of nodes branching from\n current node as list of strings" - }, - { - "code": "def entries(self):\n return ContentTypeEntriesProxy(self._client, self.space.id, self._environment_id, self.id)", - "docstring": "Provides access to entry management methods for the given content type.\n\n API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/entries\n\n :return: :class:`ContentTypeEntriesProxy ` object.\n :rtype: contentful.content_type_entries_proxy.ContentTypeEntriesProxy\n\n Usage:\n\n >>> content_type_entries_proxy = content_type.entries()\n " - }, - { - "code": "def variant (name, parents_or_properties, explicit_properties = []):\n parents = []\n if not explicit_properties:\n explicit_properties = parents_or_properties\n else:\n parents = parents_or_properties\n inherited = property_set.empty()\n if parents:\n if len (parents) > 1:\n raise BaseException (\"Multiple base variants are not yet supported\")\n p = parents[0]\n if not feature.is_implicit_value (p):\n raise BaseException (\"Invalid base variant '%s'\" % p)\n inherited = __variant_explicit_properties[p]\n explicit_properties = property_set.create_with_validation(explicit_properties)\n explicit_properties = inherited.refine(explicit_properties)\n __variant_explicit_properties[name] = explicit_properties\n feature.extend('variant', [name])\n feature.compose (\"\" + name, explicit_properties.all())", - "docstring": "Declares a new variant.\n First determines explicit properties for this variant, by\n refining parents' explicit properties with the passed explicit\n properties. The result is remembered and will be used if\n this variant is used as parent.\n\n Second, determines the full property set for this variant by\n adding to the explicit properties default values for all properties\n which neither present nor are symmetric.\n\n Lastly, makes appropriate value of 'variant' property expand\n to the full property set.\n name: Name of the variant\n parents_or_properties: Specifies parent variants, if\n 'explicit_properties' are given,\n and explicit_properties otherwise.\n explicit_properties: Explicit properties." 
- }, - { - "code": "def to_fs_path(uri):\n scheme, netloc, path, _params, _query, _fragment = urlparse(uri)\n if netloc and path and scheme == 'file':\n value = \"//{}{}\".format(netloc, path)\n elif RE_DRIVE_LETTER_PATH.match(path):\n value = path[1].lower() + path[2:]\n else:\n value = path\n if IS_WIN:\n value = value.replace('/', '\\\\')\n return value", - "docstring": "Returns the filesystem path of the given URI.\n\n Will handle UNC paths and normalize windows drive letters to lower-case. Also\n uses the platform specific path separator. Will *not* validate the path for\n invalid characters and semantics. Will *not* look at the scheme of this URI." - }, - { - "code": "def _load_cert_chain(self, cert_url, x509_backend=default_backend()):\n try:\n if cert_url in self._cert_cache:\n return self._cert_cache.get(cert_url)\n else:\n with urlopen(cert_url) as cert_response:\n cert_data = cert_response.read()\n x509_certificate = load_pem_x509_certificate(\n cert_data, x509_backend)\n self._cert_cache[cert_url] = x509_certificate\n return x509_certificate\n except ValueError as e:\n raise VerificationException(\n \"Unable to load certificate from URL\", e)", - "docstring": "Loads the certificate chain from the URL.\n\n This method loads the certificate chain from the certificate\n cache. If there is a cache miss, the certificate chain is\n loaded from the certificate URL using the\n :py:func:`cryptography.x509.load_pem_x509_certificate` method.\n The x509 backend is set as default to the\n :py:class:`cryptography.hazmat.backends.default_backend`\n instance. A :py:class:`VerificationException` is raised if the\n certificate cannot be loaded.\n\n :param cert_url: URL for retrieving certificate chain\n :type cert_url: str\n :param x509_backend: Backend to be used, for loading pem x509\n certificate\n :type x509_backend:\n cryptography.hazmat.backends.interfaces.X509Backend\n :return: Certificate chain loaded from cache or URL\n :rtype cryptography.x509.Certificate\n :raises: :py:class:`VerificationException` if unable to load the\n certificate" - }, - { - "code": "def is_location(v) -> (bool, str):\n def convert2float(value):\n try:\n float_num = float(value)\n return float_num\n except ValueError:\n return False\n if not isinstance(v, str):\n return False, v\n split_lst = v.split(\":\")\n if len(split_lst) != 5:\n return False, v\n if convert2float(split_lst[3]):\n longitude = abs(convert2float(split_lst[3]))\n if longitude > 90:\n return False, v\n if convert2float(split_lst[4]):\n latitude = abs(convert2float(split_lst[3]))\n if latitude > 180:\n return False, v\n return True, v", - "docstring": "Boolean function for checking if v is a location format\n\n Args:\n v:\n Returns: bool" - }, - { - "code": "def update_gtapp(gtapp, **kwargs):\n for key, val in kwargs.items():\n if key in ['pfiles', 'scratch']:\n continue\n if val is None:\n continue\n try:\n gtapp[key] = val\n except ValueError:\n raise ValueError(\n \"gtapp failed to set parameter %s %s\" % (key, val))\n except KeyError:\n raise KeyError(\"gtapp failed to set parameter %s %s\" % (key, val))", - "docstring": "Update the parameters of the object that can run ScienceTools applications\n\n Parameters\n ----------\n\n gtapp : `GtApp.GtApp`\n Object that will run the application in question\n\n kwargs : arguments used to invoke the application" - }, - { - "code": "def from_json_dict(cls,\n json_dict\n ):\n result = cast(StringSpec,\n super().from_json_dict(json_dict))\n format_ = json_dict['format']\n if 'encoding' in format_ and 
result.hashing_properties:\n result.hashing_properties.encoding = format_['encoding']\n if 'pattern' in format_:\n pattern = format_['pattern']\n try:\n result.regex = re_compile_full(pattern)\n except (SyntaxError, re.error) as e:\n msg = \"Invalid regular expression '{}.'\".format(pattern)\n e_new = InvalidSchemaError(msg)\n raise_from(e_new, e)\n result.regex_based = True\n else:\n result.case = format_.get('case', StringSpec._DEFAULT_CASE)\n result.min_length = format_.get('minLength')\n result.max_length = format_.get('maxLength')\n result.regex_based = False\n return result", - "docstring": "Make a StringSpec object from a dictionary containing its\n properties.\n\n :param dict json_dict: This dictionary must contain an\n `'encoding'` key associated with a Python-conformant\n encoding. It must also contain a `'hashing'` key, whose\n contents are passed to :class:`FieldHashingProperties`.\n Permitted keys also include `'pattern'`, `'case'`,\n `'minLength'`, and `'maxLength'`.\n :raises InvalidSchemaError: When a regular expression is\n provided but is not a valid pattern." - }, - { - "code": "def pages(self):\n if self._owner_id is None:\n it = ProfileIterator.from_username(self._username, self.session)\n self._owner_id = it.owner_id\n return it\n return ProfileIterator(self._owner_id, self.session, self.rhx)", - "docstring": "Obtain an iterator over Instagram post pages.\n\n Returns:\n PageIterator: an iterator over the instagram post pages.\n\n Raises:\n ValueError: when the requested user does not exist.\n RuntimeError: when the user is a private account\n and there is no logged user (or the logged user\n does not follow that account)." - }, - { - "code": "def canonical_query_string(self):\n results = []\n for key, values in iteritems(self.query_parameters):\n if key == _x_amz_signature:\n continue\n for value in values:\n results.append(\"%s=%s\" % (key, value))\n return \"&\".join(sorted(results))", - "docstring": "The canonical query string from the query parameters.\n\n This takes the query string from the request and orders the parameters\n in" - }, - { - "code": "def run(self):\n while not self.stopped.isSet():\n try:\n timeout = (self._state != 'idle') and 0.2 or None\n rdlist, _, _ = select.select([self._socket.fileno()], [], [], timeout)\n if not rdlist:\n if self._state != 'idle':\n self._state = 'idle'\n continue\n data = self._socket.recv(1024)\n if not data:\n try:\n os.fstat(recv._socket.fileno())\n except socket.error:\n break\n continue\n code = utils.mangleIR(data, ignore_errors=True)\n codeName = self.codeMap.get(code)\n if codeName and (self._state != codeName):\n self._state = codeName\n for callback in self._callbacks:\n callback(codeName)\n except:\n time.sleep(0.1)", - "docstring": "Main loop of KIRA thread." 
- }, - { - "code": "def ReadTrigger(self, trigger_link, options=None):\n if options is None:\n options = {}\n path = base.GetPathFromLink(trigger_link)\n trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)\n return self.Read(path, 'triggers', trigger_id, None, options)", - "docstring": "Reads a trigger.\n\n :param str trigger_link:\n The link to the trigger.\n :param dict options:\n The request options for the request.\n\n :return:\n The read Trigger.\n :rtype:\n dict" - }, - { - "code": "def delay(self):\n if self._last_checked:\n return self._interval - (time.time() - self._last_checked)\n return self._interval", - "docstring": "How long to wait before the next check" - }, - { - "code": "def ExpandDims(a, dim):\n shape = list(a.shape)\n if dim >= 0:\n shape.insert(dim, 1)\n else:\n shape.insert(len(shape) + dim + 1, 1)\n return np.copy(a).reshape(*shape),", - "docstring": "Expand dim op, i.e. add singular axis at dim." - }, - { - "code": "def get_all_article_properties(self, params=None):\n if not params:\n params = {}\n return self._iterate_through_pages(\n get_function=self.get_article_properties_per_page,\n resource=ARTICLE_PROPERTIES,\n **{'params': params}\n )", - "docstring": "Get all article properties\n This will iterate over all pages until it gets all elements.\n So if the rate limit exceeded it will throw an Exception and you will get nothing\n\n :param params: search params\n :return: list" - }, - { - "code": "def hint(invertible=True, callable=None, **hints):\n def wrapper(fn):\n fn.hints = hints\n fn.hinting_invertible = invertible\n fn.hinting_callable = callable\n return fn\n return wrapper", - "docstring": "A decorator that can optionally hang datanommer hints on a rule." - }, - { - "code": "def database_caller_creator(self, name=None):\n couch = couchdb.Server()\n if name:\n db = couch.create(name)\n else:\n n = 'couchdb_' + lower_str_generator(self)\n db = couch.create(n)\n logger.warning('couchdb database created with the name: %s', n,\n extra=d)\n return db", - "docstring": "creates a couchdb database\n returns the related connection object\n which will be later used to spawn the cursor" - }, - { - "code": "def outstanding(self):\n done_count = 0\n for item in self:\n if not self.wait_any and item.fire_and_forget:\n done_count += 1\n elif item.done:\n done_count += 1\n if self.wait_any and done_count > 0:\n return False\n if done_count == len(self):\n return False\n return True", - "docstring": "Returns whether or not this barrier has pending work." - }, - { - "code": "def _get_config_value(profile, config_name):\n config = __salt__['config.option'](profile)\n if not config:\n raise CommandExecutionError(\n 'Authentication information could not be found for the '\n '\\'{0}\\' profile.'.format(profile)\n )\n config_value = config.get(config_name)\n if config_value is None:\n raise CommandExecutionError(\n 'The \\'{0}\\' parameter was not found in the \\'{1}\\' '\n 'profile.'.format(\n config_name,\n profile\n )\n )\n return config_value", - "docstring": "Helper function that returns a profile's configuration value based on\n the supplied configuration name.\n\n profile\n The profile name that contains configuration information.\n\n config_name\n The configuration item's name to use to return configuration values." 
- }, - { - "code": "def mv_normal_like(x, mu, tau):\n R\n if len(np.shape(x)) > 1:\n return np.sum([flib.prec_mvnorm(r, mu, tau) for r in x])\n else:\n return flib.prec_mvnorm(x, mu, tau)", - "docstring": "R\"\"\"\n Multivariate normal log-likelihood\n\n .. math::\n f(x \\mid \\pi, T) = \\frac{|T|^{1/2}}{(2\\pi)^{1/2}} \\exp\\left\\{ -\\frac{1}{2} (x-\\mu)^{\\prime}T(x-\\mu) \\right\\}\n\n :Parameters:\n - `x` : (n,k)\n - `mu` : (k) Location parameter sequence.\n - `Tau` : (k,k) Positive definite precision matrix.\n\n .. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`" - }, - { - "code": "def disconnect_from(callback, signals):\n if not isinstance(signals, (list, tuple)):\n signals = [signals]\n for signal in signals:\n if responds_to(callback, signal):\n receivers[signal].remove(callback)", - "docstring": "Removes a callback from specified signal registries and prevents it from responding\n to any emitted signal.\n\n :param callback: A callable registered with smokesignal\n :param signals: A single signal or list/tuple of signals" - }, - { - "code": "def encode_date_optional_time(obj):\n if isinstance(obj, datetime.datetime):\n return timezone(\"UTC\").normalize(obj.astimezone(timezone(\"UTC\"))).strftime('%Y-%m-%dT%H:%M:%SZ')\n raise TypeError(\"{0} is not JSON serializable\".format(repr(obj)))", - "docstring": "ISO encode timezone-aware datetimes" - }, - { - "code": "def all_dbs(self):\n url = '/'.join((self.server_url, '_all_dbs'))\n resp = self.r_session.get(url)\n resp.raise_for_status()\n return response_to_json_dict(resp)", - "docstring": "Retrieves a list of all database names for the current client.\n\n :returns: List of database names for the client" - }, - { - "code": "def instantiate_from_config(cfg):\n for h in cfg:\n if h.get(\"type\") in data_types:\n raise KeyError(\"Data type '%s' already exists\" % h)\n data_types[h.get(\"type\")] = DataType(h)", - "docstring": "Instantiate data types from config" - }, - { - "code": "def currentpath(self) -> str:\n return os.path.join(self.basepath, self.currentdir)", - "docstring": "Absolute path of the current working directory.\n\n >>> from hydpy.core.filetools import FileManager\n >>> filemanager = FileManager()\n >>> filemanager.BASEDIR = 'basename'\n >>> filemanager.projectdir = 'projectname'\n >>> from hydpy import repr_, TestIO\n >>> with TestIO():\n ... filemanager.currentdir = 'testdir'\n ... repr_(filemanager.currentpath) # doctest: +ELLIPSIS\n '...hydpy/tests/iotesting/projectname/basename/testdir'" - }, - { - "code": "def get_event_kind(self):\n slug = self.kwargs.get('kind_slug', None)\n if slug is None:\n return None\n else:\n slugs_to_kinds = {v:k for k,v in Event.KIND_SLUGS.items()}\n return slugs_to_kinds.get(slug, None)", - "docstring": "Unless we're on the front page we'll have a kind_slug like 'movies'.\n We need to translate that into an event `kind` like 'movie'." 
- }, - { - "code": "def _mkdirs_impacket(path, share='C$', conn=None, host=None, username=None, password=None):\n if conn is None:\n conn = get_conn(host, username, password)\n if conn is False:\n return False\n comps = path.split('/')\n pos = 1\n for comp in comps:\n cwd = '\\\\'.join(comps[0:pos])\n try:\n conn.listPath(share, cwd)\n except (smbSessionError, smb3SessionError):\n log.exception('Encountered error running conn.listPath')\n conn.createDirectory(share, cwd)\n pos += 1", - "docstring": "Recursively create a directory structure on an SMB share\n\n Paths should be passed in with forward-slash delimiters, and should not\n start with a forward-slash." - }, - { - "code": "def get_nan_locs(self, **kwargs):\n if np.issubdtype(self.X.dtype, np.string_) or np.issubdtype(self.X.dtype, np.unicode_):\n mask = np.where( self.X == '' )\n nan_matrix = np.zeros(self.X.shape)\n nan_matrix[mask] = np.nan\n else:\n nan_matrix = self.X.astype(float)\n if self.y is None:\n return np.argwhere(np.isnan(nan_matrix))\n else:\n nan_locs = []\n for target_value in np.unique(self.y):\n indices = np.argwhere(self.y == target_value)\n target_matrix = nan_matrix[indices.flatten()]\n nan_target_locs = np.argwhere(np.isnan(target_matrix))\n nan_locs.append((target_value, nan_target_locs))\n return nan_locs", - "docstring": "Gets the locations of nans in feature data and returns\n the coordinates in the matrix" - }, - { - "code": "def _symlink_check(name, target, force, user, group, win_owner):\n changes = {}\n if not os.path.exists(name) and not __salt__['file.is_link'](name):\n changes['new'] = name\n return None, 'Symlink {0} to {1} is set for creation'.format(\n name, target\n ), changes\n if __salt__['file.is_link'](name):\n if __salt__['file.readlink'](name) != target:\n changes['change'] = name\n return None, 'Link {0} target is set to be changed to {1}'.format(\n name, target\n ), changes\n else:\n result = True\n msg = 'The symlink {0} is present'.format(name)\n if not _check_symlink_ownership(name, user, group, win_owner):\n result = None\n changes['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name))\n msg += (\n ', but the ownership of the symlink would be changed '\n 'from {2}:{3} to {0}:{1}'\n ).format(user, group, *_get_symlink_ownership(name))\n return result, msg, changes\n else:\n if force:\n return None, ('The file or directory {0} is set for removal to '\n 'make way for a new symlink targeting {1}'\n .format(name, target)), changes\n return False, ('File or directory exists where the symlink {0} '\n 'should be. Did you mean to use force?'.format(name)), changes", - "docstring": "Check the symlink function" - }, - { - "code": "def __add_class_views(self):\n config = self.config\n if \"class_views\" in self.kwargs:\n class_views = self.kwargs.pop(\"class_views\")\n for route, view in class_views:\n if issubclass(view, endpoints.BaseEndpoint) and isinstance(\n route, str\n ):\n self.bp.add_route(\n view.as_view(\n self.responses,\n config=self.config,\n instance=self.instance,\n ),\n route,\n strict_slashes=config.strict_slashes(),\n )\n else:\n raise exceptions.InvalidClassViewsFormat()", - "docstring": "Include any custom class views on the Sanic JWT Blueprint" - }, - { - "code": "def buy(self, product_id, order_type, **kwargs):\n return self.place_order(product_id, 'buy', order_type, **kwargs)", - "docstring": "Place a buy order.\n\n This is included to maintain backwards compatibility with older versions\n of cbpro-Python. 
For maximum support from docstrings and function\n signatures see the order type-specific functions place_limit_order,\n place_market_order, and place_stop_order.\n\n Args:\n product_id (str): Product to order (eg. 'BTC-USD')\n order_type (str): Order type ('limit', 'market', or 'stop')\n **kwargs: Additional arguments can be specified for different order\n types.\n\n Returns:\n dict: Order details. See `place_order` for example." - }, - { - "code": "def load_writer_configs(writer_configs, ppp_config_dir,\n **writer_kwargs):\n try:\n writer_info = read_writer_config(writer_configs)\n writer_class = writer_info['writer']\n except (ValueError, KeyError, yaml.YAMLError):\n raise ValueError(\"Invalid writer configs: \"\n \"'{}'\".format(writer_configs))\n init_kwargs, kwargs = writer_class.separate_init_kwargs(writer_kwargs)\n writer = writer_class(ppp_config_dir=ppp_config_dir,\n config_files=writer_configs,\n **init_kwargs)\n return writer, kwargs", - "docstring": "Load the writer from the provided `writer_configs`." - }, - { - "code": "def optimize_batch(self, batchsize=10, returns='best', paralell=True):\n if returns not in ('best', 'all'):\n raise ValueError('returns must be either \"best\" or \"all\"')\n starts = [np.random.rand(self.m * 2) * 10 for i in range(batchsize)]\n if paralell:\n with Pool() as p:\n results = p.map(self.optimize, starts)\n else:\n results = map(self.optimize, starts)\n results = sorted(results, key=lambda x: x.stress)\n return results if returns == 'all' else results[0]", - "docstring": "Run multiple optimizations using different starting coordinates.\n\n Args:\n batchsize (`int`): Number of optimizations to run.\n returns (`str`): If ``'all'``, return results of all optimizations,\n ordered by stress, ascending. If ``'best'`` return the\n projection with the lowest stress.\n parallel (`bool`): If ``True``, run optimizations in parallel.\n\n Examples:\n\n .. doctest::\n\n >>> import pandas as pd\n >>> from pymds import DistanceMatrix\n >>> dist = pd.DataFrame({\n ... 'a': [0.0, 1.0, 2.0],\n ... 'b': [1.0, 0.0, 3 ** 0.5],\n ... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])\n >>> dm = DistanceMatrix(dist)\n >>> batch = dm.optimize_batch(batchsize=3, returns='all')\n >>> len(batch)\n 3\n >>> type(batch[0])\n \n\n Returns:\n `list` or :py:class:`pymds.Projection`:\n\n `list`: Length batchsize, containing instances of\n :py:class:`pymds.Projection`. Sorted by stress, ascending.\n\n or\n\n :py:class:`pymds.Projection`: Projection with the lowest\n stress." - }, - { - "code": "def get_fault_type_dummy_variables(self, rup):\n is_normal = np.array(\n self.RAKE_THRESH < -rup.rake < (180. - self.RAKE_THRESH))\n is_reverse = np.array(\n self.RAKE_THRESH < rup.rake < (180. - self.RAKE_THRESH))\n if not self.ALREADY_WARNED and is_normal.any():\n msg = ('Normal faulting not supported by %s; '\n 'treating as strike-slip' % type(self).__name__)\n warnings.warn(msg, UserWarning)\n self.ALREADY_WARNED = True\n is_strike_slip = ~is_reverse | is_normal\n is_strike_slip = is_strike_slip.astype(float)\n return is_strike_slip", - "docstring": "Fault-type classification dummy variable based on rup.rake.\n\n \"``H`` is 1 for a strike-slip mechanism and 0 for a reverse mechanism\"\n (p. 1201).\n\n Note:\n UserWarning is raised if mechanism is determined to be normal\n faulting, since as summarized in Table 2 on p. 1197 the data used\n for regression included only reverse and stike-slip events." 
- }, - { - "code": "def _set_m2ms(self, old_m2ms):\n for k, v in old_m2ms.items():\n if v:\n setattr(self, k, v)", - "docstring": "Creates the same m2m relationships that the old\n object had." - }, - { - "code": "def delete(self, key, sort_key):\n primary_key = key\n key = self.prefixed('{}:{}'.format(key, sort_key))\n self.logger.debug('Storage - delete {}'.format(key))\n if sort_key is not None:\n self.cache[self.prefixed(primary_key)].remove(sort_key)\n for index in self._secondary_indexes:\n obj = json.loads(self.cache[key])\n if index in obj.keys():\n self.cache['secondary_indexes'][index][obj[index]].remove(\n key)\n del(self.cache[key])\n return True", - "docstring": "Delete an element in dictionary" - }, - { - "code": "def getavailable(self):\n from importlib import import_module\n available = []\n for script in self.SCRIPTS:\n if have(script):\n available.append(script)\n for module in self.MODULES:\n try:\n import_module(module)\n available.append(module)\n except ImportError:\n pass\n return sorted(available)", - "docstring": "Return a list of subtitle downloaders available." - }, - { - "code": "def read_file(fpath):\n with io.open(os.path.join(PATH_BASE, fpath)) as f:\n return f.read()", - "docstring": "Reads a file within package directories." - }, - { - "code": "def load_feature_lists(self, feature_lists):\n column_names = []\n feature_ranges = []\n running_feature_count = 0\n for list_id in feature_lists:\n feature_list_names = load_lines(self.features_dir + 'X_train_{}.names'.format(list_id))\n column_names.extend(feature_list_names)\n start_index = running_feature_count\n end_index = running_feature_count + len(feature_list_names) - 1\n running_feature_count += len(feature_list_names)\n feature_ranges.append([list_id, start_index, end_index])\n X_train = np.hstack([\n load(self.features_dir + 'X_train_{}.pickle'.format(list_id))\n for list_id in feature_lists\n ])\n X_test = np.hstack([\n load(self.features_dir + 'X_test_{}.pickle'.format(list_id))\n for list_id in feature_lists\n ])\n df_train = pd.DataFrame(X_train, columns=column_names)\n df_test = pd.DataFrame(X_test, columns=column_names)\n return df_train, df_test, feature_ranges", - "docstring": "Load pickled features for train and test sets, assuming they are saved\n in the `features` folder along with their column names.\n\n Args:\n feature_lists: A list containing the names of the feature lists to load.\n\n Returns:\n A tuple containing 3 items: train dataframe, test dataframe,\n and a list describing the index ranges for the feature lists." - }, - { - "code": "def squeeze(records, gap_threshold=1.0):\n with _record_buffer(records) as r:\n gap_proportions = gap_proportion(r())\n keep_columns = [g < gap_threshold for g in gap_proportions]\n for record in r():\n sequence = str(record.seq)\n squeezed = itertools.compress(sequence, keep_columns)\n yield SeqRecord(Seq(''.join(squeezed)), id=record.id,\n description=record.description)", - "docstring": "Remove any gaps that are present in the same position across all sequences\n in an alignment. Takes a second sequence iterator for determining gap\n positions." 
- }, - { - "code": "def from_file(filename):\n trace = Trace()\n reached = False\n with open(filename) as fp:\n for line in fp.readlines():\n if not reached and line.strip() == \"Trace Type: Counterexample\":\n reached = True\n continue\n elif reached:\n trace.parse_line(line)\n return trace", - "docstring": "Read in filename and creates a trace object.\n\n :param filename: path to nu(x|s)mv output file\n :type filename: str\n :return:" - }, - { - "code": "def undelay(self):\r\n i = 0\r\n while i < len(self):\r\n op = self[i]\r\n i += 1\r\n if hasattr(op, 'arg1'):\r\n if isinstance(op.arg1,DelayedArg):\r\n op.arg1 = op.arg1.resolve()\r\n if isinstance(op.arg1,CodeBlock):\r\n op.arg1.undelay()", - "docstring": "resolves all delayed arguments" - }, - { - "code": "def import_module(module_name):\n if import_path != sys.path:\n imp.find_module(module_name.split('.')[0], import_path)\n if module_name in sys.modules:\n return sys.modules[module_name]\n else:\n __import__(module_name)\n return sys.modules[module_name]", - "docstring": "Imports a module. A single point of truth for importing modules to\n be documented by `pydoc`. In particular, it makes sure that the top\n module in `module_name` can be imported by using only the paths in\n `pydoc.import_path`.\n\n If a module has already been imported, then its corresponding entry\n in `sys.modules` is returned. This means that modules that have\n changed on disk cannot be re-imported in the same process and have\n its documentation updated." - }, - { - "code": "def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):\n countriesdata = cls.countriesdata(use_live=use_live)\n country = countriesdata['countries'].get(iso3.upper())\n if country is not None:\n return country\n if exception is not None:\n raise exception\n return None", - "docstring": "Get country information from ISO3 code\n\n Args:\n iso3 (str): ISO3 code for which to get country information\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. 
Defaults to None.\n\n Returns:\n Optional[Dict[str]]: country information" - }, - { - "code": "def get_batch_result_ids(self, job_id, batch_id):\n response = requests.get(self._get_batch_results_url(job_id, batch_id),\n headers=self._get_batch_info_headers())\n response.raise_for_status()\n root = ET.fromstring(response.text)\n result_ids = [r.text for r in root.findall('%sresult' % self.API_NS)]\n return result_ids", - "docstring": "Get result IDs of a batch that has completed processing.\n\n :param job_id: job_id as returned by 'create_operation_job(...)'\n :param batch_id: batch_id as returned by 'create_batch(...)'\n :return: list of batch result IDs to be used in 'get_batch_result(...)'" - }, - { - "code": "def suggest(self, query):\n res, suggest = self.search(query, results=1, suggestion=True)\n try:\n title = suggest or res[0]\n except IndexError:\n title = None\n return title", - "docstring": "Gather suggestions based on the provided title or None if no\n suggestions found\n\n Args:\n query (str): Page title\n Returns:\n String or None: Suggested page title or **None** if no \\\n suggestion found" - }, - { - "code": "def pool_refresh(name, **kwargs):\n conn = __get_conn(**kwargs)\n try:\n pool = conn.storagePoolLookupByName(name)\n return not bool(pool.refresh())\n finally:\n conn.close()", - "docstring": "Refresh a defined libvirt storage pool.\n\n :param name: libvirt storage pool name\n :param connection: libvirt connection URI, overriding defaults\n :param username: username to connect with, overriding defaults\n :param password: password to connect with, overriding defaults\n\n .. versionadded:: 2019.2.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' virt.pool_refresh default" - }, - { - "code": "def to_json(self):\n return {\n 'wind_speed': self.wind_speed,\n 'wind_direction': self.wind_direction,\n 'rain': self.rain,\n 'snow_on_ground': self.snow_on_ground\n }", - "docstring": "Convert the Wind Condition to a dictionary." - }, - { - "code": "async def wait_for_action(self, action_id):\n if action_id.startswith(\"action-\"):\n action_id = action_id[7:]\n def predicate(delta):\n return delta.data['status'] in ('completed', 'failed')\n return await self._wait('action', action_id, None, predicate)", - "docstring": "Given an action, wait for it to complete." - }, - { - "code": "def wait_for_process_termination(process, timeout=10):\n if sys.version_info >= (3, 5):\n try:\n yield from asyncio.wait_for(process.wait(), timeout=timeout)\n except ProcessLookupError:\n return\n else:\n while timeout > 0:\n if process.returncode is not None:\n return\n yield from asyncio.sleep(0.1)\n timeout -= 0.1\n raise asyncio.TimeoutError()", - "docstring": "Wait for a process terminate, and raise asyncio.TimeoutError in case of\n timeout.\n\n In theory this can be implemented by just:\n yield from asyncio.wait_for(self._iou_process.wait(), timeout=100)\n\n But it's broken before Python 3.4:\n http://bugs.python.org/issue23140\n\n :param process: An asyncio subprocess\n :param timeout: Timeout in seconds" - }, - { - "code": "def border(self, L):\n if self.shape == L_shape:\n L.append(self.value)\n else:\n for x in self.sons:\n x.border(L)", - "docstring": "Append to L the border of the subtree." 
- }, - { - "code": "def authenticate(self, driver):\n events = [driver.username_re, driver.password_re, self.device.prompt_re, driver.rommon_re,\n driver.unable_to_connect_re, driver.authentication_error_re, pexpect.TIMEOUT, pexpect.EOF]\n transitions = [\n (driver.username_re, [0], 1, partial(a_send_username, self.username), 10),\n (driver.username_re, [1], 1, None, 10),\n (driver.password_re, [0, 1], 2, partial(a_send_password, self._acquire_password()),\n _C['first_prompt_timeout']),\n (driver.username_re, [2], -1, a_authentication_error, 0),\n (driver.password_re, [2], -1, a_authentication_error, 0),\n (driver.authentication_error_re, [1, 2], -1, a_authentication_error, 0),\n (self.device.prompt_re, [0, 1, 2], -1, None, 0),\n (driver.rommon_re, [0], -1, partial(a_send, \"\\r\\n\"), 0),\n (pexpect.TIMEOUT, [0], 1, partial(a_send, \"\\r\\n\"), 10),\n (pexpect.TIMEOUT, [2], -1, None, 0),\n (pexpect.TIMEOUT, [3, 7], -1, ConnectionTimeoutError(\"Connection Timeout\", self.hostname), 0),\n (driver.unable_to_connect_re, [0, 1, 2], -1, a_unable_to_connect, 0),\n ]\n self.log(\"EXPECTED_PROMPT={}\".format(pattern_to_str(self.device.prompt_re)))\n fsm = FSM(\"CONSOLE-SERVER-AUTH\", self.device, events, transitions, timeout=_C['connect_timeout'],\n init_pattern=self.last_pattern)\n return fsm.run()", - "docstring": "Authenticate using the Console Server protocol specific FSM." - }, - { - "code": "def expire(app, sandbox, exit=True):\n success = []\n failures = []\n service = _mturk_service_from_config(sandbox)\n hits = _current_hits(service, app)\n for hit in hits:\n hit_id = hit[\"id\"]\n try:\n service.expire_hit(hit_id)\n success.append(hit_id)\n except MTurkServiceException:\n failures.append(hit_id)\n out = Output()\n if success:\n out.log(\"Expired {} hits: {}\".format(len(success), \", \".join(success)))\n if failures:\n out.log(\n \"Could not expire {} hits: {}\".format(len(failures), \", \".join(failures))\n )\n if not success and not failures:\n out.log(\"No hits found for this application.\")\n if not sandbox:\n out.log(\n \"If this experiment was run in the MTurk sandbox, use: \"\n \"`dallinger expire --sandbox --app {}`\".format(app)\n )\n if exit and not success:\n sys.exit(1)", - "docstring": "Expire hits for an experiment id." - }, - { - "code": "def packet_is_for_me(self, m):\n if m.target_system != self.master.mav.srcSystem:\n return False\n if m.target_component != self.master.mav.srcComponent:\n return False\n if self.sender is not None:\n if (m.get_srcSystem(), m.get_srcComponent()) != self.sender:\n return False\n return True", - "docstring": "returns true if this packet is appropriately addressed" - }, - { - "code": "def stop_tracking(self, end_time):\n facts = self.__get_todays_facts()\n if facts and not facts[-1]['end_time']:\n self.__touch_fact(facts[-1], end_time)\n self.facts_changed()", - "docstring": "Stops tracking the current activity" - }, - { - "code": "def get_or_create_person(self, row):\n person, created = entity.Person.objects.get_or_create(\n first_name=row[\"first\"], last_name=row[\"last\"]\n )\n return person", - "docstring": "Gets or creates the Person object for the given row of AP data." 
- }, - { - "code": "def handle_set(msg):\n if not msg.gateway.is_sensor(msg.node_id, msg.child_id):\n return None\n msg.gateway.sensors[msg.node_id].set_child_value(\n msg.child_id, msg.sub_type, msg.payload)\n if msg.gateway.sensors[msg.node_id].new_state:\n msg.gateway.sensors[msg.node_id].set_child_value(\n msg.child_id, msg.sub_type, msg.payload,\n children=msg.gateway.sensors[msg.node_id].new_state)\n msg.gateway.alert(msg)\n if msg.gateway.sensors[msg.node_id].reboot:\n return msg.copy(\n child_id=SYSTEM_CHILD_ID,\n type=msg.gateway.const.MessageType.internal, ack=0,\n sub_type=msg.gateway.const.Internal.I_REBOOT, payload='')\n return None", - "docstring": "Process a set message." - }, - { - "code": "def _get_vm_by_name(name, allDetails=False):\n vms = get_resources_vms(includeConfig=allDetails)\n if name in vms:\n return vms[name]\n log.info('VM with name \"%s\" could not be found.', name)\n return False", - "docstring": "Since Proxmox works based op id's rather than names as identifiers this\n requires some filtering to retrieve the required information." - }, - { - "code": "def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im():\n hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu()\n update_hparams_for_tpu(hparams)\n hparams.batch_size = 4\n hparams.optimizer = \"Adafactor\"\n hparams.learning_rate_schedule = \"rsqrt_decay\"\n hparams.learning_rate_warmup_steps = 6000\n hparams.layer_prepostprocess_dropout = 0.1\n return hparams", - "docstring": "TPU related imagenet model." - }, - { - "code": "def D_for_Ntubes_VDI(N, Ntp, Do, pitch, angle=30):\n r\n if Ntp == 1:\n f2 = 0.\n elif Ntp == 2:\n f2 = 22.\n elif Ntp == 4:\n f2 = 70.\n elif Ntp == 6:\n f2 = 90.\n elif Ntp == 8:\n f2 = 105.\n else:\n raise Exception('Only 1, 2, 4 and 8 passes are supported')\n if angle == 30 or angle == 60:\n f1 = 1.1\n elif angle == 45 or angle == 90:\n f1 = 1.3\n else:\n raise Exception('Only 30, 60, 45 and 90 degree layouts are supported')\n Do, pitch = Do*1000, pitch*1000\n Dshell = (f1*N*pitch**2 + f2*N**0.5*pitch +Do)**0.5\n return Dshell/1000.", - "docstring": "r'''A rough equation presented in the VDI Heat Atlas for estimating\n the size of a tube bundle from a given number of tubes, number of tube\n passes, outer tube diameter, pitch, and arrangement.\n No accuracy estimation given.\n\n .. math::\n OTL = \\sqrt{f_1 z t^2 + f_2 t \\sqrt{z} - d_o}\n\n Parameters\n ----------\n N : float\n Number of tubes, [-]\n Ntp : float\n Number of tube passes, [-]\n Do : float\n Tube outer diameter, [m]\n pitch : float\n Pitch; distance between two orthogonal tube centers, [m]\n angle : float\n The angle the tubes are positioned; 30, 45, 60 or 90\n\n Returns\n -------\n DBundle : float\n Outer diameter of tube bundle, [m]\n\n Notes\n -----\n f1 = 1.1 for triangular, 1.3 for square patterns\n f2 is as follows: 1 pass, 0; 2 passes, 22; 4 passes, 70; 8 passes, 105.\n 6 tube passes is not officially supported, only 1, 2, 4 and 8.\n However, an estimated constant has been added to support it.\n f2 = 90.\n\n Examples\n --------\n >>> D_for_Ntubes_VDI(N=970, Ntp=2., Do=0.00735, pitch=0.015, angle=30.)\n 0.5003600119829544\n\n References\n ----------\n .. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.\n Berlin; New York:: Springer, 2010." 
- }, - { - "code": "def _compute_full_ts(self, data):\n full_ts, dt = self._compute(data)\n vert_types = ('vert_int', 'vert_av')\n if self.dtype_out_vert in vert_types and self.var.def_vert:\n dp = self._get_input_data(_DP_VARS[self.dtype_in_vert],\n self.start_date, self.end_date)\n full_ts = utils.vertcoord.int_dp_g(full_ts, dp)\n if self.dtype_out_vert == 'vert_av':\n ps = self._get_input_data(utils.vertcoord.ps,\n self.start_date, self.end_date)\n full_ts *= (GRAV_EARTH / ps)\n return full_ts, dt", - "docstring": "Perform calculation and create yearly timeseries at each point." - }, - { - "code": "def pack(self):\n payload = io.BytesIO()\n payload.seek(self.header_size, io.SEEK_CUR)\n self.build_payload(payload)\n packet_length = len(payload.getvalue()) - self.header_size\n self.header = MessageHeader(self.session_id, self.packet_count, packet_length, constants.MAX_SEGMENT_SIZE,\n num_segments=len(self.segments), packet_options=0)\n packed_header = self.header_struct.pack(*self.header)\n payload.seek(0)\n payload.write(packed_header)\n payload.seek(0, io.SEEK_END)\n trace(self)\n return payload", - "docstring": "Pack message to binary stream." - }, - { - "code": "def index_bams(job, config):\n job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)\n disk = '1G' if config.ci_test else '20G'\n config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()\n config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()\n job.addFollowOnJobFn(preprocessing_declaration, config)", - "docstring": "Convenience job for handling bam indexing to make the workflow declaration cleaner\n\n :param JobFunctionWrappingJob job: passed automatically by Toil\n :param Namespace config: Argparse Namespace object containing argument inputs" - }, - { - "code": "def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):\n q =\n log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))\n with self.get_cursor() as dbc:\n dbc.execute(q.format(int(keep_expired_for)))", - "docstring": "Remove long expired full_hash entries." - }, - { - "code": "def save_cert(name, master):\n ret = {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': ''}\n cert = \"{0}trusted-master.crt\".format(get_certs_path())\n if os.path.isfile(cert):\n ret['comment'] = 'No execution needed. Cert: {0} already saved.'.format(cert)\n return ret\n if __opts__['test']:\n ret['result'] = None\n ret['comment'] = 'Certificate save for icinga2 master would be executed'\n return ret\n cert_save = __salt__['icinga2.save_cert'](name, master)\n if not cert_save['retcode']:\n ret['comment'] = \"Certificate for icinga2 master saved\"\n ret['changes']['cert'] = \"Executed. 
Certificate saved: {0}\".format(cert)\n return ret", - "docstring": "Save the certificate on master icinga2 node.\n\n name\n The domain name for which this certificate will be saved\n\n master\n Icinga2 master node for which this certificate will be saved" - }, - { - "code": "def get_assessment_part_form_for_update(self, assessment_part_id):\n collection = JSONClientValidated('assessment_authoring',\n collection='AssessmentPart',\n runtime=self._runtime)\n if not isinstance(assessment_part_id, ABCId):\n raise errors.InvalidArgument('the argument is not a valid OSID Id')\n if (assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart' or\n assessment_part_id.get_authority() != self._authority):\n raise errors.InvalidArgument()\n result = collection.find_one({'_id': ObjectId(assessment_part_id.get_identifier())})\n mdata = {}\n if not result['assessmentPartId']:\n pass\n else:\n parent_part_id = Id(result['assessmentPartId'])\n mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True)\n lookup_session = mgr.get_assessment_part_lookup_session_for_bank(self._catalog_id, proxy=self._proxy)\n if lookup_session.get_assessment_parts_for_assessment_part(parent_part_id).available() > 1:\n mdata['sequestered']['is_read_only'] = True\n mdata['sequestered']['is_required'] = True\n obj_form = objects.AssessmentPartForm(osid_object_map=result,\n runtime=self._runtime,\n proxy=self._proxy,\n mdata=mdata)\n self._forms[obj_form.get_id().get_identifier()] = not UPDATED\n return obj_form", - "docstring": "Gets the assessment part form for updating an existing assessment part.\n\n A new assessment part form should be requested for each update\n transaction.\n\n arg: assessment_part_id (osid.id.Id): the ``Id`` of the\n ``AssessmentPart``\n return: (osid.assessment.authoring.AssessmentPartForm) - the\n assessment part form\n raise: NotFound - ``assessment_part_id`` is not found\n raise: NullArgument - ``assessment_part_id`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure occurred\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):\n for entry in top_level:\n datetime_value = entry.get('date', None)\n package_identifiers = entry.get('packageIdentifiers', [])\n if not datetime_value or not package_identifiers:\n continue\n display_name = entry.get('displayName', '')\n display_version = entry.get('displayVersion', '')\n process_name = entry.get('processName', '')\n package_identifiers = ', '.join(package_identifiers)\n event_data = plist_event.PlistTimeEventData()\n event_data.desc = (\n 'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '\n '{3:s}.').format(\n display_name, display_version, process_name, package_identifiers)\n event_data.key = ''\n event_data.root = '/item'\n event = time_events.PythonDatetimeEvent(\n datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)\n parser_mediator.ProduceEventWithEventData(event, event_data)", - "docstring": "Extracts relevant install history entries.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n top_level (dict[str, object]): plist top-level key." 
- }, - { - "code": "def MakeBuildDirectory(self):\n self.build_dir = config.CONFIG.Get(\n \"PyInstaller.build_dir\", context=self.context)\n self.work_path = config.CONFIG.Get(\n \"PyInstaller.workpath_dir\", context=self.context)\n self.CleanDirectory(self.build_dir)\n self.CleanDirectory(self.work_path)", - "docstring": "Prepares the build directory." - }, - { - "code": "def guess_chunks(shape, typesize):\n ndims = len(shape)\n chunks = np.maximum(np.array(shape, dtype='=f8'), 1)\n dset_size = np.product(chunks)*typesize\n target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024)))\n if target_size > CHUNK_MAX:\n target_size = CHUNK_MAX\n elif target_size < CHUNK_MIN:\n target_size = CHUNK_MIN\n idx = 0\n while True:\n chunk_bytes = np.product(chunks)*typesize\n if (chunk_bytes < target_size or\n abs(chunk_bytes-target_size)/target_size < 0.5) and \\\n chunk_bytes < CHUNK_MAX:\n break\n if np.product(chunks) == 1:\n break\n chunks[idx % ndims] = np.ceil(chunks[idx % ndims] / 2.0)\n idx += 1\n return tuple(int(x) for x in chunks)", - "docstring": "Guess an appropriate chunk layout for a dataset, given its shape and\n the size of each element in bytes. Will allocate chunks only as large\n as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of\n each axis, slightly favoring bigger values for the last index.\n Undocumented and subject to change without warning." - }, - { - "code": "def delete_peer(self, peerAddr):\n if _debug: BTR._debug(\"delete_peer %r\", peerAddr)\n del self.peers[peerAddr]", - "docstring": "Delete a peer." - }, - { - "code": "def solve(self, value, filter_):\n try:\n return value[filter_.slice or filter_.index]\n except IndexError:\n return None", - "docstring": "Get slice or entry defined by an index from the given value.\n\n Arguments\n ---------\n value : ?\n A value to solve in combination with the given filter.\n filter_ : dataql.resource.SliceFilter\n An instance of ``SliceFilter``to solve with the given value.\n\n Example\n -------\n\n >>> from dataql.solvers.registry import Registry\n >>> registry = Registry()\n >>> solver = SliceSolver(registry)\n >>> solver.solve([1, 2, 3], SliceFilter(1))\n 2\n >>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None)))\n [2, 3]\n >>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2)))\n [1]\n >>> solver.solve([1, 2, 3], SliceFilter(4))" - }, - { - "code": "def clone_subgraphs(self, g):\n if not isinstance(g, CGRContainer):\n raise InvalidData('only CGRContainer acceptable')\n r_group = []\n x_group = {}\n r_group_clones = []\n newcomponents = []\n components, lost_bonds, term_atoms = self.__split_graph(g)\n lost_map = {x: y for x, y in lost_bonds}\n x_terminals = set(lost_map.values())\n r_terminals = set(lost_map)\n for i in components:\n x_terminal_atom = x_terminals.intersection(i)\n if x_terminal_atom:\n x_group[x_terminal_atom.pop()] = i\n continue\n r_terminal_atom = r_terminals.intersection(i)\n if r_terminal_atom:\n r_group.append([r_terminal_atom, i])\n continue\n newcomponents.append(i)\n tmp = g\n for i in newcomponents:\n for k, j in r_group:\n gm = GraphMatcher(j, i, node_match=self.__node_match_products,\n edge_match=self.__edge_match_products)\n mapping = next((x for x in gm.subgraph_isomorphisms_iter() if k.issubset(x) and\n all(x[y] in term_atoms for y in k)), None)\n if mapping:\n r_group_clones.append([k, mapping])\n tmp = compose(tmp, self.__remap_group(j, tmp, mapping)[0])\n break\n for i, j in r_group_clones:\n for k in i:\n remappedgroup, mapping = 
self.__remap_group(x_group[lost_map[k]], tmp, {})\n tmp = CGRcore.union(tmp, remappedgroup)\n tmp.add_edge(j[k], mapping[lost_map[k]], s_bond=1, sp_bond=(1, None))\n if r_group_clones:\n tmp.meta.update(g.meta)\n return tmp\n return tmp.copy()", - "docstring": "search bond breaks and creations" - }, - { - "code": "def _get_for_address(address, key):\n address = netaddr.IPAddress(address)\n for iface in netifaces.interfaces():\n addresses = netifaces.ifaddresses(iface)\n if address.version == 4 and netifaces.AF_INET in addresses:\n addr = addresses[netifaces.AF_INET][0]['addr']\n netmask = addresses[netifaces.AF_INET][0]['netmask']\n network = netaddr.IPNetwork(\"%s/%s\" % (addr, netmask))\n cidr = network.cidr\n if address in cidr:\n if key == 'iface':\n return iface\n else:\n return addresses[netifaces.AF_INET][0][key]\n if address.version == 6 and netifaces.AF_INET6 in addresses:\n for addr in addresses[netifaces.AF_INET6]:\n network = _get_ipv6_network_from_address(addr)\n if not network:\n continue\n cidr = network.cidr\n if address in cidr:\n if key == 'iface':\n return iface\n elif key == 'netmask' and cidr:\n return str(cidr).split('/')[1]\n else:\n return addr[key]\n return None", - "docstring": "Retrieve an attribute of or the physical interface that\n the IP address provided could be bound to.\n\n :param address (str): An individual IPv4 or IPv6 address without a net\n mask or subnet prefix. For example, '192.168.1.1'.\n :param key: 'iface' for the physical interface name or an attribute\n of the configured interface, for example 'netmask'.\n :returns str: Requested attribute or None if address is not bindable." - }, - { - "code": "def get_resource_by_agent(self, agent_id):\n collection = JSONClientValidated('resource',\n collection='Resource',\n runtime=self._runtime)\n result = collection.find_one(\n dict({'agentIds': {'$in': [str(agent_id)]}},\n **self._view_filter()))\n return objects.Resource(\n osid_object_map=result,\n runtime=self._runtime,\n proxy=self._proxy)", - "docstring": "Gets the ``Resource`` associated with the given agent.\n\n arg: agent_id (osid.id.Id): ``Id`` of the ``Agent``\n return: (osid.resource.Resource) - associated resource\n raise: NotFound - ``agent_id`` is not found\n raise: NullArgument - ``agent_id`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def load(fp, cls=BinaryQuadraticModel, vartype=None):\n pattern = re.compile(_LINE_REGEX)\n vartype_pattern = re.compile(_VARTYPE_HEADER_REGEX)\n triplets = []\n for line in fp:\n triplets.extend(pattern.findall(line))\n vt = vartype_pattern.findall(line)\n if vt:\n if vartype is None:\n vartype = vt[0]\n else:\n if isinstance(vartype, str):\n vartype = Vartype[vartype]\n else:\n vartype = Vartype(vartype)\n if Vartype[vt[0]] != vartype:\n raise ValueError(\"vartypes from headers and/or inputs do not match\")\n if vartype is None:\n raise ValueError(\"vartype must be provided either as a header or as an argument\")\n bqm = cls.empty(vartype)\n for u, v, bias in triplets:\n if u == v:\n bqm.add_variable(int(u), float(bias))\n else:\n bqm.add_interaction(int(u), int(v), float(bias))\n return bqm", - "docstring": "Load a COOrdinate formatted binary quadratic model from a file." 
- }, - { - "code": "def collapse_if_tuple(abi):\n typ = abi[\"type\"]\n if not typ.startswith(\"tuple\"):\n return typ\n delimited = \",\".join(collapse_if_tuple(c) for c in abi[\"components\"])\n array_dim = typ[5:]\n collapsed = \"({}){}\".format(delimited, array_dim)\n return collapsed", - "docstring": "Converts a tuple from a dict to a parenthesized list of its types.\n\n >>> from eth_utils.abi import collapse_if_tuple\n >>> collapse_if_tuple(\n ... {\n ... 'components': [\n ... {'name': 'anAddress', 'type': 'address'},\n ... {'name': 'anInt', 'type': 'uint256'},\n ... {'name': 'someBytes', 'type': 'bytes'},\n ... ],\n ... 'type': 'tuple',\n ... }\n ... )\n '(address,uint256,bytes)'" - }, - { - "code": "def __pad(strdata):\r\n if request.args.get('callback'):\r\n return \"%s(%s);\" % (request.args.get('callback'), strdata)\r\n else:\r\n return strdata", - "docstring": "Pads `strdata` with a Request's callback argument, if specified, or does\r\n nothing." - }, - { - "code": "def evaluate_unicode(self, value):\n if value.startswith('u8'):\n length = 1\n value = value[3:-1]\n encoding = 'utf-8'\n elif value.startswith('u'):\n length = 2\n value = value[2:-1]\n encoding = 'utf-16'\n else:\n length = 4\n value = value[2:-1]\n encoding = 'utf-32'\n def replace_unicode(m):\n groups = m.groupdict()\n esc = m.group(0)\n value = esc\n if groups.get('special'):\n value = BACK_SLASH_TRANSLATION[esc]\n elif groups.get('char') or groups.get('oct'):\n integer = int(esc[2:], 16) if groups.get('char') else int(esc[1:], 8)\n if (\n (length < 2 and integer <= 0xFF) or\n (length < 4 and integer <= 0xFFFF) or\n (length >= 4 and integer <= 0x10FFFF)\n ):\n try:\n value = chr(integer)\n except Exception:\n value = ' '\n return value\n return self.norm_nl(RE_UESC.sub(replace_unicode, value).replace('\\x00', '\\n')), encoding", - "docstring": "Evaluate Unicode." 
- }, - { - "code": "def rescan(self):\n if not self.attached:\n return\n dprint(5, \"starting rescan\")\n newstuff = {};\n self.last_scan_timestamp = time.time()\n for path, watcher in list(self.watchers.items()):\n newfiles = watcher.newFiles()\n if newfiles is None:\n if watcher.survive_deletion:\n dprintf(5, \"access error on %s, but will still be watched\\n\", watcher.path)\n else:\n dprintf(2, \"access error on %s, will no longer be watched\\n\", watcher.path)\n del self.watchers[path]\n if not watcher.disappeared:\n self.emit(SIGNAL(\"disappearedFile\"), path)\n watcher.disappeared = True\n continue\n dprintf(5, \"%s: %d new file(s)\\n\", watcher.path, len(newfiles))\n newfiles = [p for p in newfiles if p is path or p not in self.watchers]\n newfiles = [filename for filename in newfiles if\n self._watching_state.get(os.path.dirname(filename)) > Purr.UNWATCHED]\n for newfile in newfiles:\n if watcher.quiet or self._watching_state.get(os.path.dirname(newfile)) < Purr.POUNCE:\n quiet = True\n else:\n quiet = matches_patterns(os.path.basename(newfile), self._quiet_patterns)\n newstuff[newfile] = quiet and newstuff.get(newfile, True)\n dprintf(4, \"%s: new data product, quiet=%d (watcher quiet: %s)\\n\", newfile, quiet, watcher.quiet)\n self.temp_watchers[newfile] = Purrer.WatchedFile(newfile)\n for path, watcher in list(self.temp_watchers.items()):\n if watcher.newFiles() is None:\n dprintf(2, \"access error on %s, marking as disappeared\", watcher.path)\n del self.temp_watchers[path]\n self.emit(SIGNAL(\"disappearedFile\"), path)\n return self.makeDataProducts(iter(newstuff.items()))", - "docstring": "Checks files and directories on watchlist for updates, rescans them for new data products.\n If any are found, returns them. Skips those in directories whose watchingState is set to Purr.UNWATCHED." - }, - { - "code": "def _minigui_report_search_status(self, leaves):\n root = self._player.get_root()\n msg = {\n \"id\": hex(id(root)),\n \"n\": int(root.N),\n \"q\": float(root.Q),\n }\n msg[\"childQ\"] = [int(round(q * 1000)) for q in root.child_Q]\n msg[\"childN\"] = [int(n) for n in root.child_N]\n ranked_children = root.rank_children()\n variations = {}\n for i in ranked_children[:15]:\n if root.child_N[i] == 0 or i not in root.children:\n break\n c = coords.to_gtp(coords.from_flat(i))\n child = root.children[i]\n nodes = child.most_visited_path_nodes()\n moves = [coords.to_gtp(coords.from_flat(m.fmove)) for m in nodes]\n variations[c] = {\n \"n\": int(root.child_N[i]),\n \"q\": float(root.child_Q[i]),\n \"moves\": [c] + moves,\n }\n if leaves:\n path = []\n leaf = leaves[0]\n while leaf != root:\n path.append(leaf.fmove)\n leaf = leaf.parent\n if path:\n path.reverse()\n variations[\"live\"] = {\n \"n\": int(root.child_N[path[0]]),\n \"q\": float(root.child_Q[path[0]]),\n \"moves\": [coords.to_gtp(coords.from_flat(m)) for m in path]\n }\n if variations:\n msg[\"variations\"] = variations\n dbg(\"mg-update:%s\" % json.dumps(msg, sort_keys=True))", - "docstring": "Prints the current MCTS search status to stderr.\n\n Reports the current search path, root node's child_Q, root node's\n child_N, the most visited path in a format that can be parsed by\n one of the STDERR_HANDLERS in minigui.ts.\n\n Args:\n leaves: list of leaf MCTSNodes returned by tree_search()." 
- }, - { - "code": "def acknowledge_message(self, delivery_tag):\n logger.debug('Acknowledging message %s', delivery_tag)\n self._channel.basic_ack(delivery_tag)", - "docstring": "Acknowledge the message delivery from RabbitMQ.\n\n :param int delivery_tag: The delivery tag from the Basic.Deliver frame" - }, - { - "code": "def mk_set_headers(self, data, columns):\n columns = tuple(columns)\n lens = []\n for key in columns:\n value_len = max(len(str(each.get(key, ''))) for each in data)\n lens.append(max(value_len, len(self._get_name(key))))\n fmt = self.mk_fmt(*lens)\n return fmt", - "docstring": "figure out sizes and create header fmt" - }, - { - "code": "def get_conn_info(self):\n return session.ConnectionInfo(self.request.remote_ip,\n self.request.cookies,\n self.request.arguments,\n self.request.headers,\n self.request.path)", - "docstring": "Return `ConnectionInfo` object from current transport" - }, - { - "code": "def checkStock(self):\n if not self.preferences:\n logger.debug(\"no preferences\")\n return None\n soup = BeautifulSoup(\n self.xpath(path['stock-table'])[0].html, \"html.parser\")\n count = 0\n for product in soup.select(\"div.tradebox\"):\n prod_name = product.select(\"span.instrument-name\")[0].text\n stk_name = [x for x in self.preferences\n if x.lower() in prod_name.lower()]\n if not stk_name:\n continue\n name = prod_name\n if not [x for x in self.stocks if x.product == name]:\n self.stocks.append(Stock(name))\n stock = [x for x in self.stocks if x.product == name][0]\n if 'tradebox-market-closed' in product['class']:\n stock.market = False\n if not stock.market:\n logger.debug(\"market closed for %s\" % stock.product)\n continue\n sell_price = product.select(\"div.tradebox-price-sell\")[0].text\n buy_price = product.select(\"div.tradebox-price-buy\")[0].text\n sent = int(product.select(path['sent'])[0].text.strip('%')) / 100\n stock.new_rec([sell_price, buy_price, sent])\n count += 1\n logger.debug(f\"added %d stocks\" % count)\n return self.stocks", - "docstring": "check stocks in preference" - }, - { - "code": "def _remove_persistent_module(mod, comment):\n conf = _get_modules_conf()\n mod_name = _strip_module_name(mod)\n if not mod_name or mod_name not in mod_list(True):\n return set()\n escape_mod = re.escape(mod)\n if comment:\n __salt__['file.comment'](conf, '^[\\t ]*{0}[\\t ]?'.format(escape_mod))\n else:\n __salt__['file.sed'](conf, '^[\\t ]*{0}[\\t ]?'.format(escape_mod), '')\n return set([mod_name])", - "docstring": "Remove module from configuration file. If comment is true only comment line\n where module is." 
- }, - { - "code": "def extract_from_files(\n files: List[Path],\n languages: Dict[str, List[str]]) -> DataSet:\n enumerator = enumerate(sorted(languages.items()))\n rank_map = {ext: rank for rank, (_, exts) in enumerator for ext in exts}\n with multiprocessing.Pool(initializer=_process_init) as pool:\n file_iterator = ((path, rank_map) for path in files)\n arrays = _to_arrays(pool.starmap(_extract_features, file_iterator))\n LOGGER.debug(\"Extracted arrays count: %d\", len(arrays[0]))\n return arrays", - "docstring": "Extract arrays of features from the given files.\n\n :param files: list of paths\n :param languages: language name =>\n associated file extension list\n :return: features" - }, - { - "code": "def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,\n do_download=True):\n ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)\n fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)\n if base_dir is None:\n base_dir = os.getcwd()\n orig_dir = os.getcwd()\n os.chdir(base_dir)\n input_dir = os.path.join(base_dir, \"inputs\")\n if isinstance(xs, dict):\n out = {}\n for k, v in xs.items():\n if k not in ignore_keys and v and isinstance(v, six.string_types):\n if v.lower() == \"none\":\n out[k] = None\n else:\n out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)\n elif isinstance(v, (list, tuple)):\n out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)\n for x in v]\n else:\n out[k] = v\n elif isinstance(xs, six.string_types):\n if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):\n dl = objectstore.download(xs, input_dir)\n if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys and not os.path.isfile(dl)):\n out = os.path.normpath(os.path.join(base_dir, dl))\n else:\n out = xs\n else:\n out = xs\n else:\n out = xs\n os.chdir(orig_dir)\n return out", - "docstring": "Normalize any file paths found in a subdirectory of configuration input.\n\n base_dir -- directory to normalize relative paths to\n ignore_keys -- algorithm key names to ignore normalize for (keywords, not files/directories)\n fileonly_keys -- algorithm key names to only expand files (not directories)\n cur_key -- current key when calling recursively" - }, - { - "code": "def current_custom_claims():\n jwt_data = get_jwt_data_from_app_context()\n return {k: v for (k, v) in jwt_data.items() if k not in RESERVED_CLAIMS}", - "docstring": "This method returns any custom claims in the current jwt" - }, - { - "code": "def _realGetAllThemes(self):\n l = []\n for offering in getOfferings():\n l.extend(offering.themes)\n l.sort(key=lambda o: o.priority)\n l.reverse()\n return l", - "docstring": "Collect themes from all available offerings." 
- }, - { - "code": "def get_instance(self, payload):\n return RecordInstance(self._version, payload, account_sid=self._solution['account_sid'], )", - "docstring": "Build an instance of RecordInstance\n\n :param dict payload: Payload response from the API\n\n :returns: twilio.rest.api.v2010.account.usage.record.RecordInstance\n :rtype: twilio.rest.api.v2010.account.usage.record.RecordInstance" - }, - { - "code": "def unload(filename):\n if isinstance(filename, list):\n for f in filename:\n libspice.unload_c(stypes.stringToCharP(f))\n return\n filename = stypes.stringToCharP(filename)\n libspice.unload_c(filename)", - "docstring": "Unload a SPICE kernel.\n\n http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/unload_c.html\n\n :param filename: The name of a kernel to unload.\n :type filename: str" - }, - { - "code": "def register_tile(self, hw_type, api_major, api_minor, name, fw_major, fw_minor, fw_patch, exec_major, exec_minor, exec_patch, slot, unique_id):\n api_info = (api_major, api_minor)\n fw_info = (fw_major, fw_minor, fw_patch)\n exec_info = (exec_major, exec_minor, exec_patch)\n address = 10 + slot\n info = TileInfo(hw_type, name, api_info, fw_info, exec_info, slot, unique_id, state=TileState.JUST_REGISTERED, address=address)\n self.tile_manager.insert_tile(info)\n debug = int(self.tile_manager.debug_mode)\n if self.tile_manager.safe_mode:\n run_level = RunLevel.SAFE_MODE\n info.state = TileState.SAFE_MODE\n config_rpcs = []\n else:\n run_level = RunLevel.START_ON_COMMAND\n info.state = TileState.BEING_CONFIGURED\n config_rpcs = self.config_database.stream_matching(address, name)\n self.tile_manager.queue.put_nowait((info, config_rpcs))\n return [address, run_level, debug]", - "docstring": "Register a tile with this controller.\n\n This function adds the tile immediately to its internal cache of registered tiles\n and queues RPCs to send all config variables and start tile rpcs back to the tile." - }, - { - "code": "def RepairNodeStorageUnits(r, node, storage_type, name):\n query = {\n \"storage_type\": storage_type,\n \"name\": name,\n }\n return r.request(\"put\", \"/2/nodes/%s/storage/repair\" % node, query=query)", - "docstring": "Repairs a storage unit on the node.\n\n @type node: str\n @param node: node whose storage units to repair\n @type storage_type: str\n @param storage_type: storage type to repair\n @type name: str\n @param name: name of the storage unit to repair\n\n @rtype: int\n @return: job id" - }, - { - "code": "def regroup(self, group_by=None):\n if not group_by:\n group_by = self.group_by\n groups = self.groups\n self.groups = {}\n for g in groups:\n for item in groups[g]:\n self.add(item, group_by)", - "docstring": "Regroup items." 
- }, - { - "code": "def override_temp(replacement):\n pkg_resources.py31compat.makedirs(replacement, exist_ok=True)\n saved = tempfile.tempdir\n tempfile.tempdir = replacement\n try:\n yield\n finally:\n tempfile.tempdir = saved", - "docstring": "Monkey-patch tempfile.tempdir with replacement, ensuring it exists" - }, - { - "code": "def default_working_dir():\n import nameset.virtualchain_hooks as virtualchain_hooks\n return os.path.expanduser('~/.{}'.format(virtualchain_hooks.get_virtual_chain_name()))", - "docstring": "Get the default configuration directory for blockstackd" - }, - { - "code": "def event(self, button):\n if self.timer:\n self.timer.cancel()\n if self.last_button != button:\n if self.last_button:\n self.trigger()\n self.clicks = 1\n else:\n self.clicks += 1\n button_index = min(button, len(self.callbacks)) - 1\n if not self.callbacks[button_index][1]:\n self.clicks = 1\n self.last_button = button\n self.trigger()\n else:\n self.last_button = button\n self.timer = Timer(self.click_time, self.trigger)\n self.timer.start()", - "docstring": "button has been clicked" - }, - { - "code": "def run(self):\n document = self.state.document\n if not document.settings.file_insertion_enabled:\n return [document.reporter.warning('File insertion disabled',\n line=self.lineno)]\n try:\n location = self.state_machine.get_source_and_line(self.lineno)\n url = self.arguments[0]\n reader = RemoteCodeBlockReader(url, self.options, self.config)\n text, lines = reader.read(location=location)\n retnode = nodes.literal_block(text, text)\n set_source_info(self, retnode)\n if self.options.get('diff'):\n retnode['language'] = 'udiff'\n elif 'language' in self.options:\n retnode['language'] = self.options['language']\n retnode['linenos'] = ('linenos' in self.options or\n 'lineno-start' in self.options or\n 'lineno-match' in self.options)\n retnode['classes'] += self.options.get('class', [])\n extra_args = retnode['highlight_args'] = {}\n if 'emphasize-lines' in self.options:\n hl_lines = parselinenos(self.options['emphasize-lines'], lines)\n if any(i >= lines for i in hl_lines):\n logger.warning(\n 'line number spec is out of range(1-%d): %r' %\n (lines, self.options['emphasize-lines']),\n location=location)\n extra_args['hl_lines'] = [x + 1 for x in hl_lines if x < lines]\n extra_args['linenostart'] = reader.lineno_start\n if 'caption' in self.options:\n caption = self.options['caption'] or self.arguments[0]\n retnode = container_wrapper(self, retnode, caption)\n self.add_name(retnode)\n return [retnode]\n except Exception as exc:\n return [document.reporter.warning(str(exc), line=self.lineno)]", - "docstring": "Run the ``remote-code-block`` directive." 
- }, - { - "code": "def get_intro_prompt(self):\n server_msg = self.web_services.get_system_status()\n return server_msg + colorize('psiTurk version ' + version_number +\n '\\nType \"help\" for more information.',\n 'green', False)", - "docstring": "Overloads intro prompt with network-aware version if you can reach\n psiTurk.org, request system status message" - }, - { - "code": "def get_broker_id(data_path):\n META_FILE_PATH = \"{data_path}/meta.properties\"\n if not data_path:\n raise ValueError(\"You need to specify the data_path if broker_id == -1\")\n meta_properties_path = META_FILE_PATH.format(data_path=data_path)\n return _read_generated_broker_id(meta_properties_path)", - "docstring": "This function will look into the data folder to get the automatically created\n broker_id.\n\n :param string data_path: the path to the kafka data folder\n :returns int: the real broker_id" - }, - { - "code": "def carefullyCollideContexts(numContexts, numCells, numMinicolumns):\n minicolumns = []\n for _ in xrange(numMinicolumns):\n contextsForCell = [set() for _ in xrange(numCells)]\n contexts = range(numContexts)\n random.shuffle(contexts)\n while len(contexts) > 0:\n eligibleCells = range(len(contextsForCell))\n while len(contexts) > 0 and len(eligibleCells) > 0:\n candidateAdditions = [(context, cell)\n for context in contexts\n for cell in eligibleCells]\n badness = [sum(sum(1 if (context in otherCellContexts and\n otherContext in otherCellContexts) else 0\n for minicolumn in minicolumns\n for otherCellContexts in minicolumn)\n for otherContext in contextsForCell[cell])\n for context, cell in candidateAdditions]\n selectedContext, selectedCell = candidateAdditions[\n badness.index(min(badness))]\n contextsForCell[selectedCell].add(selectedContext)\n eligibleCells.remove(selectedCell)\n contexts.remove(selectedContext)\n minicolumns.append(contextsForCell)\n return minicolumns", - "docstring": "Use a greedy algorithm to choose how each minicolumn should distribute\n contexts between its cells.\n\n @return (list of lists of lists of ints)\n iContext integers for each cell, grouped by minicolumn. For example,\n [[[1, 3], [2,4]],\n [[1, 2]]]\n would specify that cell 0 connects to location 1 and location 3, while cell\n 1 connects to locations 2 and 4, and cell 2 (in the second minicolumn)\n connects to locations 1 and 2." - }, - { - "code": "def query(self, properties=None, criteria=None, distinct_key=None,\n **kwargs):\n if properties is not None:\n props, prop_dict = self._parse_properties(properties)\n else:\n props, prop_dict = None, None\n crit = self._parse_criteria(criteria)\n if self.query_post:\n for func in self.query_post:\n func(crit, props)\n cur = self.collection.find(filter=crit, projection=props, **kwargs)\n if distinct_key is not None:\n cur = cur.distinct(distinct_key)\n return QueryListResults(prop_dict, cur, postprocess=self.result_post)\n else:\n return QueryResults(prop_dict, cur, postprocess=self.result_post)", - "docstring": "Convenience method for database access. All properties and criteria\n can be specified using simplified names defined in Aliases. You can\n use the supported_properties property to get the list of supported\n properties.\n\n Results are returned as an iterator of dicts to ensure memory and cpu\n efficiency.\n\n Note that the dict returned have keys also in the simplified names\n form, not in the mongo format. 
For example, if you query for\n \"analysis.e_above_hull\", the returned result must be accessed as\n r['analysis.e_above_hull'] instead of mongo's\n r['analysis']['e_above_hull']. This is a *feature* of the query engine\n to allow simple access to deeply nested docs without having to resort\n to some recursion to go deep into the result.\n\n However, if you query for 'analysis', the entire 'analysis' key is\n returned as r['analysis'] and then the subkeys can be accessed in the\n usual form, i.e., r['analysis']['e_above_hull']\n\n :param properties: Properties to query for. Defaults to None which means all supported properties.\n :param criteria: Criteria to query for as a dict.\n :param distinct_key: If not None, the key for which to get distinct results\n :param \\*\\*kwargs: Other kwargs supported by pymongo.collection.find.\n Useful examples are limit, skip, sort, etc.\n :return: A QueryResults Iterable, which is somewhat like pymongo's\n cursor except that it performs mapping. In general, the dev does\n not need to concern himself with the form. It is sufficient to know\n that the results are in the form of an iterable of dicts." - }, - { - "code": "async def async_set_switch_state(\n self, switch_number: SwitchNumber, state: bool) -> None:\n await self._protocol.async_execute(\n SetSwitchCommand(\n switch_number,\n SwitchState.On if state else SwitchState.Off))", - "docstring": "Turn a switch on or off.\n\n :param switch_number: the switch to be set.\n :param state: True to turn on, False to turn off." - }, - { - "code": "def parse(cls, spec, relative_to='', subproject_roots=None):\n spec_path, target_name = parse_spec(spec,\n relative_to=relative_to,\n subproject_roots=subproject_roots)\n return cls(spec_path, target_name)", - "docstring": "Parses an address from its serialized form.\n\n :param string spec: An address in string form :.\n :param string relative_to: For sibling specs, ie: ':another_in_same_build_family', interprets\n the missing spec_path part as `relative_to`.\n :param list subproject_roots: Paths that correspond with embedded build roots\n under the current build root.\n :returns: A new address.\n :rtype: :class:`pants.base.address.Address`" - }, - { - "code": "def _register_thrift(self, service_module, handler, **kwargs):\n import tchannel.thrift as thrift\n thrift.register(self._handler, service_module, handler, **kwargs)\n return handler", - "docstring": "Register a Thrift endpoint on this TChannel.\n\n :param service_module:\n Reference to the Thrift-generated module for the service being\n registered.\n :param handler:\n Handler for the endpoint\n :param method:\n Name of the Thrift method being registered. If omitted, ``f``'s\n name is assumed to be the method name.\n :param service:\n Name of the Thrift service. By default this is determined\n automatically from the module name." 
- }, - { - "code": "def validate_element(self, value):\n if not isinstance(value, self.type):\n if isinstance(value, six.integer_types) and self.type == float:\n return float(value)\n if value is None:\n if self.required:\n raise ValidationError('Required field is missing')\n else:\n try:\n name = self.name\n except AttributeError:\n raise ValidationError('Expected type %s for %s, '\n 'found %s (type %s)' %\n (self.type, self.__class__.__name__,\n value, type(value)))\n else:\n raise ValidationError(\n 'Expected type %s for field %s, found %s (type %s)' %\n (self.type, name, value, type(value)))\n return value", - "docstring": "Validate single element of field.\n\n This is different from validate in that it is used on individual\n values of repeated fields.\n\n Args:\n value: Value to validate.\n\n Returns:\n The value casted in the expected type.\n\n Raises:\n ValidationError if value is not expected type." - }, - { - "code": "def submit_the_only_form(self):\n form = ElementSelector(world.browser, str('//form'))\n assert form, \"Cannot find a form on the page.\"\n form.submit()", - "docstring": "Look for a form on the page and submit it.\n\n Asserts if more than one form exists." - }, - { - "code": "def load_config(self, config):\n if config is not None and config.has_section('outputs'):\n logger.debug('Read number of processes to display in the WebUI')\n n = config.get_value('outputs', 'max_processes_display', default=None)\n logger.debug('Number of processes to display in the WebUI: {}'.format(n))", - "docstring": "Load the outputs section of the configuration file." - }, - { - "code": "def write(self):\n if self._text is not None:\n with self.lock:\n self.file.write(str(self._text).encode())\n self.file.flush()\n sleep(self.nice_delay)", - "docstring": "Write the current text to self.file, and flush it.\n This can be overridden to handle custom writes." 
- }, - { - "code": "def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None):\n paired = vcfutils.get_paired_bams(align_bams, items)\n assert paired and not paired.normal_bam, (\"Pisces supports tumor-only variant calling: %s\" %\n (\",\".join([dd.get_sample_name(d) for d in items])))\n vrs = bedutils.population_variant_regions(items)\n target = shared.subset_variant_regions(vrs, region,\n out_file, items=items, do_merge=True)\n min_af = float(dd.get_min_allele_fraction(paired.tumor_data)) / 100.0\n if not utils.file_exists(out_file):\n base_out_name = utils.splitext_plus(os.path.basename(paired.tumor_bam))[0]\n raw_file = \"%s.vcf\" % utils.splitext_plus(out_file)[0]\n with file_transaction(paired.tumor_data, raw_file) as tx_out_file:\n ref_dir = _prep_genome(os.path.dirname(tx_out_file), paired.tumor_data)\n out_dir = os.path.dirname(tx_out_file)\n cores = dd.get_num_cores(paired.tumor_data)\n emit_min_af = min_af / 10.0\n cmd = (\"pisces --bampaths {paired.tumor_bam} --genomepaths {ref_dir} --intervalpaths {target} \"\n \"--maxthreads {cores} --minvf {emit_min_af} --vffilter {min_af} \"\n \"--ploidy somatic --gvcf false -o {out_dir}\")\n cmd += \" -RMxNFilter 5,9,0.35\"\n if min_af < (1.0 / 100.0):\n cmd += \" --minbasecallquality 30\"\n do.run(cmd.format(**locals()), \"Pisces tumor-only somatic calling\")\n shutil.move(os.path.join(out_dir, \"%s.vcf\" % base_out_name),\n tx_out_file)\n vcfutils.bgzip_and_index(raw_file, paired.tumor_data[\"config\"],\n prep_cmd=\"sed 's\n (base_out_name, dd.get_sample_name(paired.tumor_data),\n vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file)))\n return vcfutils.bgzip_and_index(out_file, paired.tumor_data[\"config\"])", - "docstring": "Run tumor only pisces calling\n\n Handles bgzipping output file and fixing VCF sample naming to match BAM sample." - }, - { - "code": "def sorted_files_from_bucket(bucket, keys=None):\n keys = keys or []\n total = len(keys)\n sortby = dict(zip(keys, range(total)))\n values = ObjectVersion.get_by_bucket(bucket).all()\n return sorted(values, key=lambda x: sortby.get(x.key, total))", - "docstring": "Return files from bucket sorted by given keys.\n\n :param bucket: :class:`~invenio_files_rest.models.Bucket` containing the\n files.\n :param keys: Keys order to be used.\n :returns: Sorted list of bucket items." - }, - { - "code": "def _check_settings(self, app):\n if self.USER_ENABLE_INVITE_USER and not self.UserInvitationClass:\n raise ConfigError(\n 'UserInvitationClass is missing while USER_ENABLE_INVITE_USER is True.' \\\n ' Specify UserInvitationClass with UserManager(app, db, User, UserInvitationClass=...' \\\n ' or set USER_ENABLE_INVITE_USER=False.')\n setting = app.config.get('USER_ENABLE_LOGIN_WITHOUT_CONFIRM_EMAIL', None)\n if setting is not None:\n print(\n 'Deprecation warning: USER_ENABLE_LOGIN_WITHOUT_CONFIRM_EMAIL'\\\n ' will be deprecated.' \\\n ' It has been replaced by USER_ALLOW_LOGIN_WITHOUT_CONFIRMED_EMAIL.'\\\n ' Please change this as soon as possible.')\n self.USER_ALLOW_LOGIN_WITHOUT_CONFIRMED_EMAIL = setting\n setting = app.config.get('USER_ENABLE_RETYPE_PASSWORD', None)\n if setting is not None:\n print(\n 'Deprecation warning: USER_ENABLE_RETYPE_PASSWORD'\\\n ' will be deprecated.' 
\\\n ' It has been replaced with USER_REQUIRE_RETYPE_PASSWORD.'\\\n ' Please change this as soon as possible.')\n self.USER_REQUIRE_RETYPE_PASSWORD = setting\n setting = app.config.get('USER_SHOW_USERNAME_EMAIL_DOES_NOT_EXIST', None)\n if setting is not None:\n print(\n 'Deprecation warning: USER_SHOW_USERNAME_EMAIL_DOES_NOT_EXIST' \\\n ' will be deprecated.' \\\n ' It has been replaced with USER_SHOW_USERNAME_DOES_NOT_EXIST'\n ' and USER_SHOW_EMAIL_DOES_NOT_EXIST.'\n ' Please change this as soon as possible.')\n self.USER_SHOW_USERNAME_DOES_NOT_EXIST = setting\n self.USER_SHOW_EMAIL_DOES_NOT_EXIST = setting\n setting = app.config.get('USER_PASSWORD_HASH', None)\n if setting is not None:\n print(\n \"Deprecation warning: USER_PASSWORD_HASH=\"\\\n \" will be deprecated.\"\\\n \" It has been replaced with USER_PASSLIB_CRYPTCONTEXT_SCHEMES=.\"\n \" Please change USER_PASSWORD_HASH='something' to\"\\\n \" USER_PASSLIB_CRYPTCONTEXT_SCHEMES=['something'] as soon as possible.\")\n self.USER_PASSLIB_CRYPTCONTEXT_SCHEMES = [setting]\n if not self.USER_EMAIL_SENDER_EMAIL and self.USER_ENABLE_EMAIL:\n raise ConfigError(\n 'USER_EMAIL_SENDER_EMAIL is missing while USER_ENABLE_EMAIL is True.'\\\n ' specify USER_EMAIL_SENDER_EMAIL (and USER_EMAIL_SENDER_NAME) or set USER_ENABLE_EMAIL to False.')\n if not self.USER_ENABLE_USERNAME and not self.USER_ENABLE_EMAIL:\n self.USER_ENABLE_REGISTER = False\n if not self.USER_ENABLE_EMAIL:\n self.USER_ENABLE_CONFIRM_EMAIL = False\n self.USER_ENABLE_MULTIPLE_EMAILS = False\n self.USER_ENABLE_FORGOT_PASSWORD = False\n self.USER_SEND_PASSWORD_CHANGED_EMAIL = False\n self.USER_SEND_REGISTERED_EMAIL = False\n self.USER_SEND_USERNAME_CHANGED_EMAIL = False\n self.USER_REQUIRE_INVITATION = False\n if not self.USER_ENABLE_USERNAME:\n self.USER_ENABLE_CHANGE_USERNAME = False", - "docstring": "Verify required settings. Produce a helpful error messages for incorrect settings." - }, - { - "code": "def _get_vswitch_name(self, network_type, physical_network):\n if network_type != constants.TYPE_LOCAL:\n vswitch_name = self._get_vswitch_for_physical_network(\n physical_network)\n else:\n vswitch_name = self._local_network_vswitch\n if vswitch_name:\n return vswitch_name\n err_msg = _(\"No vSwitch configured for physical network \"\n \"'%(physical_network)s'. Neutron network type: \"\n \"'%(network_type)s'.\")\n raise exception.NetworkingHyperVException(\n err_msg % dict(physical_network=physical_network,\n network_type=network_type))", - "docstring": "Get the vswitch name for the received network information." - }, - { - "code": "def add_genesis_parser(subparsers, parent_parser):\n parser = subparsers.add_parser(\n 'genesis',\n help='Creates the genesis.batch file for initializing the validator',\n description='Generates the genesis.batch file for '\n 'initializing the validator.',\n epilog='This command generates a serialized GenesisData protobuf '\n 'message and stores it in the genesis.batch file. One or more input '\n 'files (optional) can contain serialized BatchList protobuf messages '\n 'to add to the GenesisData. The output shows the location of this '\n 'file. By default, the genesis.batch file is stored in '\n '/var/lib/sawtooth. If $SAWTOOTH_HOME is set, the location is '\n '$SAWTOOTH_HOME/data/genesis.batch. 
Use the --output option to change '\n 'the name of the file.',\n parents=[parent_parser])\n parser.add_argument(\n '-o', '--output',\n type=str,\n help='choose the output file for GenesisData')\n parser.add_argument(\n 'input_file',\n nargs='*',\n type=str,\n help='file or files containing batches to add to the resulting '\n 'GenesisData')", - "docstring": "Creates the arg parsers needed for the genesis command." - }, - { - "code": "def describe_table(self, tablename):\n try:\n response = self.call(\n 'describe_table', TableName=tablename)['Table']\n return Table.from_response(response)\n except DynamoDBError as e:\n if e.kwargs['Code'] == 'ResourceNotFoundException':\n return None\n else:\n raise", - "docstring": "Get the details about a table\n\n Parameters\n ----------\n tablename : str\n Name of the table\n\n Returns\n -------\n table : :class:`~dynamo3.fields.Table`" - }, - { - "code": "def as_python(self, name: str) -> str:\n if self._ruleTokens:\n pattern = \"jsg.JSGPattern(r'{}'.format({}))\".\\\n format(self._rulePattern, ', '.join(['{v}={v}.pattern'.format(v=v) for v in sorted(self._ruleTokens)]))\n else:\n pattern = \"jsg.JSGPattern(r'{}')\".format(self._rulePattern)\n base_type = self._jsontype.signature_type() if self._jsontype else \"jsg.JSGString\"\n return python_template.format(name=name, base_type=base_type, pattern=pattern)", - "docstring": "Return the python representation" - }, - { - "code": "def on_selection_changed(self, sel):\n m, self.editing_iter = sel.get_selected()\n if self.editing_iter:\n self.editing_model = m[self.editing_iter][0]\n self.show_curr_model_view(self.editing_model, False)\n else: self.view.remove_currency_view()\n return", - "docstring": "The user changed selection" - }, - { - "code": "def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http',\n sslflags=None):\n protocol = six.text_type(protocol).lower()\n name = _get_binding_info(hostheader, ipaddress, port)\n if protocol not in _VALID_PROTOCOLS:\n message = (\"Invalid protocol '{0}' specified. Valid formats:\"\n ' {1}').format(protocol, _VALID_PROTOCOLS)\n raise SaltInvocationError(message)\n if sslflags:\n sslflags = int(sslflags)\n if sslflags not in _VALID_SSL_FLAGS:\n message = (\"Invalid sslflags '{0}' specified. Valid sslflags range:\"\n ' {1}..{2}').format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[-1])\n raise SaltInvocationError(message)\n current_bindings = list_bindings(site)\n if name in current_bindings:\n log.debug('Binding already present: %s', name)\n return True\n if sslflags:\n ps_cmd = ['New-WebBinding',\n '-Name', \"'{0}'\".format(site),\n '-HostHeader', \"'{0}'\".format(hostheader),\n '-IpAddress', \"'{0}'\".format(ipaddress),\n '-Port', \"'{0}'\".format(port),\n '-Protocol', \"'{0}'\".format(protocol),\n '-SslFlags', '{0}'.format(sslflags)]\n else:\n ps_cmd = ['New-WebBinding',\n '-Name', \"'{0}'\".format(site),\n '-HostHeader', \"'{0}'\".format(hostheader),\n '-IpAddress', \"'{0}'\".format(ipaddress),\n '-Port', \"'{0}'\".format(port),\n '-Protocol', \"'{0}'\".format(protocol)]\n cmd_ret = _srvmgr(ps_cmd)\n if cmd_ret['retcode'] != 0:\n msg = 'Unable to create binding: {0}\\nError: {1}' \\\n ''.format(site, cmd_ret['stderr'])\n raise CommandExecutionError(msg)\n if name in list_bindings(site):\n log.debug('Binding created successfully: %s', site)\n return True\n log.error('Unable to create binding: %s', site)\n return False", - "docstring": "Create an IIS Web Binding.\n\n .. 
note::\n\n This function only validates against the binding\n ipaddress:port:hostheader combination, and will return True even if the\n binding already exists with a different configuration. It will not\n modify the configuration of an existing binding.\n\n Args:\n site (str): The IIS site name.\n hostheader (str): The host header of the binding. Usually a hostname.\n ipaddress (str): The IP address of the binding.\n port (int): The TCP port of the binding.\n protocol (str): The application protocol of the binding.\n sslflags (str): The flags representing certificate type and storage of\n the binding.\n\n Returns:\n bool: True if successful, otherwise False\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'" - }, - { - "code": "def stream(self, date_created_before=values.unset, date_created=values.unset,\n date_created_after=values.unset, date_updated_before=values.unset,\n date_updated=values.unset, date_updated_after=values.unset,\n friendly_name=values.unset, status=values.unset, limit=None,\n page_size=None):\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n date_created_before=date_created_before,\n date_created=date_created,\n date_created_after=date_created_after,\n date_updated_before=date_updated_before,\n date_updated=date_updated,\n date_updated_after=date_updated_after,\n friendly_name=friendly_name,\n status=status,\n page_size=limits['page_size'],\n )\n return self._version.stream(page, limits['limit'], limits['page_limit'])", - "docstring": "Streams ConferenceInstance records from the API as a generator stream.\n This operation lazily loads records as efficiently as possible until the limit\n is reached.\n The results are returned as a generator, so this operation is memory efficient.\n\n :param date date_created_before: The `YYYY-MM-DD` value of the resources to read\n :param date date_created: The `YYYY-MM-DD` value of the resources to read\n :param date date_created_after: The `YYYY-MM-DD` value of the resources to read\n :param date date_updated_before: The `YYYY-MM-DD` value of the resources to read\n :param date date_updated: The `YYYY-MM-DD` value of the resources to read\n :param date date_updated_after: The `YYYY-MM-DD` value of the resources to read\n :param unicode friendly_name: The string that identifies the Conference resources to read\n :param ConferenceInstance.Status status: The status of the resources to read\n :param int limit: Upper limit for the number of records to return. stream()\n guarantees to never return more than limit. Default is no limit\n :param int page_size: Number of records to fetch per request, when not set will use\n the default value of 50 records. If no page_size is defined\n but a limit is defined, stream() will attempt to read the\n limit with the most efficient page size, i.e. 
min(limit, 1000)\n\n :returns: Generator that will yield up to limit results\n :rtype: list[twilio.rest.api.v2010.account.conference.ConferenceInstance]" - }, - { - "code": "def check_payment(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):\n assert state_op_type in ['NAME_REGISTRATION', 'NAME_RENEWAL'], 'Invalid op type {}'.format(state_op_type)\n assert name_fee is not None\n assert isinstance(name_fee, (int,long))\n name = nameop['name']\n namespace_id = get_namespace_from_name(name)\n namespace = state_engine.get_namespace( namespace_id )\n res = None\n log.debug('{} is a version-0x{} namespace'.format(namespace['namespace_id'], namespace['version']))\n if namespace['version'] == NAMESPACE_VERSION_PAY_TO_BURN:\n res = check_payment_v1(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)\n elif namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR:\n res = check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)\n elif namespace['version'] == NAMESPACE_VERSION_PAY_WITH_STACKS:\n res = check_payment_v3(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id)\n else:\n log.warning(\"Namespace {} has version bits 0x{:x}, which has unknown registration rules\".format(namespace['namespace_id'], namespace['version']))\n return {'status': False}\n if not res['status']:\n return res\n tokens_paid = res['tokens_paid']\n token_units = res['token_units']\n return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}", - "docstring": "Verify that the right payment was made, in the right cryptocurrency units.\n Does not check any accounts or modify the nameop in any way; it only checks that the name was paid for by the transaction.\n\n NOTE: if state_op_type is NAME_REGISTRATION, you will need to have called state_create_put_preorder() before calling this method!\n\n Returns {'status': True, 'tokens_paid': tokens_paid, 'token_units': ...} if the payment information is correct.\n Returns {'status': False} if not" - }, - { - "code": "def dataset_search(q=None, type=None, keyword=None,\n\towningOrg=None, publishingOrg=None, hostingOrg=None, decade=None,\n\tpublishingCountry = None, facet = None, facetMincount=None,\n\tfacetMultiselect = None, hl = False, limit = 100, offset = None,\n\t**kwargs):\n\turl = gbif_baseurl + 'dataset/search'\n\targs = {'q': q, 'type': type, 'keyword': keyword,\n\t\t\t\t'owningOrg': owningOrg, 'publishingOrg': publishingOrg,\n\t\t\t\t'hostingOrg': hostingOrg, 'decade': decade,\n\t\t\t\t'publishingCountry': publishingCountry, 'facet': facet,\n\t\t\t\t'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,\n\t\t\t\t'hl': hl, 'limit': limit, 'offset': offset}\n\tgbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}\n\tif gbif_kwargs is not None:\n\t\txx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))\n\t\targs.update(xx)\n\tkwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}\n\tout = gbif_GET(url, args, **kwargs)\n\treturn out", - "docstring": "Full text search across all datasets. Results are ordered by relevance.\n\n\t:param q: [str] Query term(s) for full text search. The value for this parameter\n\t\t can be a simple word or a phrase. Wildcards can be added to the simple word\n\t\t parameters only, e.g. 
``q=*puma*``\n\t:param type: [str] Type of dataset, options include OCCURRENCE, etc.\n\t:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which\n\t\t you can search on. The search is done on the merged collection of tags, the\n\t\t dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING\n\t\t ANYMORE AS OF 2016-09-02.\n\t:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`\n\t:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`\n\t:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`\n\t:param publishingCountry: [str] Publishing country.\n\t:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage\n\t\t broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000,\n\t\t etc, and will return datasets wholly contained in the decade as well as those\n\t\t that cover the entire decade or more. Facet by decade to get the break down,\n\t\t e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)\n\t:param facet: [str] A list of facet names used to retrieve the 100 most frequent values\n\t\t\tfor a field. Allowed facets are: type, keyword, publishingOrg, hostingOrg, decade,\n\t\t\tand publishingCountry. Additionally subtype and country are legal values but not\n\t\t\tyet implemented, so data will not yet be returned for them.\n\t:param facetMincount: [str] Used in combination with the facet parameter. Set\n\t\t\tfacetMincount={#} to exclude facets with a count less than {#}, e.g.\n\t\t\thttp://api.gbif.org/v1/dataset/search?facet=type&limit=0&facetMincount=10000\n\t\t\tonly shows the type value 'OCCURRENCE' because 'CHECKLIST' and 'METADATA' have\n\t\t\tcounts less than 10000.\n\t:param facetMultiselect: [bool] Used in combination with the facet parameter. Set\n\t\t\tfacetMultiselect=True to still return counts for values that are not currently\n\t\t\tfiltered, e.g.\n\t\t\thttp://api.gbif.org/v1/dataset/search?facet=type&limit=0&type=CHECKLIST&facetMultiselect=true\n\t\t\tstill shows type values 'OCCURRENCE' and 'METADATA' even though type is being\n\t\t\tfiltered by type=CHECKLIST\n\t:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext\n\t\t\tsearch fields. The highlight will be an emphasis tag of class 'gbifH1' e.g.\n\t\t\thttp://api.gbif.org/v1/dataset/search?q=plant&hl=true\n\t\t\tFulltext search fields include: title, keyword, country, publishing country,\n\t\t\tpublishing organization title, hosting organization title, and description. One\n\t\t\tadditional full text field is searched which includes information from metadata\n\t\t\tdocuments, but the text of this field is not returned in the response.\n\t:param limit: [int] Number of results to return. Default: ``300``\n\t:param offset: [int] Record to start at. Default: ``0``\n\n\t:note: Note that you can pass in additional faceting parameters on a per field basis.\n\t\t\tFor example, if you want to limit the numbef of facets returned from a field ``foo`` to\n\t\t\t3 results, pass in ``foo_facetLimit = 3``. GBIF does not allow all per field parameters,\n\t\t\tbut does allow some. 
See also examples.\n\n\t:return: A dictionary\n\n\tReferences: http://www.gbif.org/developer/registry#datasetSearch\n\n\tUsage::\n\n\t\t\tfrom pygbif import registry\n\t\t\t# Gets all datasets of type \"OCCURRENCE\".\n\t\t\tregistry.dataset_search(type=\"OCCURRENCE\", limit = 10)\n\n\t\t\t# Fulltext search for all datasets having the word \"amsterdam\" somewhere in\n\t\t\t# its metadata (title, description, etc).\n\t\t\tregistry.dataset_search(q=\"amsterdam\", limit = 10)\n\n\t\t\t# Limited search\n\t\t\tregistry.dataset_search(type=\"OCCURRENCE\", limit=2)\n\t\t\tregistry.dataset_search(type=\"OCCURRENCE\", limit=2, offset=10)\n\n\t\t\t# Search by decade\n\t\t\tregistry.dataset_search(decade=1980, limit = 10)\n\n\t\t\t# Faceting\n\t\t\t## just facets\n\t\t\tregistry.dataset_search(facet=\"decade\", facetMincount=10, limit=0)\n\n\t\t\t## data and facets\n\t\t\tregistry.dataset_search(facet=\"decade\", facetMincount=10, limit=2)\n\n\t\t\t## many facet variables\n\t\t\tregistry.dataset_search(facet=[\"decade\", \"type\"], facetMincount=10, limit=0)\n\n\t\t\t## facet vars\n\t\t\t### per variable paging\n\t\t\tx = registry.dataset_search(\n\t\t\t\tfacet = [\"decade\", \"type\"],\n\t\t\t\tdecade_facetLimit = 3,\n\t\t\t\ttype_facetLimit = 3,\n\t\t\t\tlimit = 0\n\t\t\t)\n\n\t\t\t## highlight\n\t\t\tx = registry.dataset_search(q=\"plant\", hl=True, limit = 10)\n\t\t\t[ z['description'] for z in x['results'] ]" - }, - { - "code": "def calc_constitutive_matrix(self):\n self.A_general = np.zeros([5,5], dtype=np.float64)\n self.B_general = np.zeros([5,5], dtype=np.float64)\n self.D_general = np.zeros([5,5], dtype=np.float64)\n lam_thick = sum([ply.h for ply in self.plies])\n self.h = lam_thick\n h0 = -lam_thick/2 + self.offset\n for ply in self.plies:\n hk_1 = h0\n h0 += ply.h\n hk = h0\n self.A_general += ply.QL*(hk - hk_1)\n self.B_general += 1/2.*ply.QL*(hk**2 - hk_1**2)\n self.D_general += 1/3.*ply.QL*(hk**3 - hk_1**3)\n self.A = self.A_general[0:3, 0:3]\n self.B = self.B_general[0:3, 0:3]\n self.D = self.D_general[0:3, 0:3]\n self.E = self.A_general[3:5, 3:5]\n conc1 = np.concatenate([self.A, self.B], axis=1)\n conc2 = np.concatenate([self.B, self.D], axis=1)\n self.ABD = np.concatenate([conc1, conc2], axis=0)\n self.ABDE = np.zeros((8, 8), dtype=np.float64)\n self.ABDE[0:6, 0:6] = self.ABD\n self.ABDE[6:8, 6:8] = self.E", - "docstring": "Calculates the laminate constitutive matrix\n\n This is the commonly called ``ABD`` matrix with ``shape=(6, 6)`` when\n the classical laminated plate theory is used, or the ``ABDE`` matrix\n when the first-order shear deformation theory is used, containing the\n transverse shear terms." - }, - { - "code": "def root_tokens(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return self.get_analysis_element(ROOT_TOKENS)", - "docstring": "Root tokens of word roots." - }, - { - "code": "def get_pk(obj):\n if inspect.isclass(obj):\n pk_list = sqlalchemy.inspect(obj).primary_key\n else:\n pk_list = obj.__mapper__.primary_key\n return pk_list", - "docstring": "Return primary key name by model class or instance.\n\n :Parameters:\n - `obj`: SQLAlchemy model instance or class.\n\n :Examples:\n\n >>> from sqlalchemy import Column, Integer\n >>> from sqlalchemy.ext.declarative import declarative_base\n >>> Base = declarative_base()\n >>> class User(Base):\n ... __tablename__ = 'users'\n ... 
id = Column(Integer, primary_key=True)\n >>> get_pk(User())\n (Column('id', Integer(), table=, primary_key=True, nullable=False),)\n >>> get_pk(User)\n (Column('id', Integer(), table=, primary_key=True, nullable=False),)" - }, - { - "code": "def load_labware(self, labware: Labware) -> Labware:\n if labware.magdeck_engage_height is None:\n MODULE_LOG.warning(\n \"This labware ({}) is not explicitly compatible with the\"\n \" Magnetic Module. You will have to specify a height when\"\n \" calling engage().\")\n return super().load_labware(labware)", - "docstring": "Load labware onto a Magnetic Module, checking if it is compatible" - }, - { - "code": "def validate(**kwargs):\n def decorator(func):\n _VALIDATORS[kwargs.pop('name', func.__name__)] = func\n return func\n return decorator", - "docstring": "Defines a decorator to register a validator with a name for look-up.\n\n If name is not provided we use function name as name of the validator." - }, - { - "code": "def __append_list(append_to, value):\n if value is not None:\n if isinstance(value, list):\n append_to.extend(value)\n else:\n append_to.append(value)", - "docstring": "Appends the value to the list." - }, - { - "code": "def body_lines(self):\n if not self.message.is_multipart():\n body = self.message.get_payload(None, decode=True)\n else:\n _, _, body = self.message.as_string().partition(\"\\n\\n\")\n if isinstance(body, bytes):\n for enc in ['ascii', 'utf-8']:\n try:\n body = body.decode(enc)\n break\n except UnicodeDecodeError:\n continue\n else:\n body = self.message.get_payload(None, decode=False)\n return body.splitlines(True)", - "docstring": "Return a normalized list of lines from message's body." - }, - { - "code": "def is_type(obj,\n type_,\n **kwargs):\n if not is_iterable(type_):\n type_ = [type_]\n return_value = False\n for check_for_type in type_:\n if isinstance(check_for_type, type):\n return_value = isinstance(obj, check_for_type)\n elif obj.__class__.__name__ == check_for_type:\n return_value = True\n else:\n return_value = _check_base_classes(obj.__class__.__bases__,\n check_for_type)\n if return_value is True:\n break\n return return_value", - "docstring": "Indicate if ``obj`` is a type in ``type_``.\n\n .. hint::\n\n This checker is particularly useful when you want to evaluate whether\n ``obj`` is of a particular type, but importing that type directly to use\n in :func:`isinstance() ` would cause a circular import\n error.\n\n To use this checker in that kind of situation, you can instead pass the\n *name* of the type you want to check as a string in ``type_``. The checker\n will evaluate it and see whether ``obj`` is of a type or inherits from a\n type whose name matches the string you passed.\n\n :param obj: The object whose type should be checked.\n :type obj: :class:`object `\n\n :param type_: The type(s) to check against.\n :type type_: :class:`type ` / iterable of :class:`type ` /\n :class:`str ` with type name / iterable of :class:`str `\n with type name\n\n :returns: ``True`` if ``obj`` is a type in ``type_``. 
Otherwise, ``False``.\n :rtype: :class:`bool `\n\n :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates\n keyword parameters passed to the underlying validator" - }, - { - "code": "def get_peer_resources(self, peer_jid):\n try:\n d = dict(self._presences[peer_jid])\n d.pop(None, None)\n return d\n except KeyError:\n return {}", - "docstring": "Return a dict mapping resources of the given bare `peer_jid` to the\n presence state last received for that resource.\n\n Unavailable presence states are not included. If the bare JID is in a\n error state (i.e. an error presence stanza has been received), the\n returned mapping is empty." - }, - { - "code": "def trace_api(self):\n if self._trace_api is None:\n self._trace_api = make_trace_api(self)\n return self._trace_api", - "docstring": "Helper for trace-related API calls.\n\n See\n https://cloud.google.com/trace/docs/reference/v2/rpc/google.devtools.\n cloudtrace.v2" - }, - { - "code": "def show(modifier):\n ret = {'changes': False}\n capital_modifiers = ['Tables']\n all_modifiers = ['rules', 'states', 'tables']\n all_modifiers += capital_modifiers\n if modifier.title() in capital_modifiers:\n modifier = modifier.title()\n if modifier not in all_modifiers:\n raise SaltInvocationError('Unknown modifier: {0}'.format(modifier))\n cmd = 'pfctl -s {0}'.format(modifier)\n result = __salt__['cmd.run_all'](cmd,\n output_loglevel='trace',\n python_shell=False)\n if result['retcode'] == 0:\n ret['comment'] = result['stdout'].split('\\n')\n else:\n raise CommandExecutionError(\n 'Could not show {0}'.format(modifier),\n info={'errors': [result['stderr']], 'changes': False}\n )\n return ret", - "docstring": "Show filter parameters.\n\n modifier:\n Modifier to apply for filtering. Only a useful subset of what pfctl supports\n can be used with Salt.\n\n - rules\n - states\n - tables\n\n CLI example:\n\n .. code-block:: bash\n\n salt '*' pf.show rules" - }, - { - "code": "def jamieson_pst(v, v0, c0, s, gamma0, q, theta0, n, z, mass, c_v,\n three_r=3. * constants.R, t_ref=300.):\n rho = mass / vol_uc2mol(v, z) * 1.e-6\n rho0 = mass / vol_uc2mol(v0, z) * 1.e-6\n p_h = hugoniot_p(rho, rho0, c0, s)\n p_th_h = jamieson_pth(v, v0, c0, s, gamma0, q, theta0, n, z, mass, c_v,\n three_r=three_r, t_ref=t_ref)\n p_st = p_h - p_th_h\n return p_st", - "docstring": "calculate static pressure at 300 K from Hugoniot data using the constq\n formulation\n\n :param v: unit-cell volume in A^3\n :param v0: unit-cell volume in A^3 at 1 bar\n :param c0: velocity at 1 bar in km/s\n :param s: slope of the velocity change\n :param gamma0: Gruneisen parameter at 1 bar\n :param q: logarithmic derivative of Gruneisen parameter\n :param theta0: Debye temperature in K\n :param n: number of elements in a chemical formula\n :param z: number of formula unit in a unit cell\n :param mass: molar mass in gram\n :param c_v: heat capacity\n :param three_r: 3 times gas constant.\n Jamieson modified this value to compensate for mismatches\n :param t_ref: reference temperature, 300 K\n :return: static pressure in GPa\n :note: 2017/05/18 I am unsure if this is actually being used in pytheos" - }, - { - "code": "def setupViewletByName(self, name):\n context = aq_inner(self.context)\n request = self.request\n reg = self.getViewletByName(name)\n if reg is None:\n return None\n factory = reg.factory\n try:\n viewlet = factory(context, request, self, None).__of__(context)\n except TypeError:\n raise RuntimeError(\n \"Unable to initialize viewlet {}. 
\"\n \"Factory method {} call failed.\"\n .format(name, str(factory)))\n return viewlet", - "docstring": "Constructs a viewlet instance by its name.\n\n Viewlet update() and render() method are not called.\n\n @return: Viewlet instance of None if viewlet with name does not exist" - }, - { - "code": "def pb2dict(obj):\n adict = {}\n if not obj.IsInitialized():\n return None\n for field in obj.DESCRIPTOR.fields:\n if not getattr(obj, field.name):\n continue\n if not field.label == FD.LABEL_REPEATED:\n if not field.type == FD.TYPE_MESSAGE:\n adict[field.name] = getattr(obj, field.name)\n else:\n value = pb2dict(getattr(obj, field.name))\n if value:\n adict[field.name] = value\n else:\n if field.type == FD.TYPE_MESSAGE:\n adict[field.name] = \\\n [pb2dict(v) for v in getattr(obj, field.name)]\n else:\n adict[field.name] = [v for v in getattr(obj, field.name)]\n return adict", - "docstring": "Takes a ProtoBuf Message obj and convertes it to a dict." - }, - { - "code": "def Parse(self, parser_mediator):\n file_entry = parser_mediator.GetFileEntry()\n if not file_entry:\n raise errors.UnableToParseFile('Invalid file entry')\n parser_mediator.AppendToParserChain(self)\n try:\n self.ParseFileEntry(parser_mediator, file_entry)\n finally:\n parser_mediator.PopFromParserChain()", - "docstring": "Parsers the file entry and extracts event objects.\n\n Args:\n parser_mediator (ParserMediator): a parser mediator.\n\n Raises:\n UnableToParseFile: when the file cannot be parsed." - }, - { - "code": "def hungarian(A, B):\n distances = cdist(A, B, 'euclidean')\n indices_a, indices_b = linear_sum_assignment(distances)\n return indices_b", - "docstring": "Hungarian reordering.\n\n Assume A and B are coordinates for atoms of SAME type only" - }, - { - "code": "def access(self, path, mode, dir_fd=None, follow_symlinks=None):\n if follow_symlinks is not None and sys.version_info < (3, 3):\n raise TypeError(\"access() got an unexpected \"\n \"keyword argument 'follow_symlinks'\")\n path = self._path_with_dir_fd(path, self.access, dir_fd)\n try:\n stat_result = self.stat(path, follow_symlinks=follow_symlinks)\n except OSError as os_error:\n if os_error.errno == errno.ENOENT:\n return False\n raise\n if is_root():\n mode &= ~os.W_OK\n return (mode & ((stat_result.st_mode >> 6) & 7)) == mode", - "docstring": "Check if a file exists and has the specified permissions.\n\n Args:\n path: (str) Path to the file.\n mode: (int) Permissions represented as a bitwise-OR combination of\n os.F_OK, os.R_OK, os.W_OK, and os.X_OK.\n dir_fd: If not `None`, the file descriptor of a directory, with\n `path` being relative to this directory.\n New in Python 3.3.\n follow_symlinks: (bool) If `False` and `path` points to a symlink,\n the link itself is queried instead of the linked object.\n New in Python 3.3.\n\n Returns:\n bool, `True` if file is accessible, `False` otherwise." 
- }, - { - "code": "def trigger(self, when=1):\n tw = Window(self.stream, self._config['type'])\n tw._config['evictPolicy'] = self._config['evictPolicy']\n tw._config['evictConfig'] = self._config['evictConfig']\n if self._config['evictPolicy'] == 'TIME':\n tw._config['evictTimeUnit'] = 'MILLISECONDS'\n if isinstance(when, datetime.timedelta):\n tw._config['triggerPolicy'] = 'TIME'\n tw._config['triggerConfig'] = int(when.total_seconds() * 1000.0)\n tw._config['triggerTimeUnit'] = 'MILLISECONDS'\n elif isinstance(when, int):\n tw._config['triggerPolicy'] = 'COUNT'\n tw._config['triggerConfig'] = when\n else:\n raise ValueError(when)\n return tw", - "docstring": "Declare a window with this window's size and a trigger policy.\n\n When the window is triggered is defined by `when`.\n\n If `when` is an `int` then the window is triggered every\n `when` tuples. For example, with ``when=5`` the window\n will be triggered every five tuples.\n\n If `when` is an `datetime.timedelta` then it is the period\n of the trigger. With a `timedelta` representing one minute\n then the window is triggered every minute.\n\n By default, when `trigger` has not been called on a `Window`\n it triggers for every tuple inserted into the window\n (equivalent to ``when=1``).\n\n Args:\n when: The size of the window, either an `int` to define the\n number of tuples or `datetime.timedelta` to define the\n duration of the window.\n\n Returns:\n Window: Window that will be triggered.\n\n .. warning:: A trigger is only supported for a sliding window\n such as one created by :py:meth:`last`." - }, - { - "code": "def get_serializer_class(self, view, method_func):\n if hasattr(method_func, 'request_serializer'):\n return getattr(method_func, 'request_serializer')\n if hasattr(view, 'serializer_class'):\n return getattr(view, 'serializer_class')\n if hasattr(view, 'get_serializer_class'):\n return getattr(view, 'get_serializer_class')()\n return None", - "docstring": "Try to get the serializer class from view method.\n If view method don't have request serializer, fallback to serializer_class on view class" - }, - { - "code": "def find_flag_alias(self, flag):\n for each in self.opt_names:\n if flag in each:\n result = set(each)\n result.remove(flag)\n return result\n return None", - "docstring": "Return alias set of a flag; return None if flag is not defined in\n \"Options\"." 
- }, - { - "code": "def graft(coll, branch, index):\n pre = coll[:index]\n post = coll[index:]\n ret = pre + branch + post\n return ret", - "docstring": "Graft list branch into coll at index" - }, - { - "code": "def load_calibration_template(self, template):\n self.tone_calibrator.stimulus.clearComponents()\n self.tone_calibrator.stimulus.loadFromTemplate(template['tone_doc'], self.tone_calibrator.stimulus)\n comp_doc = template['noise_doc']\n for state, calstim in zip(comp_doc, self.bs_calibrator.get_stims()):\n calstim.loadState(state)", - "docstring": "Reloads calibration settings from saved template doc\n\n :param template: Values for calibration stimuli (see calibration_template function)\n :type template: dict" - }, - { - "code": "def atlasdb_add_zonefile_info( name, zonefile_hash, txid, present, tried_storage, block_height, con=None, path=None ):\n global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK\n with AtlasDBOpen( con=con, path=path ) as dbcon:\n with ZONEFILE_INV_LOCK:\n if present:\n present = 1\n else:\n present = 0\n if tried_storage:\n tried_storage = 1\n else:\n tried_storage = 0\n sql = \"UPDATE zonefiles SET name = ?, zonefile_hash = ?, txid = ?, present = ?, tried_storage = ?, block_height = ? WHERE txid = ?;\"\n args = (name, zonefile_hash, txid, present, tried_storage, block_height, txid )\n cur = dbcon.cursor()\n update_res = atlasdb_query_execute( cur, sql, args )\n dbcon.commit()\n if update_res.rowcount == 0:\n sql = \"INSERT OR IGNORE INTO zonefiles (name, zonefile_hash, txid, present, tried_storage, block_height) VALUES (?,?,?,?,?,?);\"\n args = (name, zonefile_hash, txid, present, tried_storage, block_height)\n cur = dbcon.cursor()\n atlasdb_query_execute( cur, sql, args )\n dbcon.commit()\n zfbits = atlasdb_get_zonefile_bits( zonefile_hash, con=dbcon, path=path )\n inv_vec = None\n if ZONEFILE_INV is None:\n inv_vec = \"\"\n else:\n inv_vec = ZONEFILE_INV[:]\n ZONEFILE_INV = atlas_inventory_flip_zonefile_bits( inv_vec, zfbits, present )\n log.debug('Set {} ({}) to {}'.format(zonefile_hash, ','.join(str(i) for i in zfbits), present))\n NUM_ZONEFILES = atlasdb_zonefile_inv_length( con=dbcon, path=path )\n return True", - "docstring": "Add a zonefile to the database.\n Mark it as present or absent.\n Keep our in-RAM inventory vector up-to-date" - }, - { - "code": "def split_prefix(key, prefixs):\n key_upper = key.upper()\n for prefix in prefixs:\n if key_upper.startswith(prefix):\n plen = len(prefix)\n return (key_upper[:plen], key[plen:])", - "docstring": "split key string into prefix and remainder\n for first matching prefix from a list" - }, - { - "code": "def validate(self, instance, value):\n if isinstance(value, string_types):\n value = COLORS_NAMED.get(value, value)\n if value.upper() == 'RANDOM':\n value = random.choice(COLORS_20)\n value = value.upper().lstrip('\n if len(value) == 3:\n value = ''.join(v*2 for v in value)\n if len(value) != 6:\n self.error(instance, value, extra='Color must be known name '\n 'or a hex with 6 digits. e.g. 
\"\n try:\n value = [\n int(value[i:i + 6 // 3], 16) for i in range(0, 6, 6 // 3)\n ]\n except ValueError:\n self.error(instance, value,\n extra='Hex color must be base 16 (0-F)')\n if not isinstance(value, (list, tuple)):\n self.error(instance, value,\n extra='Color must be a list or tuple of length 3')\n if len(value) != 3:\n self.error(instance, value, extra='Color must be length 3')\n for val in value:\n if not isinstance(val, integer_types) or not 0 <= val <= 255:\n self.error(instance, value,\n extra='Color values must be ints 0-255.')\n return tuple(value)", - "docstring": "Check if input is valid color and converts to RGB" - }, - { - "code": "def run(self):\n salt.utils.process.appendproctitle(self.__class__.__name__)\n halite.start(self.hopts)", - "docstring": "Fire up halite!" - }, - { - "code": "def killJobs(self, jobsToKill):\n if len(jobsToKill) > 0:\n self.batchSystem.killBatchJobs(jobsToKill)\n for jobBatchSystemID in jobsToKill:\n self.processFinishedJob(jobBatchSystemID, 1)", - "docstring": "Kills the given set of jobs and then sends them for processing" - }, - { - "code": "def _add_section(self, node):\n self._filldown(node.lineno)\n self.context.append(node.name)\n self._update_current_context()\n for _ in map(self.visit, iter_child_nodes(node)):\n pass\n self.context.pop()\n self._update_current_context()", - "docstring": "Register the current node as a new context block" - }, - { - "code": "def write_idx(self, idx, buf):\n key = self.key_type(idx)\n pos = self.tell()\n self.write(buf)\n self.fidx.write('%s\\t%d\\n'%(str(key), pos))\n self.idx[key] = pos\n self.keys.append(key)", - "docstring": "Inserts input record at given index.\n\n Examples\n ---------\n >>> for i in range(5):\n ... record.write_idx(i, 'record_%d'%i)\n >>> record.close()\n\n Parameters\n ----------\n idx : int\n Index of a file.\n buf :\n Record to write." 
- }, - { - "code": "def _exec(**kwargs):\n if 'ignore_retcode' not in kwargs:\n kwargs['ignore_retcode'] = True\n if 'output_loglevel' not in kwargs:\n kwargs['output_loglevel'] = 'quiet'\n return salt.modules.cmdmod.run_all(**kwargs)", - "docstring": "Simple internal wrapper for cmdmod.run" - }, - { - "code": "def _start_ec2_instances(awsclient, ec2_instances, wait=True):\n if len(ec2_instances) == 0:\n return\n client_ec2 = awsclient.get_client('ec2')\n stopped_instances = all_pages(\n client_ec2.describe_instance_status,\n {\n 'InstanceIds': ec2_instances,\n 'Filters': [{\n 'Name': 'instance-state-name',\n 'Values': ['stopping', 'stopped']\n }],\n 'IncludeAllInstances': True\n },\n lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],\n )\n if stopped_instances:\n log.info('Starting EC2 instances: %s', stopped_instances)\n client_ec2.start_instances(InstanceIds=stopped_instances)\n if wait:\n waiter_inst_running = client_ec2.get_waiter('instance_running')\n waiter_inst_running.wait(InstanceIds=stopped_instances)\n waiter_status_ok = client_ec2.get_waiter('instance_status_ok')\n waiter_status_ok.wait(InstanceIds=stopped_instances)", - "docstring": "Helper to start ec2 instances\n\n :param awsclient:\n :param ec2_instances:\n :param wait: waits for instances to start\n :return:" - }, - { - "code": "def copy(self):\n copied_item = self.__class__({})\n for prop in self.__class__.properties:\n if prop in ['uuid']:\n continue\n val = getattr(self, prop, None)\n if val is not None:\n setattr(copied_item, prop, val)\n if hasattr(self, \"customs\"):\n copied_item.customs = copy(self.customs)\n if hasattr(self, \"tags\"):\n copied_item.tags = copy(self.tags)\n if hasattr(self, \"templates\"):\n copied_item.templates = copy(self.templates)\n return copied_item", - "docstring": "Get a copy of this item but with a new id\n\n :return: copy of this object with a new id\n :rtype: object" - }, - { - "code": "def C_array2dict(C):\n d = OrderedDict()\n i=0\n for k in C_keys:\n s = C_keys_shape[k]\n if s == 1:\n j = i+1\n d[k] = C[i]\n else:\n j = i \\\n + reduce(operator.mul, s, 1)\n d[k] = C[i:j].reshape(s)\n i = j\n return d", - "docstring": "Convert a 1D array containing C values to a dictionary." - }, - { - "code": "def finalize_options(self):\n assert bool(self.fa_version), 'FA version is mandatory for this command.'\n if self.zip_path:\n assert os.path.exists(self.zip_path), (\n 'Local zipfile does not exist: %s' % self.zip_path)", - "docstring": "Validate the command options." - }, - { - "code": "def create_batch(cls, size, **kwargs):\n return [cls.create(**kwargs) for _ in range(size)]", - "docstring": "Create a batch of instances of the given class, with overriden attrs.\n\n Args:\n size (int): the number of instances to create\n\n Returns:\n object list: the created instances" - }, - { - "code": "def _sendMessage(self, msg):\n if not msg:\n return\n msg = self._collapseMsg(msg)\n self.sendStatus(msg)", - "docstring": "Collapse and send msg to the master" - }, - { - "code": "def comma_join(fields, oxford=True):\n def fmt(field):\n return \"'%s'\" % field\n if not fields:\n return \"nothing\"\n elif len(fields) == 1:\n return fmt(fields[0])\n elif len(fields) == 2:\n return \" and \".join([fmt(f) for f in fields])\n else:\n result = \", \".join([fmt(f) for f in fields[:-1]])\n if oxford:\n result += \",\"\n result += \" and %s\" % fmt(fields[-1])\n return result", - "docstring": "Join together words." 
- }, - { - "code": "def _onMessage(self, ws, message):\n try:\n data = json.loads(message)['NotificationContainer']\n log.debug('Alert: %s %s %s', *data)\n if self._callback:\n self._callback(data)\n except Exception as err:\n log.error('AlertListener Msg Error: %s', err)", - "docstring": "Called when websocket message is recieved." - }, - { - "code": "def index_iterator(self):\n idx = 0\n while idx < self.number_intervals:\n new_idx = yield idx\n idx += 1\n if new_idx:\n idx = new_idx - 1", - "docstring": "Generator that resumes from same index, or restarts from sent index." - }, - { - "code": "def accept_bras(cls, ops, kwargs):\n from qnet.algebra.core.state_algebra import Bra\n kets = []\n for bra in ops:\n if isinstance(bra, Bra):\n kets.append(bra.ket)\n else:\n return ops, kwargs\n return Bra.create(cls.create(*kets, **kwargs))", - "docstring": "Accept operands that are all bras, and turn that into to bra of the\n operation applied to all corresponding kets" - }, - { - "code": "def get_storage_account(access_token, subscription_id, rgname, account_name):\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourcegroups/', rgname,\n '/providers/Microsoft.Storage/storageAccounts/', account_name,\n '?api-version=', STORAGE_API])\n return do_get(endpoint, access_token)", - "docstring": "Get the properties for the named storage account.\n\n Args:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n rgname (str): Azure resource group name.\n account_name (str): Name of the new storage account.\n\n Returns:\n HTTP response. JSON body of storage account properties." - }, - { - "code": "def read(self):\n data = xr.open_rasterio(self.finfo[\"filename\"],\n chunks=(1, CHUNK_SIZE, CHUNK_SIZE))\n attrs = data.attrs.copy()\n if hasattr(data, 'crs'):\n self.area = self.get_geotiff_area_def(data.crs)\n data = data.rename({'band': 'bands'})\n data['bands'] = BANDS[data.bands.size]\n try:\n data = mask_image_data(data)\n except ValueError as err:\n logger.warning(err)\n data.attrs = attrs\n self.file_content['image'] = data", - "docstring": "Read the image" - }, - { - "code": "def _truncated_power_method(self, A, x0, k, max_iter=10000, thresh=1e-8):\n xts = [x0]\n for t in range(max_iter):\n xts.append(self._normalize(self._truncate(np.dot(A, xts[-1]), k)))\n if np.linalg.norm(xts[-1] - xts[-2]) < thresh: break\n return xts[-1]", - "docstring": "given a matrix A, an initial guess x0, and a maximum cardinality k,\n find the best k-sparse approximation to its dominant eigenvector\n\n References\n ----------\n [1] Yuan, X-T. and Zhang, T. \"Truncated Power Method for Sparse Eigenvalue Problems.\"\n Journal of Machine Learning Research. Vol. 14. 
2013.\n http://www.jmlr.org/papers/volume14/yuan13a/yuan13a.pdf" - }, - { - "code": "def iiif_image_handler(prefix=None, identifier=None,\n path=None, config=None, klass=None, auth=None, **args):\n if (not auth or degraded_request(identifier) or auth.image_authz()):\n if (auth):\n logging.debug(\"Authorized for image %s\" % identifier)\n i = IIIFHandler(prefix, identifier, config, klass, auth)\n try:\n return i.image_request_response(path)\n except IIIFError as e:\n return i.error_response(e)\n else:\n degraded_uri = host_port_prefix(\n config.host, config.port, prefix) + '/' + identifier + '-deg/' + path\n logging.info(\"Redirection to degraded: %s\" % degraded_uri)\n response = redirect(degraded_uri)\n response.headers['Access-control-allow-origin'] = '*'\n return response", - "docstring": "Handler for IIIF Image Requests.\n\n Behaviour for case of a non-authn or non-authz case is to\n return 403." - }, - { - "code": "def _to_json_type(obj, classkey=None):\n if isinstance(obj, dict):\n data = {}\n for (k, v) in obj.items():\n data[k] = _to_json_type(v, classkey)\n return data\n elif hasattr(obj, \"_ast\"):\n return _to_json_type(obj._ast())\n elif hasattr(obj, \"__iter__\"):\n return [_to_json_type(v, classkey) for v in obj]\n elif hasattr(obj, \"__dict__\"):\n data = dict([\n (key, _to_json_type(value, classkey))\n for key, value in obj.__dict__.iteritems()\n if not callable(value) and not key.startswith('_')\n ])\n if classkey is not None and hasattr(obj, \"__class__\"):\n data[classkey] = obj.__class__.__name__\n return data\n else:\n return obj", - "docstring": "Recursively convert the object instance into a valid JSON type." - }, - { - "code": "def logSysInfo():\n logger.info('\n logger.info(datetime.today().strftime(\"%A, %d %B %Y %I:%M%p\"))\n logger.info('Running on [{0}] [{1}]'.format(platform.node(),\n platform.platform()))\n logger.info('Python [{0}]'.format(sys.version))\n logger.info('", - "docstring": "Write system info to log file" - }, - { - "code": "def construct_name_filter(pattern):\n if pattern is None:\n return False, lambda name: True\n if pattern.startswith('/') and pattern.endswith('/'):\n name_re = re.compile(pattern[1:-1])\n return False, lambda name: name_re.search(name) is not None\n return True, lambda name: name == pattern", - "docstring": "Return a function for filtering sensor names based on a pattern.\n\n Parameters\n ----------\n pattern : None or str\n If None, the returned function matches all names.\n If pattern starts and ends with '/' the text between the slashes\n is used as a regular expression to search the names.\n Otherwise the pattern must match the name of the sensor exactly.\n\n Returns\n -------\n exact : bool\n Return True if pattern is expected to match exactly. Used to\n determine whether having no matching sensors constitutes an error.\n filter_func : f(str) -> bool\n Function for determining whether a name matches the pattern." - }, - { - "code": "def doc_includes_process(xmldoc, program):\n\treturn program in lsctables.ProcessTable.get_table(xmldoc).getColumnByName(u\"program\")", - "docstring": "Return True if the process table in xmldoc includes entries for a\n\tprogram named program." 
- }, - { - "code": "def get_user(self, name):\n r = self.kraken_request('GET', 'user/' + name)\n return models.User.wrap_get_user(r)", - "docstring": "Get the user for the given name\n\n :param name: The username\n :type name: :class:`str`\n :returns: the user instance\n :rtype: :class:`models.User`\n :raises: None" - }, - { - "code": "def react(self, msg):\n returned = \"\"\n mtype = type(msg)\n if mtype in stringTypes:\n msg = unpack_frame(msg)\n elif mtype == dict:\n pass\n else:\n raise FrameError(\"Unknown message type '%s', I don't know what to do with this!\" % mtype)\n if msg['cmd'] in self.states:\n returned = self.states[msg['cmd']](msg)\n return returned", - "docstring": "Called to provide a response to a message if needed.\n\n msg:\n This is a dictionary as returned by unpack_frame(...)\n or it can be a straight STOMP message. This function\n will attempt to determine which an deal with it.\n\n returned:\n A message to return or an empty string." - }, - { - "code": "def threeD_gridplot(nodes, **kwargs):\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n lats = []\n longs = []\n depths = []\n for node in nodes:\n lats.append(float(node[0]))\n longs.append(float(node[1]))\n depths.append(float(node[2]))\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(lats, longs, depths)\n ax.set_ylabel(\"Latitude (deg)\")\n ax.set_xlabel(\"Longitude (deg)\")\n ax.set_zlabel(\"Depth(km)\")\n ax.get_xaxis().get_major_formatter().set_scientific(False)\n ax.get_yaxis().get_major_formatter().set_scientific(False)\n fig = _finalise_figure(fig=fig, **kwargs)\n return fig", - "docstring": "Plot in a series of grid points in 3D.\n\n :type nodes: list\n :param nodes: List of tuples of the form (lat, long, depth)\n\n :returns: :class:`matplotlib.figure.Figure`\n\n .. rubric:: Example\n\n >>> from eqcorrscan.utils.plotting import threeD_gridplot\n >>> nodes = [(-43.5, 170.4, 4), (-43.3, 170.8, 12), (-43.4, 170.3, 8)]\n >>> threeD_gridplot(nodes=nodes) # doctest: +SKIP\n\n .. plot::\n\n from eqcorrscan.utils.plotting import threeD_gridplot\n nodes = [(-43.5, 170.4, 4), (-43.3, 170.8, 12), (-43.4, 170.3, 8)]\n threeD_gridplot(nodes=nodes)" - }, - { - "code": "def do_output(self, *args):\n if args:\n action, params = args[0], args[1:]\n log.debug(\"Pass %s directly to output with %s\", action, params)\n function = getattr(self.output, \"do_\" + action, None)\n if function:\n function(*params)", - "docstring": "Pass a command directly to the current output processor" - }, - { - "code": "def cut(self):\n cursorPos = self._qpart.cursorPosition\n topLeft = (min(self._start[0], cursorPos[0]),\n min(self._start[1], cursorPos[1]))\n self.copy()\n self.delete()\n self._qpart.cursorPosition = topLeft", - "docstring": "Cut action. Copy and delete" - }, - { - "code": "def get_context_data(self, **kwargs):\n self.request.session.set_test_cookie()\n if not self.request.session.test_cookie_worked():\n messages.add_message(\n self.request, messages.ERROR, \"Please enable cookies.\")\n self.request.session.delete_test_cookie()\n return super().get_context_data(**kwargs)", - "docstring": "Tests cookies." 
- }, - { - "code": "def parse_atoms(self, pdb):\n atomre = re.compile(\"ATOM\")\n atomlines = [line for line in pdb.lines if atomre.match(line)]\n chainresnums = {}\n for line in atomlines:\n chain = line[21]\n resname = line[17:20]\n resnum = line[22:27]\n chainresnums.setdefault(chain, [])\n if resnum in chainresnums[chain]:\n assert self[chain][chainresnums[chain].index(resnum)] == resname\n else:\n if resnum[-1] == ' ':\n self.setdefault(chain, [])\n self[chain] += [resname]\n chainresnums[chain] += [resnum]\n return chainresnums", - "docstring": "Parse the ATOM entries into the object" - }, - { - "code": "def is_filter_selected(self, selection_id, value):\n selected = self.dashboard_cookie.get(selection_id)\n return selected == value", - "docstring": "Compares whether the 'selection_id' parameter value saved in the\n cookie is the same value as the \"value\" parameter.\n\n :param selection_id: a string as a dashboard_cookie key.\n :param value: The value to compare against the value from\n dashboard_cookie key.\n :return: Boolean." - }, - { - "code": "def _pad(self, b):\n return b + (self.bs - len(b) % self.bs) * chr(self.bs - len(b) % self.bs).encode(\"UTF-8\")", - "docstring": "Will padd the param to be of the correct length for the encryption alg.\n\n :type b: bytes\n :rtype: bytes" - }, - { - "code": "def _send_ffcs(self, region, core_mask, fr):\n arg1 = (NNCommands.flood_fill_core_select << 24) | core_mask\n arg2 = region\n self._send_scp(255, 255, 0, SCPCommands.nearest_neighbour_packet,\n arg1, arg2, fr)", - "docstring": "Send a flood-fill core select packet.\n\n This packet was added in a patched SC&MP 1.34*. Each packet includes a\n region and a core mask; every core that is in the region ORs the core\n mask with a mask it stores locally. On receiving a flood-fill end (FFE)\n packet the application is loaded to the cores specified by this\n composed core mask.\n\n FFCS packets should be sent in ascending order of\n `(region << 18) | core`.\n\n * See https://bitbucket.org/mundya/scamp/branch/new-ff2" - }, - { - "code": "def run(self):\n self.announce(\"Moving library files\", level=3)\n self.skip_build = True\n bin_dir = self.distribution.bin_dir\n libs = [os.path.join(bin_dir, _lib) for _lib in \n os.listdir(bin_dir) if \n os.path.isfile(os.path.join(bin_dir, _lib)) and \n os.path.splitext(_lib)[1] in [\".dll\", \".so\"]\n and not (_lib.startswith(\"python\") or _lib.startswith(\"bpy\"))]\n for lib in libs:\n shutil.move(lib, os.path.join(self.build_dir,\n os.path.basename(lib)))\n self.distribution.data_files = [os.path.join(self.install_dir, \n os.path.basename(lib))\n for lib in libs]\n self.distribution.run_command(\"install_data\")\n super().run()", - "docstring": "Copy libraries from the bin directory and place them as appropriate" - }, - { - "code": "def delete_item(cls, item_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return cls._delete_item_with_http_info(item_id, **kwargs)\n else:\n (data) = cls._delete_item_with_http_info(item_id, **kwargs)\n return data", - "docstring": "Remove item.\n\n Remove item from shopping cart\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async=True\n >>> thread = api.delete_item(item_id, async=True)\n >>> result = thread.get()\n\n :param async bool\n :param str item_id: Item ID to delete. (required)\n :return: ShoppingCart\n If the method is called asynchronously,\n returns the request thread." 
- }, - { - "code": "def have_key(self, *keygrips):\n for keygrip in keygrips:\n try:\n self.get_identity(keygrip=keygrip)\n break\n except KeyError as e:\n log.warning('HAVEKEY(%s) failed: %s', keygrip, e)\n else:\n raise AgentError(b'ERR 67108881 No secret key ')", - "docstring": "Check if any keygrip corresponds to a TREZOR-based key." - }, - { - "code": "def df_names_to_idx(names:IntsOrStrs, df:DataFrame):\n \"Return the column indexes of `names` in `df`.\"\n if not is_listy(names): names = [names]\n if isinstance(names[0], int): return names\n return [df.columns.get_loc(c) for c in names]", - "docstring": "Return the column indexes of `names` in `df`." - }, - { - "code": "def update_generic_password(client, path):\n vault_path, key = path_pieces(path)\n mount = mount_for_path(vault_path, client)\n if not mount:\n client.revoke_self_token()\n raise aomi.exceptions.VaultConstraint('invalid path')\n if backend_type(mount, client) != 'generic':\n client.revoke_self_token()\n raise aomi.exceptions.AomiData(\"Unsupported backend type\")\n LOG.debug(\"Updating generic password at %s\", path)\n existing = client.read(vault_path)\n if not existing or 'data' not in existing:\n LOG.debug(\"Nothing exists yet at %s!\", vault_path)\n existing = {}\n else:\n LOG.debug(\"Updating %s at %s\", key, vault_path)\n existing = existing['data']\n new_password = get_password()\n if key in existing and existing[key] == new_password:\n client.revoke_self_token()\n raise aomi.exceptions.AomiData(\"Password is same as existing\")\n existing[key] = new_password\n client.write(vault_path, **existing)", - "docstring": "Will update a single key in a generic secret backend as\n thought it were a password" - }, - { - "code": "def mark_typed_map(self, name, type_object):\n if not hasattr(type_object, 'dump'):\n raise ArgumentError(\"The passed type object %s is missing required method: dump()\" % type_object)\n if not hasattr(type_object, 'Restore'):\n raise ArgumentError(\"The passed type object %s is missing required method: Restore()\" % type_object)\n def _dump_map(obj):\n if obj is None:\n return None\n if not isinstance(obj, dict):\n raise DataError(\"Property %s marked as list was not a dict: %s\" % (name, repr(obj)))\n return {key: val.dump() for key, val in obj.items()}\n def _restore_map(obj):\n if obj is None:\n return obj\n return {key: type_object.Restore(val) for key, val in obj.items()}\n self.mark_complex(name, _dump_map, _restore_map)", - "docstring": "Mark a property as containing a map str to serializable object.\n\n This convenience method allows you to avoid having to call\n ``mark_complex()`` whenever you need to serialize a dict of objects.\n This method requires that all members of the given dict be of a single\n class that contains a dump() method and a Restore() class method where\n type_object.Restore(x.dump()) == x.\n\n Args:\n name (str): The name of the complex property.\n type_object: The class object that will be contained inside\n this dict." - }, - { - "code": "def update(self, milliseconds):\n self.__sort_up()\n for obj in self.__up_objects:\n obj.update(milliseconds)", - "docstring": "Updates all of the objects in our world." 
- }, - { - "code": "def handle_error(self, type_, value, tb):\n if not issubclass(type_, pywsgi.GreenletExit):\n self.server.loop.handle_error(self.environ, type_, value, tb)\n if self.response_length:\n self.close_connection = True\n else:\n tb_stream = traceback.format_exception(type_, value, tb)\n del tb\n tb_stream.append('\\n')\n tb_stream.append(pprint.pformat(self.environ))\n body = ''.join(tb_stream)\n headers = pywsgi._INTERNAL_ERROR_HEADERS[:]\n headers[2] = ('Content-Length', str(len(body)))\n self.start_response(pywsgi._INTERNAL_ERROR_STATUS, headers)\n self.write(body)", - "docstring": "This method copies the code from pywsgi.WSGIHandler.handle_error,\n change the write part to be a reflection of traceback and environ" - }, - { - "code": "def get_unit_name(self):\n if not self.unit:\n return None\n name = sorted(self.unit.names, key=len)[-1]\n return '%ss' % name", - "docstring": "Returns the name of the unit for this GPS scale\n\n Note that this returns a simply-pluralised version of the name." - }, - { - "code": "def _write(self, session, openFile, replaceParamFile):\n timeSeries = self.timeSeries\n numTS = len(timeSeries)\n valList = []\n for tsNum, ts in enumerate(timeSeries):\n values = ts.values\n for value in values:\n valDict = {'time': value.simTime,\n 'tsNum': tsNum,\n 'value': value.value}\n valList.append(valDict)\n result = pivot(valList, ('time',), ('tsNum',), 'value')\n for line in result:\n valString = ''\n for n in range(0, numTS):\n val = '%.6f' % line[(n,)]\n valString = '%s%s%s' % (\n valString,\n ' ' * (13 - len(str(val))),\n val)\n openFile.write(' %.8f%s\\n' % (line['time'], valString))", - "docstring": "Generic Time Series Write to File Method" - }, - { - "code": "def walk_data(input_data):\n def _walk_dict(input_dict, path_to_root):\n if not path_to_root:\n yield '.', input_dict\n for key, value in input_dict.items():\n key_path = '%s.%s' % (path_to_root, key)\n type_name = value.__class__.__name__\n yield key_path, value\n if type_name == 'dict':\n for dot_path, value in _walk_dict(value, key_path):\n yield dot_path, value\n elif type_name == 'list':\n for dot_path, value in _walk_list(value, key_path):\n yield dot_path, value\n def _walk_list(input_list, path_to_root):\n for i in range(len(input_list)):\n item_path = '%s[%s]' % (path_to_root, i)\n type_name = input_list[i].__class__.__name__\n yield item_path, input_list[i]\n if type_name == 'dict':\n for dot_path, value in _walk_dict(input_list[i], item_path):\n yield dot_path, value\n elif type_name == 'list':\n for dot_path, value in _walk_list(input_list[i], item_path):\n yield dot_path, value\n if isinstance(input_data, dict):\n for dot_path, value in _walk_dict(input_data, ''):\n yield dot_path, value\n elif isinstance(input_data, list):\n for dot_path, value in _walk_list(input_data, ''):\n yield dot_path, value\n else:\n raise ValueError('walk_data() input_data argument must be a list or dictionary.')", - "docstring": "a generator function for retrieving data in a nested dictionary\n\n :param input_data: dictionary or list with nested data\n :return: string with dot_path, object with value of endpoint" - }, - { - "code": "def regressOut(Y, X, return_b=False):\n Xd = la.pinv(X)\n b = Xd.dot(Y)\n Y_out = Y-X.dot(b)\n if return_b:\n return Y_out, b\n else:\n return Y_out", - "docstring": "regresses out X from Y" - }, - { - "code": "def get_method(self, name):\n prog = re.compile(name)\n l = []\n for i in self.get_classes():\n for j in i.get_methods():\n if prog.match(j.get_name()):\n 
l.append(j)\n return l", - "docstring": "Return a list all methods which corresponds to the regexp\n\n :param name: the name of the method (a python regexp)\n\n :rtype: a list with all :class:`EncodedMethod` objects" - }, - { - "code": "def profile_view(request, semester, targetUsername, profile=None):\n wprofile = get_object_or_404(\n WorkshiftProfile,\n user__username=targetUsername,\n semester=semester\n )\n if wprofile == profile:\n page_name = \"My Workshift Profile\"\n else:\n page_name = \"{}'s Workshift Profile\".format(wprofile.user.get_full_name())\n past_shifts = WorkshiftInstance.objects.filter(\n Q(workshifter=wprofile) | Q(liable=wprofile),\n closed=True,\n )\n regular_shifts = RegularWorkshift.objects.filter(\n active=True, current_assignees=wprofile,\n )\n assigned_instances = WorkshiftInstance.objects.filter(\n Q(workshifter=wprofile) | Q(liable=wprofile),\n closed=False,\n ).exclude(\n weekly_workshift__current_assignees=wprofile,\n )\n pool_hours = wprofile.pool_hours.order_by(\n \"-pool__is_primary\", \"pool__title\",\n )\n first_standing, second_standing, third_standing = \\\n any(pool_hours.first_date_standing for pool_hours in wprofile.pool_hours.all()), \\\n any(pool_hours.second_date_standing for pool_hours in wprofile.pool_hours.all()), \\\n any(pool_hours.third_date_standing for pool_hours in wprofile.pool_hours.all())\n full_management = utils.can_manage(request.user, semester=semester)\n any_management = utils.can_manage(request.user, semester, any_pool=True)\n view_note = wprofile == profile or full_management\n return render_to_response(\"profile.html\", {\n \"page_name\": page_name,\n \"profile\": wprofile,\n \"view_note\": view_note,\n \"past_shifts\": past_shifts,\n \"regular_shifts\": regular_shifts,\n \"assigned_instances\": assigned_instances,\n \"pool_hours\": pool_hours,\n \"first_standing\": first_standing,\n \"second_standing\": second_standing,\n \"third_standing\": third_standing,\n \"can_edit\": any_management,\n }, context_instance=RequestContext(request))", - "docstring": "Show the user their workshift history for the current semester as well as\n upcoming shifts." - }, - { - "code": "def load_spitzer_catalog(show_progress=False):\n path = get_path('spitzer_example_catalog.xml', location='remote',\n show_progress=show_progress)\n table = Table.read(path)\n return table", - "docstring": "Load a 4.5 micron Spitzer catalog.\n\n The image from which this catalog was derived is returned by\n :func:`load_spitzer_image`.\n\n Parameters\n ----------\n show_progress : bool, optional\n Whether to display a progress bar during the download (default\n is `False`).\n\n Returns\n -------\n catalog : `~astropy.table.Table`\n The catalog of sources.\n\n See Also\n --------\n load_spitzer_image\n\n Examples\n --------\n .. plot::\n :include-source:\n\n from photutils import datasets\n catalog = datasets.load_spitzer_catalog()\n plt.scatter(catalog['l'], catalog['b'])\n plt.xlabel('Galactic l')\n plt.ylabel('Galactic b')\n plt.xlim(18.39, 18.05)\n plt.ylim(0.13, 0.30)" - }, - { - "code": "def basicauthfail(self, realm = b'all'):\n if not isinstance(realm, bytes):\n realm = realm.encode('ascii')\n self.start_response(401, [(b'WWW-Authenticate', b'Basic realm=\"' + realm + b'\"')])\n self.exit(b'
' + _createstatus(401) + b'
')", - "docstring": "Return 401 for authentication failure. This will end the handler." - }, - { - "code": "def encodeSentence(self, *words):\n encoded = map(self.encodeWord, words)\n encoded = b''.join(encoded)\n encoded += b'\\x00'\n return encoded", - "docstring": "Encode given sentence in API format.\n\n :param words: Words to endoce.\n :returns: Encoded sentence." - }, - { - "code": "def auth_edit(name, **kwargs):\n ctx = Context(**kwargs)\n ctx.timeout = None\n ctx.execute_action('auth:group:edit', **{\n 'storage': ctx.repo.create_secure_service('storage'),\n 'name': name,\n })", - "docstring": "Interactively edits an authorization group." - }, - { - "code": "def execute(connection: connection, statement: str) -> Optional[List[Tuple[str, ...]]]:\n response = list()\n with connection:\n with connection.cursor(cursor_factory=Psycopg2Cursor) as cursor:\n cursor.execute(statement)\n connection.commit()\n try:\n response = cursor.fetchall()\n if not response:\n log('', logger_name=_LOGGER_NAME)\n return None\n except ProgrammingError as e:\n if e.args and e.args[0] == 'no results to fetch':\n log('', logger_name=_LOGGER_NAME)\n return None\n raise e\n log('Response', logger_name=_LOGGER_NAME)\n log('--------', logger_name=_LOGGER_NAME)\n for line in response:\n log(str(line), logger_name=_LOGGER_NAME)\n return response", - "docstring": "Execute PGSQL statement and fetches the statement response.\n\n Parameters\n ----------\n connection: psycopg2.extensions.connection\n Active connection to a PostGreSQL database.\n statement: str\n PGSQL statement to run against the database.\n\n Returns\n -------\n response: list or None\n List of tuples, where each tuple represents a formatted line of response from the database engine, where\n each tuple item roughly corresponds to a column. For instance, while a raw SELECT response might include\n the table headers, psycopg2 returns only the rows that matched. If no response was given, None is returned." - }, - { - "code": "def visit_AST_or(self, pattern):\n return any(self.field_match(self.node, value_or)\n for value_or in pattern.args)", - "docstring": "Match if any of the or content match with the other node." - }, - { - "code": "def parse_input_file(text, variables=None):\n text = find_includes(text)\n lines = text.splitlines()\n tasks, linenumbers = find_tasks(lines)\n preamble = [line for line in lines[:linenumbers[0]]]\n logging.debug(\"Preamble:\\n{}\".format(\"\\n\".join(preamble)))\n if variables is not None:\n preamble += \"\\n\" + \"\\n\".join(variables)\n environment = create_environment(preamble)\n code_sections = []\n for n in range(len(linenumbers) - 1):\n code_sections.append((linenumbers[n], linenumbers[n+1]))\n for n, task in zip(code_sections, tasks):\n task[\"code\"] = lines[n[0]: n[1]]\n task[\"environment\"] = environment\n clean_tasks = []\n for task in tasks:\n clean_tasks.append(Task(**task))\n return clean_tasks", - "docstring": "Parser for a file with syntax somewhat similar to Drake." 
- }, - { - "code": "def get_recirc_content(self, published=True, count=3):\n query = self.get_query()\n if not query.get('included_ids'):\n qs = Content.search_objects.search()\n qs = qs.query(\n TagBoost(slugs=self.tags.values_list(\"slug\", flat=True))\n ).filter(\n ~Ids(values=[self.id])\n ).sort(\n \"_score\"\n )\n return qs[:count]\n query['included_ids'] = query['included_ids'][:count]\n search = custom_search_model(Content, query, published=published, field_map={\n \"feature_type\": \"feature_type.slug\",\n \"tag\": \"tags.slug\",\n \"content-type\": \"_type\"\n })\n return search", - "docstring": "gets the first 3 content objects in the `included_ids`" - }, - { - "code": "def create_environment(component_config):\n ret = os.environ.copy()\n for env in component_config.get_list(\"dp.env_list\"):\n real_env = env.upper()\n value = os.environ.get(real_env)\n value = _prepend_env(component_config, env, value)\n value = _append_env(component_config, env, value)\n _apply_change(ret, real_env, value, component_config)\n return ret", - "docstring": "Create a modified environment.\n\n Arguments\n component_config - The configuration for a component." - }, - { - "code": "def tfclasses():\n classes = {}\n mydir = op.dirname(op.abspath(inspect.getfile(get_mimetype)))\n tfcls = {\"\",\n \"\"}\n for filename in glob(op.join(mydir, '*.py')):\n name = op.splitext(op.basename(filename))[0]\n module = import_module('aston.tracefile.' + name)\n for clsname in dir(module):\n cls = getattr(module, clsname)\n if hasattr(cls, '__base__'):\n if str(cls.__base__) in tfcls:\n classes[cls.mime] = cls\n return classes", - "docstring": "A mapping of mimetypes to every class for reading data files." - }, - { - "code": "def match_future_child(self, parent, relation, recursive=False):\n match = False\n children = self.get_descendants if recursive else self.get_children\n for child in children(parent, no_iframe=self.iframe_restrict):\n match = self.match_selectors(child, relation)\n if match:\n break\n return match", - "docstring": "Match future child." - }, - { - "code": "def check_delta(fun, x, dxs, period=None):\n dn1s = []\n dn2s = []\n dnds = []\n for dx in dxs:\n f0, grad0 = fun(x, do_gradient=True)\n f1, grad1 = fun(x+dx, do_gradient=True)\n grad = 0.5*(grad0+grad1)\n d1 = f1 - f0\n if period is not None:\n d1 -= np.floor(d1/period + 0.5)*period\n if hasattr(d1, '__iter__'):\n norm = np.linalg.norm\n else:\n norm = abs\n d2 = np.dot(grad, dx)\n dn1s.append(norm(d1))\n dn2s.append(norm(d2))\n dnds.append(norm(d1-d2))\n dn1s = np.array(dn1s)\n dn2s = np.array(dn2s)\n dnds = np.array(dnds)\n threshold = np.median(dn1s)\n mask = dn1s > threshold\n if not (dnds[mask] < threshold).all():\n raise AssertionError((\n 'The first order approximation on the difference is too wrong. 
The '\n 'threshold is %.1e.\\n\\nDifferences:\\n%s\\n\\nFirst order '\n 'approximation to differences:\\n%s\\n\\nAbsolute errors:\\n%s')\n % (threshold,\n ' '.join('%.1e' % v for v in dn1s[mask]),\n ' '.join('%.1e' % v for v in dn2s[mask]),\n ' '.join('%.1e' % v for v in dnds[mask])\n ))", - "docstring": "Check the difference between two function values using the analytical gradient\n\n Arguments:\n | ``fun`` -- The function to be tested, more info below.\n | ``x`` -- The argument vector.\n | ``dxs`` -- A matrix where each row is a vector of small differences\n to be added to the argument vector.\n\n Optional argument:\n | ``period`` -- If the function value is periodic, one may provide the\n period such that differences are computed using\n periodic boundary conditions.\n\n The function ``fun`` takes a mandatory argument ``x`` and an optional\n argument ``do_gradient``:\n | ``x`` -- The arguments of the function to be tested.\n | ``do_gradient`` -- When False, only the function value is returned.\n When True, a 2-tuple with the function value and\n the gradient are returned. [default=False]\n\n For every row in dxs, the following computation is repeated:\n\n 1) D1 = 'f(x+dx) - f(x)' is computed.\n 2) D2 = '0.5 (grad f(x+dx) + grad f(x)) . dx' is computed.\n\n A threshold is set to the median of the D1 set. For each case where |D1|\n is larger than the threshold, |D1 - D2|, should be smaller than the\n threshold." - }, - { - "code": "def make_config(self, instance_relative: bool = False) -> Config:\n config = self.config_class(\n self.instance_path if instance_relative else self.root_path,\n DEFAULT_CONFIG,\n )\n config['ENV'] = get_env()\n config['DEBUG'] = get_debug_flag()\n return config", - "docstring": "Create and return the configuration with appropriate defaults." - }, - { - "code": "def server_link(rel, server_id=None, self_rel=False):\n servers_href = '/v1/servers'\n link = _SERVER_LINKS[rel].copy()\n link['href'] = link['href'].format(**locals())\n link['rel'] = 'self' if self_rel else rel\n return link", - "docstring": "Helper for getting a Server link document, given a rel." - }, - { - "code": "def _setLearningMode(self, l4Learning = False, l2Learning=False):\n for column in self.L4Columns:\n column.setParameter(\"learn\", 0, l4Learning)\n for column in self.L2Columns:\n column.setParameter(\"learningMode\", 0, l2Learning)", - "docstring": "Sets the learning mode for L4 and L2." - }, - { - "code": "def service_restart(service_name):\n if host.service_available(service_name):\n if host.service_running(service_name):\n host.service_restart(service_name)\n else:\n host.service_start(service_name)", - "docstring": "Wrapper around host.service_restart to prevent spurious \"unknown service\"\n messages in the logs." 
- }, - { - "code": "def _update_asset_content_filename_on_disk_to_match_id(self, ac):\n def has_secondary_storage():\n return 'secondary_data_store_path' in self._config_map\n datastore_path = ''\n secondary_data_store_path = ''\n if 'data_store_full_path' in self._config_map:\n datastore_path = self._config_map['data_store_full_path']\n if has_secondary_storage():\n secondary_data_store_path = self._config_map['secondary_data_store_path']\n relative_path = self._config_map['data_store_path']\n filepath = os.path.join(datastore_path, ac._my_map['url'])\n old_filename = os.path.splitext(os.path.basename(filepath))[0]\n new_path = filepath.replace(old_filename, ac.ident.identifier)\n os.rename(filepath, new_path)\n if has_secondary_storage():\n old_path = '{0}/repository/AssetContent'.format(relative_path)\n filepath = os.path.join(datastore_path, ac._my_map['url']).replace(old_path, secondary_data_store_path)\n old_filename = os.path.splitext(os.path.basename(filepath))[0]\n new_path = filepath.replace(old_filename, ac.ident.identifier)\n os.rename(filepath, new_path)", - "docstring": "Because we want the asset content filename to match the ac.ident,\n here we manipulate the saved file on disk after creating the\n asset content" - }, - { - "code": "def read(self, length=None):\n if not length is None:\n if not isinstance(length, inttype) :\n raise TypeError(\"length to read should be number\")\n buf = create_string_buffer(length)\n readbytes = libcrypto.BIO_read(self.bio, buf, length)\n if readbytes == -2:\n raise NotImplementedError(\"Function is not supported by\" +\n \"this BIO\")\n if readbytes == -1:\n raise IOError\n if readbytes == 0:\n return b\"\"\n return buf.raw[:readbytes]\n else:\n buf = create_string_buffer(1024)\n out = b\"\"\n readbytes = 1\n while readbytes > 0:\n readbytes = libcrypto.BIO_read(self.bio, buf, 1024)\n if readbytes == -2:\n raise NotImplementedError(\"Function is not supported by \" +\n \"this BIO\")\n if readbytes == -1:\n raise IOError\n if readbytes > 0:\n out += buf.raw[:readbytes]\n return out", - "docstring": "Reads data from readble BIO. For test purposes.\n @param length - if specifed, limits amount of data read.\n If not BIO is read until end of buffer" - }, - { - "code": "def list_subnets(conn=None, call=None, kwargs=None):\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The list_subnets function must be called with '\n '-f or --function.'\n )\n if conn is None:\n conn = get_conn()\n if kwargs is None or (isinstance(kwargs, dict) and 'network' not in kwargs):\n raise SaltCloudSystemExit(\n 'A `network` must be specified'\n )\n return conn.list_subnets(filters={'network': kwargs['network']})", - "docstring": "List subnets in a virtual network\n\n network\n network to list subnets of\n\n .. code-block:: bash\n\n salt-cloud -f list_subnets myopenstack network=salt-net" - }, - { - "code": "def _setup_arm_arch(self, arch_mode=None):\n if arch_mode is None:\n arch_mode = ARCH_ARM_MODE_THUMB\n self.name = \"ARM\"\n self.arch_info = ArmArchitectureInformation(arch_mode)\n self.disassembler = ArmDisassembler(architecture_mode=arch_mode)\n self.ir_translator = ArmTranslator(architecture_mode=arch_mode)", - "docstring": "Set up ARM architecture." 
- }, - { - "code": "def get_event_abi(self, contract_name: str, event_name: str) -> Dict:\n from web3.utils.contracts import find_matching_event_abi\n assert self.contracts, 'ContractManager should have contracts compiled'\n contract_abi = self.get_contract_abi(contract_name)\n return find_matching_event_abi(\n abi=contract_abi,\n event_name=event_name,\n )", - "docstring": "Returns the ABI for a given event." - }, - { - "code": "def parse_docstring(thing):\n assert not isinstance(thing, bytes)\n doc = cleandoc(thing) if isinstance(thing, str) else getdoc(thing)\n doc = empty if doc is None else doc\n assert not isinstance(doc, bytes)\n parts = docstring_split(doc)\n if len(parts) == 2:\n title, body = parts[0], parts[1]\n else:\n title, body = parts[0], empty\n title = remove_line_breaks(title)\n body = body.replace(\"\\r\\n\", newline).replace(\"\\r\", newline)\n return docstring(title, body)", - "docstring": "Parse a Python docstring, or the docstring found on `thing`.\n\n :return: a ``(title, body)`` tuple. As per docstring convention, title is\n the docstring's first paragraph and body is the rest." - }, - { - "code": "def _get_video_ts_file_paths(dvd_path):\n video_ts_folder_path = join(dvd_path, \"VIDEO_TS\")\n video_ts_file_paths = []\n for video_ts_folder_content_name in listdir(video_ts_folder_path):\n video_ts_folder_content_path = join(video_ts_folder_path, video_ts_folder_content_name)\n if isfile(video_ts_folder_content_path):\n video_ts_file_paths.append(video_ts_folder_content_path)\n return sorted(video_ts_file_paths)", - "docstring": "Returns a sorted list of paths for files contained in th VIDEO_TS folder of the specified\n DVD path." - }, - { - "code": "def struct(self):\r\n data = {}\r\n for var, fmap in self._def.items():\r\n if hasattr(self, var):\r\n data.update(fmap.get_outputs(getattr(self, var)))\r\n return data", - "docstring": "XML-RPC-friendly representation of the current object state" - }, - { - "code": "def has_object_permission(self, request, view, obj):\n return (\n request.user.is_superuser or\n super(IAWPOrSuperuser, self).has_object_permission(\n request=request, view=view, obj=obj\n )\n )", - "docstring": "Checks if user is superuser or it has permission over object\n\n Parameters\n ----------\n request\n view\n obj\n\n Returns\n -------" - }, - { - "code": "def build_job(name=None, parameters=None):\n if not name:\n raise SaltInvocationError('Required parameter \\'name\\' is missing')\n server = _connect()\n if not job_exists(name):\n raise CommandExecutionError('Job \\'{0}\\' does not exist.'.format(name))\n try:\n server.build_job(name, parameters)\n except jenkins.JenkinsException as err:\n raise CommandExecutionError(\n 'Encountered error building job \\'{0}\\': {1}'.format(name, err)\n )\n return True", - "docstring": "Initiate a build for the provided job.\n\n :param name: The name of the job is check if it exists.\n :param parameters: Parameters to send to the job.\n :return: True is successful, otherwise raise an exception.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' jenkins.build_job jobname" - }, - { - "code": "def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):\n self.purge_metadata()\n self.pre_fork(**pre_fork_opts or {})\n pid = os.fork()\n if pid == 0:\n try:\n os.setsid()\n os.chdir(self._buildroot)\n self.post_fork_child(**post_fork_child_opts or {})\n except Exception:\n logger.critical(traceback.format_exc())\n finally:\n os._exit(0)\n else:\n try:\n self.post_fork_parent(**post_fork_parent_opts or {})\n except Exception:\n logger.critical(traceback.format_exc())", - "docstring": "Perform a single-fork to run a subprocess and write the child pid file.\n\n Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this\n case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.\n Using this daemonization method vs daemonize() leaves the responsibility of writing the pid\n to the caller to allow for library-agnostic flexibility in subprocess execution." - }, - { - "code": "def bool_from_exists_clause(session: Session,\n exists_clause: Exists) -> bool:\n if session.get_bind().dialect.name == SqlaDialectName.MSSQL:\n result = session.query(literal(True)).filter(exists_clause).scalar()\n else:\n result = session.query(exists_clause).scalar()\n return bool(result)", - "docstring": "Database dialects are not consistent in how ``EXISTS`` clauses can be\n converted to a boolean answer. This function manages the inconsistencies.\n\n See:\n \n - https://bitbucket.org/zzzeek/sqlalchemy/issues/3212/misleading-documentation-for-queryexists\n - http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.exists\n \n Specifically, we want this:\n \n *SQL Server*\n \n .. code-block:: sql\n \n SELECT 1 WHERE EXISTS (SELECT 1 FROM table WHERE ...)\n -- ... giving 1 or None (no rows)\n -- ... fine for SQL Server, but invalid for MySQL (no FROM clause)\n \n *Others, including MySQL*\n \n .. code-block:: sql\n \n SELECT EXISTS (SELECT 1 FROM table WHERE ...)\n -- ... giving 1 or 0\n -- ... 
fine for MySQL, but invalid syntax for SQL Server" - }, - { - "code": "def apply_limit_to_sql(cls, sql, limit, database):\n if cls.limit_method == LimitMethod.WRAP_SQL:\n sql = sql.strip('\\t\\n ;')\n qry = (\n select('*')\n .select_from(\n TextAsFrom(text(sql), ['*']).alias('inner_qry'),\n )\n .limit(limit)\n )\n return database.compile_sqla_query(qry)\n elif LimitMethod.FORCE_LIMIT:\n parsed_query = sql_parse.ParsedQuery(sql)\n sql = parsed_query.get_query_with_new_limit(limit)\n return sql", - "docstring": "Alters the SQL statement to apply a LIMIT clause" - }, - { - "code": "def up(force=True, env=None, **kwargs):\n \"Starts a new experiment\"\n inventory = os.path.join(os.getcwd(), \"hosts\")\n conf = Configuration.from_dictionnary(provider_conf)\n provider = Enos_vagrant(conf)\n roles, networks = provider.init()\n check_networks(roles, networks)\n env[\"roles\"] = roles\n env[\"networks\"] = networks", - "docstring": "Starts a new experiment" - }, - { - "code": "def GetEntries(self, parser_mediator, data=None, **unused_kwargs):\n for key, value in iter(data.items()):\n if not '.torrent' in key:\n continue\n caption = value.get('caption')\n path = value.get('path')\n seedtime = value.get('seedtime')\n if not caption or not path or seedtime < 0:\n raise errors.WrongBencodePlugin(self.NAME)\n for torrent, value in iter(data.items()):\n if not '.torrent' in torrent:\n continue\n event_data = UTorrentEventData()\n event_data.caption = value.get('caption', None)\n event_data.path = value.get('path', None)\n seedtime = value.get('seedtime', None)\n event_data.seedtime, _ = divmod(seedtime, 60)\n for event_key, event_value in iter(value.items()):\n if event_key == 'added_on':\n date_time = dfdatetime_posix_time.PosixTime(timestamp=event_value)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_ADDED)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n elif event_key == 'completed_on':\n date_time = dfdatetime_posix_time.PosixTime(timestamp=event_value)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n elif event_key == 'modtimes':\n for modtime in event_value:\n if not modtime:\n continue\n date_time = dfdatetime_posix_time.PosixTime(timestamp=modtime)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", - "docstring": "Extracts uTorrent active torrents.\n\n This is the main parsing engine for the plugin. It determines if\n the selected file is the proper file to parse and extracts current\n running torrents.\n\n interface.Process() checks for the given BENCODE_KEYS set, ensures\n that it matches, and then passes the bencoded data to this function for\n parsing. This plugin then parses the entire set of bencoded data to extract\n the variable file-name keys to retrieve their values.\n\n uTorrent creates a file, resume.dat, and a backup, resume.dat.old, to\n for all active torrents. This is typically stored in the user's\n application data folder.\n\n These files, at a minimum, contain a '.fileguard' key and a dictionary\n with a key name for a particular download with a '.torrent' file\n extension.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n data (Optional[dict[str, object]]): bencode data values." 
- }, - { - "code": "def _tiling_crit(self, edge1, edge2, edge_90, max_order, kwargs_lens):\n ra_1, dec_1, mag_1 = edge1\n ra_2, dec_2, mag_2 = edge2\n ra_3, dec_3, mag_3 = edge_90\n sign_list = np.sign([mag_1, mag_2, mag_3])\n if sign_list[0] == sign_list[1] and sign_list[0] == sign_list[2]:\n return [], []\n else:\n max_order -= 1\n if max_order <= 0:\n return [(ra_1 + ra_2 + ra_3)/3], [(dec_1 + dec_2 + dec_3)/3]\n else:\n ra_90_ = (ra_1 + ra_2)/2\n dec_90_ = (dec_1 + dec_2)/2\n mag_90_ = self._lensModel.magnification(ra_90_, dec_90_, kwargs_lens)\n edge_90_ = [ra_90_, dec_90_, mag_90_]\n ra_crit, dec_crit = self._tiling_crit(edge1=edge_90, edge2=edge1, edge_90=edge_90_, max_order=max_order,\n kwargs_lens=kwargs_lens)\n ra_crit_2, dec_crit_2 = self._tiling_crit(edge1=edge_90, edge2=edge2, edge_90=edge_90_, max_order=max_order,\n kwargs_lens=kwargs_lens)\n ra_crit += ra_crit_2\n dec_crit += dec_crit_2\n return ra_crit, dec_crit", - "docstring": "tiles a rectangular triangle and compares the signs of the magnification\n\n :param edge1: [ra_coord, dec_coord, magnification]\n :param edge2: [ra_coord, dec_coord, magnification]\n :param edge_90: [ra_coord, dec_coord, magnification]\n :param max_order: maximal order to fold triangle\n :return:" - }, - { - "code": "def _send_string(self,value):\n if type(value) != bytes:\n value = \"{}\".format(value).encode(\"ascii\")\n return value", - "docstring": "Convert a string to a bytes object. If value is not a string, it is\n be converted to one with a standard string.format call." - }, - { - "code": "def overlap_summary(self):\n olaps = self.compute_overlaps()\n table = [[\"5%: \",np.percentile(olaps,5)],\n [\"25%: \",np.percentile(olaps,25)],\n [\"50%: \",np.percentile(olaps,50)],\n [\"75%: \",np.percentile(olaps,75)],\n [\"95%: \",np.percentile(olaps,95)],\n [\" \" , \" \"],\n [\"Min: \",np.min(olaps)],\n [\"Mean: \",np.mean(olaps)],\n [\"Max: \",np.max(olaps)]]\n header = [\"Percentile\",\"Overlap\"]\n print tabulate(table,header,tablefmt=\"rst\")", - "docstring": "print summary of reconstruction overlaps" - }, - { - "code": "def token(self):\n return self._portalTokenHandler.servertoken(serverURL=self._serverUrl,\n referer=self._referer)", - "docstring": "gets the AGS server token" - }, - { - "code": "async def query_firmware(self):\n _version = await self.request.get(join_path(self._base_path, \"/fwversion\"))\n _fw = _version.get(\"firmware\")\n if _fw:\n _main = _fw.get(\"mainProcessor\")\n if _main:\n self._main_processor_version = self._make_version(_main)\n _radio = _fw.get(\"radio\")\n if _radio:\n self._radio_version = self._make_version(_radio)", - "docstring": "Query the firmware versions." 
- }, - { - "code": "def showMessageDialog(title, text):\n dlg = QgsMessageOutput.createMessageOutput()\n dlg.setTitle(title)\n dlg.setMessage(text, QgsMessageOutput.MessageHtml)\n dlg.showMessage()", - "docstring": "Show a dialog containing a given text, with a given title.\n\n The text accepts HTML syntax" - }, - { - "code": "def process_array(elt, ascii=False):\n del ascii\n chld = elt.getchildren()\n if len(chld) > 1:\n raise ValueError()\n chld = chld[0]\n try:\n name, current_type, scale = CASES[chld.tag](chld)\n size = None\n except ValueError:\n name, current_type, size, scale = CASES[chld.tag](chld)\n del name\n myname = elt.get(\"name\") or elt.get(\"label\")\n if elt.get(\"length\").startswith(\"$\"):\n length = int(VARIABLES[elt.get(\"length\")[1:]])\n else:\n length = int(elt.get(\"length\"))\n if size is not None:\n return (myname, current_type, (length, ) + size, scale)\n else:\n return (myname, current_type, (length, ), scale)", - "docstring": "Process an 'array' tag." - }, - { - "code": "def _compute_path(self, name):\n path = self._normalize_path(os.path.join(self.base_directory, name))\n if not path.startswith(self.base_directory):\n raise FileNotWithinStorageError(name)\n return path", - "docstring": "Compute the file path in the filesystem from the given name.\n\n :param name: the filename for which the to compute the path\n :raises FileNotWithinStorage: if the computed path is not within\n :attr:`base_directory`." - }, - { - "code": "def xyz(self):\n if not self.children:\n pos = np.expand_dims(self._pos, axis=0)\n else:\n arr = np.fromiter(itertools.chain.from_iterable(\n particle.pos for particle in self.particles()), dtype=float)\n pos = arr.reshape((-1, 3))\n return pos", - "docstring": "Return all particle coordinates in this compound.\n\n Returns\n -------\n pos : np.ndarray, shape=(n, 3), dtype=float\n Array with the positions of all particles." - }, - { - "code": "def suspend(self):\n hThread = self.get_handle(win32.THREAD_SUSPEND_RESUME)\n if self.is_wow64():\n try:\n return win32.Wow64SuspendThread(hThread)\n except AttributeError:\n pass\n return win32.SuspendThread(hThread)", - "docstring": "Suspends the thread execution.\n\n @rtype: int\n @return: Suspend count. If zero, the thread is running." - }, - { - "code": "def fetch_arc_errors(self):\n error_list = []\n hnode = self.validate_first_element()\n if hnode:\n error_list.append({'hook_error': hnode})\n rnode = self.validate_last_element()\n if rnode:\n error_list.append({'reso_error': rnode})\n try:\n self.validate_generations()\n except ArcGenerationError as ag:\n error_list.append({'generation_error': str(ag)})\n milecheck = self.validate_milestones()\n if milecheck:\n error_list.append({'mseq_error': milecheck})\n return error_list", - "docstring": "Evaluates the current tree of the arc and provides a list of errors that\n the user should correct." 
- }, - { - "code": "def endpoint_absent(name, region=None, profile=None, interface=None, **connection_args):\n ret = {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': 'Endpoint for service \"{0}\"{1} is already absent'.format(name,\n ', interface \"{0}\",'.format(interface) if interface is not None else '')}\n endpoint = __salt__['keystone.endpoint_get'](name, region,\n profile=profile,\n interface=interface,\n **connection_args)\n if not endpoint:\n return ret\n else:\n if __opts__.get('test'):\n ret['result'] = None\n ret['comment'] = 'Endpoint for service \"{0}\" will be deleted'.format(name)\n return ret\n __salt__['keystone.endpoint_delete'](name, region,\n profile=profile,\n interface=interface,\n **connection_args)\n ret['comment'] = 'Endpoint for service \"{0}\"{1} has been deleted'.format(name,\n ', interface \"{0}\",'.format(interface) if interface is not None else '')\n ret['changes']['endpoint'] = 'Deleted'\n return ret", - "docstring": "Ensure that the endpoint for a service doesn't exist in Keystone catalog\n\n name\n The name of the service whose endpoints should not exist\n\n region (optional)\n The region of the endpoint. Defaults to ``RegionOne``.\n\n interface\n The interface type, which describes the visibility\n of the endpoint. (for V3 API)" - }, - { - "code": "def create_snapshot(self, volume_id, notes='', **kwargs):\n return self.client.call('Network_Storage', 'createSnapshot',\n notes, id=volume_id, **kwargs)", - "docstring": "Creates a snapshot on the given block volume.\n\n :param integer volume_id: The id of the volume\n :param string notes: The notes or \"name\" to assign the snapshot\n :return: Returns the id of the new snapshot" - }, - { - "code": "def _post_analysis(self):\n self._make_completed_functions()\n new_changes = self._iteratively_analyze_function_features()\n functions_do_not_return = new_changes['functions_do_not_return']\n self._update_function_callsites(functions_do_not_return)\n for _, edges in self._pending_edges.items():\n for src_node, dst_node, data in edges:\n self._graph_add_edge(src_node, dst_node, **data)\n self._remove_non_return_edges()\n CFGBase._post_analysis(self)", - "docstring": "Post-CFG-construction.\n\n :return: None" - }, - { - "code": "def is_bool_dtype(arr_or_dtype):\n if arr_or_dtype is None:\n return False\n try:\n dtype = _get_dtype(arr_or_dtype)\n except TypeError:\n return False\n if isinstance(arr_or_dtype, CategoricalDtype):\n arr_or_dtype = arr_or_dtype.categories\n if isinstance(arr_or_dtype, ABCIndexClass):\n return (arr_or_dtype.is_object and\n arr_or_dtype.inferred_type == 'boolean')\n elif is_extension_array_dtype(arr_or_dtype):\n dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)\n return dtype._is_boolean\n return issubclass(dtype.type, np.bool_)", - "docstring": "Check whether the provided array or dtype is of a boolean dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a boolean dtype.\n\n Notes\n -----\n An ExtensionArray is considered boolean when the ``_is_boolean``\n attribute is set to True.\n\n Examples\n --------\n >>> is_bool_dtype(str)\n False\n >>> is_bool_dtype(int)\n False\n >>> is_bool_dtype(bool)\n True\n >>> is_bool_dtype(np.bool)\n True\n >>> is_bool_dtype(np.array(['a', 'b']))\n False\n >>> is_bool_dtype(pd.Series([1, 2]))\n False\n >>> is_bool_dtype(np.array([True, False]))\n True\n >>> is_bool_dtype(pd.Categorical([True, False]))\n True\n >>> 
is_bool_dtype(pd.SparseArray([True, False]))\n True" - }, - { - "code": "def inverse_lin_decay(max_step, min_value=0.01, step=None):\n if step is None:\n step = tf.train.get_global_step()\n if step is None:\n return 1.0\n step = to_float(step)\n progress = tf.minimum(step / float(max_step), 1.0)\n return progress * (1.0 - min_value) + min_value", - "docstring": "Inverse-decay linearly from 0.01 to 1.0 reached at max_step." - }, - { - "code": "def cmd_ssh_user(tar_aminame, inst_name):\n if tar_aminame == \"Unknown\":\n tar_aminame = inst_name\n userlu = {\"ubunt\": \"ubuntu\", \"debia\": \"admin\", \"fedor\": \"root\",\n \"cento\": \"centos\", \"openb\": \"root\"}\n usertemp = ['name'] + [value for key, value in list(userlu.items())\n if key in tar_aminame.lower()]\n usertemp = dict(zip(usertemp[::2], usertemp[1::2]))\n username = usertemp.get('name', 'ec2-user')\n debg.dprint(\"loginuser Calculated: \", username)\n return username", - "docstring": "Calculate instance login-username based on image-name.\n\n Args:\n tar_aminame (str): name of the image instance created with.\n inst_name (str): name of the instance.\n Returns:\n username (str): name for ssh based on AMI-name." - }, - { - "code": "def filter(self, order_by=None, limit=0, **kwargs):\n with rconnect() as conn:\n if len(kwargs) == 0:\n raise ValueError\n try:\n query = self._base()\n query = query.filter(kwargs)\n if order_by is not None:\n query = self._order_by(query, order_by)\n if limit > 0:\n query = self._limit(query, limit)\n log.debug(query)\n rv = query.run(conn)\n except ReqlOpFailedError as e:\n log.warn(e)\n raise\n except Exception as e:\n log.warn(e)\n raise\n else:\n data = [self._model(_) for _ in rv]\n return data", - "docstring": "Fetch a list of instances.\n\n :param order_by: column on which to order the results. \\\n To change the sort, prepend with < or >.\n :param limit: How many rows to fetch.\n :param kwargs: keyword args on which to filter, column=value" - }, - { - "code": "def require_api_auth(allow_anonymous=False):\n def wrapper(f):\n f_oauth_required = oauth2.require_oauth()(f)\n @wraps(f)\n def decorated(*args, **kwargs):\n if not hasattr(current_user, 'login_via_oauth2'):\n if not current_user.is_authenticated:\n if allow_anonymous:\n return f(*args, **kwargs)\n abort(401)\n if current_app.config['ACCOUNTS_JWT_ENABLE']:\n current_oauth2server.jwt_veryfication_factory(\n request.headers)\n return f(*args, **kwargs)\n else:\n return f_oauth_required(*args, **kwargs)\n return decorated\n return wrapper", - "docstring": "Decorator to require API authentication using OAuth token.\n\n :param allow_anonymous: Allow access without OAuth token\n (default: ``False``)." 
- }, - { - "code": "def div_filter(key: str, value: list, format: str, meta: Any) -> Optional[list]:\n if key != \"Div\" or format != \"latex\":\n return None\n [[_, classes, _], contents] = value\n try:\n alert_type = [name.split(\"-\")[1] for name in classes if \"-\" in name][0]\n except IndexError:\n return None\n if alert_type not in ALLOWED_ALERT_TYPES.__members__:\n return None\n filtered = [RawBlock(\"latex\", rf\"\\begin{{{alert_type}box}}\")]\n filtered.extend(contents)\n filtered.append(RawBlock(\"latex\", rf\"\\end{{{alert_type}box}}\"))\n return filtered", - "docstring": "Filter the JSON ``value`` for alert divs.\n\n Arguments\n ---------\n key\n Key of the structure\n value\n Values in the structure\n format\n Output format of the processing\n meta\n Meta information" - }, - { - "code": "def recurseforumcontents(parser, token):\n bits = token.contents.split()\n forums_contents_var = template.Variable(bits[1])\n template_nodes = parser.parse(('endrecurseforumcontents',))\n parser.delete_first_token()\n return RecurseTreeForumVisibilityContentNode(template_nodes, forums_contents_var)", - "docstring": "Iterates over the content nodes and renders the contained forum block for each node." - }, - { - "code": "def eval(self, id1, id2, inst1):\n jinst1 = None\n if inst1 is not None:\n jinst1 = inst1.jobject\n return javabridge.call(self.jobject, \"eval\", \"(IILweka/core/Instance;)D\", id1, id2, jinst1)", - "docstring": "Computes the result of the kernel function for two instances. If id1 == -1, eval use inst1 instead of an\n instance in the dataset.\n\n :param id1: the index of the first instance in the dataset\n :type id1: int\n :param id2: the index of the second instance in the dataset\n :type id2: int\n :param inst1: the instance corresponding to id1 (used if id1 == -1)\n :type inst1: Instance" - }, - { - "code": "def count_alleles_subpops(self, subpops, max_allele=None):\n if max_allele is None:\n max_allele = self.max()\n out = {name: self.count_alleles(max_allele=max_allele, subpop=subpop)\n for name, subpop in subpops.items()}\n return out", - "docstring": "Count alleles for multiple subpopulations simultaneously.\n\n Parameters\n ----------\n subpops : dict (string -> sequence of ints)\n Mapping of subpopulation names to sample indices.\n max_allele : int, optional\n The highest allele index to count. Alleles above this will be\n ignored.\n\n Returns\n -------\n out : dict (string -> AlleleCountsArray)\n A mapping of subpopulation names to allele counts arrays." - }, - { - "code": "def create_model(self, name, role, container_defs, vpc_config=None,\n enable_network_isolation=False, primary_container=None,\n tags=None):\n if container_defs and primary_container:\n raise ValueError('Both container_defs and primary_container can not be passed as input')\n if primary_container:\n msg = 'primary_container is going to be deprecated in a future release. 
Please use container_defs instead.'\n warnings.warn(msg, DeprecationWarning)\n container_defs = primary_container\n role = self.expand_role(role)\n if isinstance(container_defs, list):\n container_definition = container_defs\n else:\n container_definition = _expand_container_def(container_defs)\n create_model_request = _create_model_request(name=name,\n role=role,\n container_def=container_definition,\n tags=tags)\n if vpc_config:\n create_model_request['VpcConfig'] = vpc_config\n if enable_network_isolation:\n create_model_request['EnableNetworkIsolation'] = True\n LOGGER.info('Creating model with name: {}'.format(name))\n LOGGER.debug('CreateModel request: {}'.format(json.dumps(create_model_request, indent=4)))\n try:\n self.sagemaker_client.create_model(**create_model_request)\n except ClientError as e:\n error_code = e.response['Error']['Code']\n message = e.response['Error']['Message']\n if error_code == 'ValidationException' and 'Cannot create already existing model' in message:\n LOGGER.warning('Using already existing model: {}'.format(name))\n else:\n raise\n return name", - "docstring": "Create an Amazon SageMaker ``Model``.\n Specify the S3 location of the model artifacts and Docker image containing\n the inference code. Amazon SageMaker uses this information to deploy the\n model in Amazon SageMaker. This method can also be used to create a Model for an Inference Pipeline\n if you pass the list of container definitions through the containers parameter.\n\n Args:\n name (str): Name of the Amazon SageMaker ``Model`` to create.\n role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs\n that create Amazon SageMaker endpoints use this role to access training data and model artifacts.\n You must grant sufficient permissions to this role.\n container_defs (list[dict[str, str]] or [dict[str, str]]): A single container definition or a list of\n container definitions which will be invoked sequentially while performing the prediction. If the list\n contains only one container, then it'll be passed to SageMaker Hosting as the ``PrimaryContainer`` and\n otherwise, it'll be passed as ``Containers``.You can also specify the return value of\n ``sagemaker.get_container_def()`` or ``sagemaker.pipeline_container_def()``, which will used to\n create more advanced container configurations ,including model containers which need artifacts from S3.\n vpc_config (dict[str, list[str]]): The VpcConfig set on the model (default: None)\n * 'Subnets' (list[str]): List of subnet ids.\n * 'SecurityGroupIds' (list[str]): List of security group ids.\n enable_network_isolation (bool): Wether the model requires network isolation or not.\n primary_container (str or dict[str, str]): Docker image which defines the inference code.\n You can also specify the return value of ``sagemaker.container_def()``, which is used to create\n more advanced container configurations, including model containers which need artifacts from S3. This\n field is deprecated, please use container_defs instead.\n tags(List[dict[str, str]]): Optional. The list of tags to add to the model. Example:\n >>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]\n For more information about tags, see https://boto3.amazonaws.com/v1/documentation\\\n /api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags\n\n\n Returns:\n str: Name of the Amazon SageMaker ``Model`` created." 
- }, - { - "code": "def utcnow_ts():\n if utcnow.override_time is None:\n return int(time.time())\n return calendar.timegm(utcnow().timetuple())", - "docstring": "Timestamp version of our utcnow function." - }, - { - "code": "def new(self, **dict):\n if not self._item_new_path:\n raise AttributeError('new is not available for %s' % self._item_name)\n for tag in self._object._remap_to_id:\n self._object._remap_tag_to_tag_id(tag, dict)\n target = self._item_new_path\n payload = json.dumps({self._item_type:dict})\n json_data = self._redmine.post(target, payload)\n data = self._redmine.unwrap_json(self._item_type, json_data)\n data['_source_path'] = target\n return self._objectify(data=data)", - "docstring": "Create a new item with the provided dict information. Returns the new item." - }, - { - "code": "def write_fasta(\n init_fasta, info_frags, output=DEFAULT_NEW_GENOME_NAME, junction=False\n):\n init_genome = {\n record.id: record.seq for record in SeqIO.parse(init_fasta, \"fasta\")\n }\n my_new_records = []\n with open(info_frags, \"r\") as info_frags_handle:\n current_seq = \"\"\n current_id = None\n previous_contig = None\n for line in info_frags_handle:\n if line.startswith(\">\"):\n previous_contig = None\n if current_id is not None:\n new_record = SeqRecord(\n current_seq, id=current_id, description=\"\"\n )\n my_new_records.append(new_record)\n current_seq = \"\"\n current_id = str(line[1:])\n elif line.startswith(\"init_contig\"):\n previous_contig = None\n else:\n (init_contig, _, orientation, pos_start, pos_end) = str(\n line[:-1]\n ).split(\"\\t\")\n start = int(pos_start)\n end = int(pos_end)\n ori = int(orientation)\n assert start < end\n assert ori in {-1, 1}\n seq_to_add = init_genome[init_contig][start:end]\n if ori == 1:\n current_seq += seq_to_add\n elif ori == -1:\n current_seq += seq_to_add.reverse_complement()\n if junction and previous_contig not in {init_contig, None}:\n error_was_raised = False\n try:\n extra_seq = Seq(junction, IUPAC.ambiguous_dna)\n current_seq = extra_seq + current_seq\n except TypeError:\n if not error_was_raised:\n print(\"Invalid junction sequence\")\n error_was_raised = True\n previous_contig = init_contig\n new_record = SeqRecord(current_seq, id=current_id, description=\"\")\n my_new_records.append(new_record)\n SeqIO.write(my_new_records, output, \"fasta\")", - "docstring": "Convert an info_frags.txt file into a fasta file given a reference.\n Optionally adds junction sequences to reflect the possibly missing base\n pairs between two newly joined scaffolds." 
- }, - { - "code": "def _create_dictionary_of_marshall(\n self,\n marshallQuery,\n marshallTable):\n self.log.debug(\n 'starting the ``_create_dictionary_of_marshall`` method')\n dictList = []\n tableName = self.dbTableName\n rows = readquery(\n log=self.log,\n sqlQuery=marshallQuery,\n dbConn=self.pmDbConn,\n quiet=False\n )\n totalCount = len(rows)\n count = 0\n for row in rows:\n if \"dateCreated\" in row:\n del row[\"dateCreated\"]\n count += 1\n if count > 1:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n print \"%(count)s / %(totalCount)s `%(tableName)s` data added to memory\" % locals()\n dictList.append(dict(row))\n self.log.debug(\n 'completed the ``_create_dictionary_of_marshall`` method')\n return dictList", - "docstring": "create a list of dictionaries containing all the rows in the marshall stream\n\n **Key Arguments:**\n - ``marshallQuery`` -- the query used to lift the required data from the marshall database.\n - ``marshallTable`` -- the name of the marshall table we are lifting the data from.\n\n **Return:**\n - ``dictList`` - a list of dictionaries containing all the rows in the marshall stream" - }, - { - "code": "def get_octal(self, c, i):\n index = i.index\n value = []\n zero_count = 0\n try:\n if c == '0':\n for x in range(3):\n if c != '0':\n break\n value.append(c)\n c = next(i)\n zero_count = len(value)\n if zero_count < 3:\n for x in range(3 - zero_count):\n if c not in _OCTAL:\n break\n value.append(c)\n c = next(i)\n i.rewind(1)\n except StopIteration:\n pass\n octal_count = len(value)\n if not (self.use_format and octal_count) and not (zero_count and octal_count < 3) and octal_count != 3:\n i.rewind(i.index - index)\n value = []\n return ''.join(value) if value else None", - "docstring": "Get octal." - }, - { - "code": "def import_from_xlsx(\n filename_or_fobj,\n sheet_name=None,\n sheet_index=0,\n start_row=None,\n start_column=None,\n end_row=None,\n end_column=None,\n workbook_kwargs=None,\n *args,\n **kwargs\n):\n workbook_kwargs = workbook_kwargs or {}\n if \"read_only\" not in workbook_kwargs:\n workbook_kwargs[\"read_only\"] = True\n workbook = load_workbook(filename_or_fobj, **workbook_kwargs)\n if sheet_name is None:\n sheet_name = workbook.sheetnames[sheet_index]\n sheet = workbook[sheet_name]\n min_row, min_column = sheet.min_row - 1, sheet.min_column - 1\n max_row, max_column = sheet.max_row - 1, sheet.max_column - 1\n start_row = start_row if start_row is not None else min_row\n end_row = end_row if end_row is not None else max_row\n start_column = start_column if start_column is not None else min_column\n end_column = end_column if end_column is not None else max_column\n table_rows = []\n is_empty = lambda row: all(cell is None for cell in row)\n selected_rows = sheet.iter_rows(\n min_row=start_row + 1,\n max_row=end_row + 1,\n min_col=start_column + 1,\n max_col=end_column + 1,\n )\n for row in selected_rows:\n row = [_cell_to_python(cell) for cell in row]\n if not is_empty(row):\n table_rows.append(row)\n source = Source.from_file(filename_or_fobj, plugin_name=\"xlsx\")\n source.fobj.close()\n metadata = {\"imported_from\": \"xlsx\", \"source\": source, \"name\": sheet_name}\n return create_table(table_rows, meta=metadata, *args, **kwargs)", - "docstring": "Return a rows.Table created from imported XLSX file.\n\n workbook_kwargs will be passed to openpyxl.load_workbook" - }, - { - "code": "def _get_total_sigma(self, C, std_intra, std_inter):\n return np.sqrt(std_intra ** 2. + std_inter ** 2. 
+ C['c_lny'] ** 2.)", - "docstring": "Returns the total sigma term for the arbitrary horizontal component of\n ground motion defined by equation 18, page 150" - }, - { - "code": "def roles(self):\n roles = []\n for ur in self.roleusers:\n roles.append(ur.role)\n return set(roles)", - "docstring": "Return a set with all roles granted to the user." - }, - { - "code": "def add_lambda_permissions(function='',\n statement_id='',\n action='lambda:InvokeFunction',\n principal='',\n source_arn='',\n env='',\n region='us-east-1'):\n session = boto3.Session(profile_name=env, region_name=region)\n lambda_client = session.client('lambda')\n response_action = None\n prefixed_sid = FOREMAST_PREFIX + statement_id\n add_permissions_kwargs = {\n 'FunctionName': function,\n 'StatementId': prefixed_sid,\n 'Action': action,\n 'Principal': principal,\n }\n if source_arn:\n add_permissions_kwargs['SourceArn'] = source_arn\n try:\n lambda_client.add_permission(**add_permissions_kwargs)\n response_action = 'Add permission with Sid: {}'.format(prefixed_sid)\n except boto3.exceptions.botocore.exceptions.ClientError as error:\n LOG.debug('Add permission error: %s', error)\n response_action = \"Did not add permissions\"\n LOG.debug('Related StatementId (SID): %s', prefixed_sid)\n LOG.info(response_action)", - "docstring": "Add permission to Lambda for the event trigger.\n\n Args:\n function (str): Lambda function name\n statement_id (str): IAM policy statement (principal) id\n action (str): Lambda action to allow\n principal (str): AWS principal to add permissions\n source_arn (str): ARN of the source of the event. Only needed for S3\n env (str): Environment/account of function\n region (str): AWS region of function" - }, - { - "code": "def fetch(self, remote='origin'):\n git(self.gitdir, \"fetch\", remote, _env=self.env())", - "docstring": "fetch from a remote" - }, - { - "code": "def get_backend(backend_class=None):\n cache_name = '_backend_instance'\n if not hasattr(get_backend, cache_name):\n backend_class = backend_class or settings.ROUGHPAGES_BACKEND\n if isinstance(backend_class, basestring):\n module_path, class_name = backend_class.rsplit(\".\", 1)\n module = import_module(module_path)\n backend_class = getattr(module, class_name)\n setattr(get_backend, cache_name, backend_class())\n return getattr(get_backend, cache_name)", - "docstring": "Get backend instance\n\n If no `backend_class` is specified, the backend class is determined from\n the value of `settings.ROUGHPAGES_BACKEND`.\n `backend_class` can be a class object or dots separated python import path\n\n Returns:\n backend instance" - }, - { - "code": "def next_epoch(self):\n epoch = next(self._all_epochs)\n folder = os.path.join(self._root, str(epoch), self._subset)\n self.data = []\n silence = None\n gc.disable()\n for filename in os.listdir(folder):\n command = os.path.splitext(os.path.basename(filename))[0]\n with open(os.path.join(folder, filename), \"r\") as pkl_file:\n audio = pickle.load(pkl_file)\n if command == \"silence\":\n silence = audio\n else:\n target = self.classes.index(os.path.basename(command))\n self.data.extend(itertools.product(audio, [target]))\n gc.enable()\n target = self.classes.index(\"silence\")\n self.data += [(silence, target)] * int(len(self.data) * self._silence_percentage)\n return epoch", - "docstring": "Load next epoch from disk" - }, - { - "code": "def get_subscribed_services_names(cls):\n accounts_for_service = Account.get_accounts_for_service\n service_data = cls._get_music_services_data().values()\n return [\n 
service['Name'] for service in service_data\n if len(\n accounts_for_service(service['ServiceType'])\n ) > 0\n ]", - "docstring": "Get a list of the names of all subscribed music services.\n\n Returns:\n list: A list of strings." - }, - { - "code": "def _to_diagonally_dominant_weighted(mat):\n mat += np.diag(np.sum(np.abs(mat), axis=1) + 0.01)\n return mat", - "docstring": "Make matrix weighted diagonally dominant using the Laplacian." - }, - { - "code": "def get_params():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_dir\", type=str, default='/tmp/tensorflow/mnist/input_data', help=\"data directory\")\n parser.add_argument(\"--dropout_rate\", type=float, default=0.5, help=\"dropout rate\")\n parser.add_argument(\"--channel_1_num\", type=int, default=32)\n parser.add_argument(\"--channel_2_num\", type=int, default=64)\n parser.add_argument(\"--conv_size\", type=int, default=5)\n parser.add_argument(\"--pool_size\", type=int, default=2)\n parser.add_argument(\"--hidden_size\", type=int, default=1024)\n parser.add_argument(\"--learning_rate\", type=float, default=1e-4)\n parser.add_argument(\"--batch_num\", type=int, default=2700)\n parser.add_argument(\"--batch_size\", type=int, default=32)\n args, _ = parser.parse_known_args()\n return args", - "docstring": "Get parameters from command line" - }, - { - "code": "def impute_using_statistics(df, method='min'):\n sf = SimpleFill(method)\n imputed_matrix = sf.complete(df.values)\n imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)\n return imputed_df", - "docstring": "Imputes the missing values by the selected statistical property of each column\n\n :param df: The input dataframe that contains missing values\n :param method: The imputation method (min by default)\n \"zero\": fill missing entries with zeros\n \"mean\": fill with column means\n \"median\" : fill with column medians\n \"min\": fill with min value per column\n \"random\": fill with gaussian noise according to mean/std of column\n :return: the imputed dataframe" - }, - { - "code": "def _get_config(self, unit, filename):\n file_contents = unit.file_contents(filename)\n config = configparser.ConfigParser(allow_no_value=True)\n config.readfp(io.StringIO(file_contents))\n return config", - "docstring": "Get a ConfigParser object for parsing a unit's config file." - }, - { - "code": "def signal_to_noise_map(self):\n signal_to_noise_map = np.divide(self.data, self.noise_map)\n signal_to_noise_map[signal_to_noise_map < 0] = 0\n return signal_to_noise_map", - "docstring": "The signal-to-noise_map of the data and noise-map which are fitted." - }, - { - "code": "def driverDebugRequest(self, unDeviceIndex, pchRequest, pchResponseBuffer, unResponseBufferSize):\n fn = self.function_table.driverDebugRequest\n result = fn(unDeviceIndex, pchRequest, pchResponseBuffer, unResponseBufferSize)\n return result", - "docstring": "Sends a request to the driver for the specified device and returns the response. The maximum response size is 32k,\n but this method can be called with a smaller buffer. If the response exceeds the size of the buffer, it is truncated. \n The size of the response including its terminating null is returned." - }, - { - "code": "def template(self):\n return Template(\n self._env, lib.EnvFactDeftemplate(self._env, self._fact))", - "docstring": "The associated Template." 
- }, - { - "code": "def setup_logging(\n handler, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, log_level=logging.INFO\n):\n all_excluded_loggers = set(excluded_loggers + EXCLUDED_LOGGER_DEFAULTS)\n logger = logging.getLogger()\n logger.setLevel(log_level)\n logger.addHandler(handler)\n logger.addHandler(logging.StreamHandler())\n for logger_name in all_excluded_loggers:\n logger = logging.getLogger(logger_name)\n logger.propagate = False\n logger.addHandler(logging.StreamHandler())", - "docstring": "Attach a logging handler to the Python root logger\n\n Excludes loggers that this library itself uses to avoid\n infinite recursion.\n\n :type handler: :class:`logging.handler`\n :param handler: the handler to attach to the global handler\n\n :type excluded_loggers: tuple\n :param excluded_loggers: (Optional) The loggers to not attach the handler\n to. This will always include the loggers in the\n path of the logging client itself.\n\n :type log_level: int\n :param log_level: (Optional) Python logging log level. Defaults to\n :const:`logging.INFO`.\n\n Example:\n\n .. code-block:: python\n\n import logging\n import google.cloud.logging\n from google.cloud.logging.handlers import CloudLoggingHandler\n\n client = google.cloud.logging.Client()\n handler = CloudLoggingHandler(client)\n google.cloud.logging.handlers.setup_logging(handler)\n logging.getLogger().setLevel(logging.DEBUG)\n\n logging.error('bad news') # API call" - }, - { - "code": "def Dirname(self):\n result = self.Copy()\n while 1:\n last_directory = posixpath.dirname(result.last.path)\n if last_directory != \"/\" or len(result) <= 1:\n result.last.path = last_directory\n result.last.inode = None\n break\n result.Pop(-1)\n return result", - "docstring": "Get a new copied object with only the directory path." - }, - { - "code": "def reload(self):\n new = self.__class__.pickle_load(self.workdir)\n self = new", - "docstring": "Reload the flow from the pickle file. Used when we are monitoring the flow\n executed by the scheduler. In this case, indeed, the flow might have been changed\n by the scheduler and we have to reload the new flow in memory." - }, - { - "code": "def _merge_configs(configs):\n result = {\n u\"contexts\": [],\n u\"users\": [],\n u\"clusters\": [],\n u\"current-context\": None,\n }\n for config in configs:\n for k in {u\"contexts\", u\"users\", u\"clusters\"}:\n try:\n values = config.doc[k]\n except KeyError:\n pass\n else:\n result[k].extend(values)\n if result[u\"current-context\"] is None:\n try:\n result[u\"current-context\"] = config.doc[u\"current-context\"]\n except KeyError:\n pass\n return KubeConfig(result)", - "docstring": "Merge one or more ``KubeConfig`` objects.\n\n :param list[KubeConfig] configs: The configurations to merge.\n\n :return KubeConfig: A single configuration object with the merged\n configuration." 
- }, - { - "code": "def score(self, X, y):\n from sklearn.metrics import accuracy_score\n return accuracy_score(y, self.predict(X))", - "docstring": "Force use of accuracy score since we don't inherit\n from ClassifierMixin" - }, - { - "code": "def to_dict(self, remove_nones=False):\n if remove_nones:\n return super().to_dict(remove_nones=True)\n tags = None\n if self.tags is not None:\n tags = [tag.to_dict(remove_nones=remove_nones) for tag in self.tags]\n return {\n 'value': self.value,\n 'indicatorType': self.type,\n 'priorityLevel': self.priority_level,\n 'correlationCount': self.correlation_count,\n 'whitelisted': self.whitelisted,\n 'weight': self.weight,\n 'reason': self.reason,\n 'firstSeen': self.first_seen,\n 'lastSeen': self.last_seen,\n 'source': self.source,\n 'notes': self.notes,\n 'tags': tags,\n 'enclaveIds': self.enclave_ids\n }", - "docstring": "Creates a dictionary representation of the indicator.\n\n :param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.\n :return: A dictionary representation of the indicator." - }, - { - "code": "def setPlainText(self, txt, mime_type, encoding):\n self.file.mimetype = mime_type\n self.file._encoding = encoding\n self._original_text = txt\n self._modified_lines.clear()\n import time\n t = time.time()\n super(CodeEdit, self).setPlainText(txt)\n _logger().log(5, 'setPlainText duration: %fs' % (time.time() - t))\n self.new_text_set.emit()\n self.redoAvailable.emit(False)\n self.undoAvailable.emit(False)", - "docstring": "Extends setPlainText to force the user to setup an encoding and a\n mime type.\n\n Emits the new_text_set signal.\n\n :param txt: The new text to set.\n :param mime_type: Associated mimetype. Setting the mime will update the\n pygments lexer.\n :param encoding: text encoding" - }, - { - "code": "def disable_notebook():\n try:\n from IPython.core.getipython import get_ipython\n except ImportError:\n raise ImportError('This feature requires IPython 1.0+')\n ip = get_ipython()\n f = ip.display_formatter.formatters['text/html']\n f.type_printers.pop(np.ndarray, None)", - "docstring": "Disable automatic visualization of NumPy arrays in the IPython Notebook." - }, - { - "code": "def is_tagged(required_tags, has_tags):\n if not required_tags and not has_tags:\n return True\n elif not required_tags:\n return False\n found_tags = []\n for tag in required_tags:\n if tag in has_tags:\n found_tags.append(tag)\n return len(found_tags) == len(required_tags)", - "docstring": "Checks if tags match" - }, - { - "code": "def callHook(self, hookname, *args, **kwargs):\n 'Call all functions registered with `addHook` for the given hookname.'\n r = []\n for f in self.hooks[hookname]:\n try:\n r.append(f(*args, **kwargs))\n except Exception as e:\n exceptionCaught(e)\n return r", - "docstring": "Call all functions registered with `addHook` for the given hookname." - }, - { - "code": "def echo(root_resource, message):\n params = dict(message=message)\n return root_resource.get(ECHO_PATH, params)", - "docstring": "Have the server echo our message back." 
- }, - { - "code": "def node_ls(server=str):\n try:\n salt_return = {}\n client = docker.APIClient(base_url='unix://var/run/docker.sock')\n service = client.nodes(filters=({'name': server}))\n getdata = salt.utils.json.dumps(service)\n dump = salt.utils.json.loads(getdata)\n for items in dump:\n docker_version = items['Description']['Engine']['EngineVersion']\n platform = items['Description']['Platform']\n hostnames = items['Description']['Hostname']\n ids = items['ID']\n role = items['Spec']['Role']\n availability = items['Spec']['Availability']\n status = items['Status']\n version = items['Version']['Index']\n salt_return.update({'Docker Version': docker_version,\n 'Platform': platform,\n 'Hostname': hostnames,\n 'ID': ids,\n 'Roles': role,\n 'Availability': availability,\n 'Status': status,\n 'Version': version})\n except TypeError:\n salt_return = {}\n salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'})\n return salt_return", - "docstring": "Displays Information about Swarm Nodes with passing in the server\n\n server\n The minion/server name\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' swarm.node_ls server=minion1" - }, - { - "code": "def _grid_widgets(self):\n self._canvas.grid(sticky=\"nswe\")\n self.header_label.grid(row=1, column=1, sticky=\"nswe\", pady=5, padx=5)\n self.text_label.grid(row=3, column=1, sticky=\"nswe\", pady=6, padx=5)", - "docstring": "Place the widgets in the Toplevel." - }, - { - "code": "def conv(arg,default=None,func=None):\n if func:\n return func(arg) if arg else default;\n else:\n return arg if arg else default;", - "docstring": "essentially, the generalization of\n \n arg if arg else default\n\n or\n\n func(arg) if arg else default" - }, - { - "code": "def has_sequential_ids(data_wrapper):\n db = data_wrapper.data_block\n ids = db[:, COLS.ID]\n steps = ids[np.where(np.diff(ids) != 1)[0] + 1].astype(int)\n return CheckResult(len(steps) == 0, steps)", - "docstring": "Check that IDs are increasing and consecutive\n\n returns tuple (bool, list of IDs that are not consecutive\n with their predecessor)" - }, - { - "code": "def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10):\n curve = Curve.by_name(curve)\n privkey = curve.passphrase_to_privkey(passphrase)\n return privkey.decrypt(s, mac_bytes)", - "docstring": "Decrypts `s' with passphrase `passphrase'" - }, - { - "code": "def make_target(url, extra_opts=None):\n parts = compat.urlparse(url, allow_fragments=False)\n scheme = parts.scheme.lower()\n if scheme in [\"ftp\", \"ftps\"]:\n creds = parts.username, parts.password\n tls = scheme == \"ftps\"\n from ftpsync import ftp_target\n target = ftp_target.FtpTarget(\n parts.path,\n parts.hostname,\n parts.port,\n username=creds[0],\n password=creds[1],\n tls=tls,\n timeout=None,\n extra_opts=extra_opts,\n )\n else:\n target = FsTarget(url, extra_opts)\n return target", - "docstring": "Factory that creates `_Target` objects from URLs.\n\n FTP targets must begin with the scheme ``ftp://`` or ``ftps://`` for TLS.\n\n Note:\n TLS is only supported on Python 2.7/3.2+.\n Args:\n url (str):\n extra_opts (dict, optional): Passed to Target constructor. 
Default: None.\n Returns:\n :class:`_Target`" - }, - { - "code": "def get_for(self, historics_id, with_estimate=None):\n return self.get(historics_id, maximum=None, page=None, with_estimate=with_estimate)", - "docstring": "Get the historic query for the given ID\n\n Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsget\n\n :param historics_id: playback id of the query\n :type historics_id: str\n :return: dict of REST API output with headers attached\n :rtype: :class:`~datasift.request.DictResponse`\n :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`" - }, - { - "code": "def decrypt(self, msg):\n error = False\n signature = msg[0:SHA256.digest_size]\n iv = msg[SHA256.digest_size:SHA256.digest_size + AES.block_size]\n cipher_text = msg[SHA256.digest_size + AES.block_size:]\n if self.sign(iv + cipher_text) != signature:\n error = True\n ctr = Counter.new(AES.block_size * 8, initial_value=self.bin2long(iv))\n cipher = AES.AESCipher(self._cipherkey, AES.MODE_CTR, counter=ctr)\n plain_text = cipher.decrypt(cipher_text)\n if error:\n raise DecryptionError\n return plain_text", - "docstring": "decrypt a message" - }, - { - "code": "def explicit_line_join(logical_line, tokens):\n r\n prev_start = prev_end = parens = 0\n comment = False\n backslash = None\n for token_type, text, start, end, line in tokens:\n if token_type == tokenize.COMMENT:\n comment = True\n if start[0] != prev_start and parens and backslash and not comment:\n yield backslash, \"E502 the backslash is redundant between brackets\"\n if end[0] != prev_end:\n if line.rstrip('\\r\\n').endswith('\\\\'):\n backslash = (end[0], len(line.splitlines()[-1]) - 1)\n else:\n backslash = None\n prev_start = prev_end = end[0]\n else:\n prev_start = start[0]\n if token_type == tokenize.OP:\n if text in '([{':\n parens += 1\n elif text in ')]}':\n parens -= 1", - "docstring": "r\"\"\"Avoid explicit line join between brackets.\n\n The preferred way of wrapping long lines is by using Python's implied line\n continuation inside parentheses, brackets and braces. Long lines can be\n broken over multiple lines by wrapping expressions in parentheses. 
These\n should be used in preference to using a backslash for line continuation.\n\n E502: aaa = [123, \\\\n 123]\n E502: aaa = (\"bbb \" \\\\n \"ccc\")\n\n Okay: aaa = [123,\\n 123]\n Okay: aaa = (\"bbb \"\\n \"ccc\")\n Okay: aaa = \"bbb \" \\\\n \"ccc\"\n Okay: aaa = 123 # \\\\" - }, - { - "code": "def find_methods(self, classname=\".*\", methodname=\".*\", descriptor=\".*\",\n accessflags=\".*\", no_external=False):\n for cname, c in self.classes.items():\n if re.match(classname, cname):\n for m in c.get_methods():\n z = m.get_method()\n if no_external and isinstance(z, ExternalMethod):\n continue\n if re.match(methodname, z.get_name()) and \\\n re.match(descriptor, z.get_descriptor()) and \\\n re.match(accessflags, z.get_access_flags_string()):\n yield m", - "docstring": "Find a method by name using regular expression.\n This method will return all MethodClassAnalysis objects, which match the\n classname, methodname, descriptor and accessflags of the method.\n\n :param classname: regular expression for the classname\n :param methodname: regular expression for the method name\n :param descriptor: regular expression for the descriptor\n :param accessflags: regular expression for the accessflags\n :param no_external: Remove external method from the output (default False)\n :rtype: generator of `MethodClassAnalysis`" - }, - { - "code": "def _process_deriv_args(f, kwargs):\n n = f.ndim\n axis = normalize_axis_index(kwargs.get('axis', 0), n)\n if f.shape[axis] < 3:\n raise ValueError('f must have at least 3 point along the desired axis.')\n if 'delta' in kwargs:\n if 'x' in kwargs:\n raise ValueError('Cannot specify both \"x\" and \"delta\".')\n delta = atleast_1d(kwargs['delta'])\n if delta.size == 1:\n diff_size = list(f.shape)\n diff_size[axis] -= 1\n delta_units = getattr(delta, 'units', None)\n delta = np.broadcast_to(delta, diff_size, subok=True)\n if delta_units is not None:\n delta = delta * delta_units\n else:\n delta = _broadcast_to_axis(delta, axis, n)\n elif 'x' in kwargs:\n x = _broadcast_to_axis(kwargs['x'], axis, n)\n delta = diff(x, axis=axis)\n else:\n raise ValueError('Must specify either \"x\" or \"delta\" for value positions.')\n return n, axis, delta", - "docstring": "Handle common processing of arguments for derivative functions." - }, - { - "code": "def overlap_add(blk_sig, size=None, hop=None, wnd=None, normalize=True):\n import numpy as np\n if size is None:\n blk_sig = Stream(blk_sig)\n size = len(blk_sig.peek())\n if hop is None:\n hop = size\n if wnd is None:\n wnd = np.ones(size)\n elif callable(wnd) and not isinstance(wnd, Stream):\n wnd = wnd(size)\n if isinstance(wnd, Sequence):\n wnd = np.array(wnd)\n elif isinstance(wnd, Iterable):\n wnd = np.hstack(wnd)\n else:\n raise TypeError(\"Window should be an iterable or a callable\")\n if normalize:\n steps = Stream(wnd).blocks(hop).map(np.array)\n gain = np.sum(np.abs(np.vstack(steps)), 0).max()\n if gain:\n wnd = wnd / gain\n old = np.zeros(size)\n for blk in (wnd * blk for blk in blk_sig):\n blk[:-hop] += old[hop:]\n for el in blk[:hop]:\n yield el\n old = blk\n for el in old[hop:]:\n yield el", - "docstring": "Overlap-add algorithm using Numpy arrays.\n\n Parameters\n ----------\n blk_sig :\n An iterable of blocks (sequences), such as the ``Stream.blocks`` result.\n size :\n Block size for each ``blk_sig`` element, in samples.\n hop :\n Number of samples for two adjacent blocks (defaults to the size).\n wnd :\n Windowing function to be applied to each block or any iterable with\n exactly ``size`` elements. 
If ``None`` (default), applies a rectangular\n window.\n normalize :\n Flag whether the window should be normalized so that the process could\n happen in the [-1; 1] range, dividing the window by its hop gain.\n Default is ``True``.\n\n Returns\n -------\n A Stream instance with the blocks overlapped and added.\n\n See Also\n --------\n Stream.blocks :\n Splits the Stream instance into blocks with given size and hop.\n blocks :\n Same to Stream.blocks but for without using the Stream class.\n chain :\n Lazily joins all iterables given as parameters.\n chain.from_iterable :\n Same to ``chain(*data)``, but the ``data`` evaluation is lazy.\n window :\n Window/apodization/tapering functions for a given size as a StrategyDict.\n\n Note\n ----\n Each block has the window function applied to it and the result is the\n sum of the blocks without any edge-case special treatment for the first\n and last few blocks." - }, - { - "code": "def get_assets(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AssetList(self._results, runtime=self._runtime)", - "docstring": "Gets the asset list resulting from a search.\n\n return: (osid.repository.AssetList) - the asset list\n raise: IllegalState - the list has already been retrieved\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def doOutages(self):\n assert len(self.branchOutages) == len(self.market.case.branches)\n weights = [[(False, r), (True, 1 - (r))] for r in self.branchOutages]\n for i, ln in enumerate(self.market.case.branches):\n ln.online = weighted_choice(weights[i])\n if ln.online == False:\n print \"Branch outage [%s] in period %d.\" %(ln.name,self.stepid)", - "docstring": "Applies branch outtages." - }, - { - "code": "def _makeJobGraphs(self, jobGraph, jobStore):\n jobsToJobGraphs = {self:jobGraph}\n for successors in (self._followOns, self._children):\n jobs = [successor._makeJobGraphs2(jobStore, jobsToJobGraphs) for successor in successors]\n jobGraph.stack.append(jobs)\n return jobsToJobGraphs", - "docstring": "Creates a jobGraph for each job in the job graph, recursively." - }, - { - "code": "def create_menu(self):\n menu = QtWidgets.QMenu(self.editor)\n menu.setTitle(_('Select'))\n menu.menuAction().setIcon(QtGui.QIcon.fromTheme('edit-select'))\n menu.addAction(self.action_select_word)\n menu.addAction(self.action_select_extended_word)\n menu.addAction(self.action_select_matched)\n menu.addAction(self.action_select_line)\n menu.addSeparator()\n menu.addAction(self.editor.action_select_all)\n icon = QtGui.QIcon.fromTheme(\n 'edit-select-all', QtGui.QIcon(\n ':/pyqode-icons/rc/edit-select-all.png'))\n self.editor.action_select_all.setIcon(icon)\n return menu", - "docstring": "Creates the extended selection menu." - }, - { - "code": "def get_attributes(self, item_name, attribute_name=None,\n consistent_read=False, item=None):\n return self.connection.get_attributes(self, item_name, attribute_name,\n consistent_read, item)", - "docstring": "Retrieve attributes for a given item.\n\n :type item_name: string\n :param item_name: The name of the item whose attributes are being retrieved.\n\n :type attribute_names: string or list of strings\n :param attribute_names: An attribute name or list of attribute names. This\n parameter is optional. 
If not supplied, all attributes\n will be retrieved for the item.\n\n :rtype: :class:`boto.sdb.item.Item`\n :return: An Item mapping type containing the requested attribute name/values" - }, - { - "code": "def restore_all_edges(self):\n for edge in self.hidden_edges.keys():\n try:\n self.restore_edge(edge)\n except GraphError:\n pass", - "docstring": "Restores all hidden edges." - }, - { - "code": "def get_all_synDelays(self):\n tic = time()\n randomstate = np.random.get_state()\n delays = {}\n for cellindex in self.RANK_CELLINDICES:\n np.random.seed(self.POPULATIONSEED + cellindex + 2*self.POPULATION_SIZE)\n delays[cellindex] = {}\n for j, X in enumerate(self.X):\n delays[cellindex][X] = []\n for i in self.k_yXL[:, j]:\n loc = self.synDelayLoc[j]\n loc /= self.dt\n scale = self.synDelayScale[j]\n if scale is not None:\n scale /= self.dt\n delay = np.random.normal(loc, scale, i).astype(int)\n while np.any(delay < 1):\n inds = delay < 1\n delay[inds] = np.random.normal(loc, scale,\n inds.sum()).astype(int)\n delay = delay.astype(float)\n delay *= self.dt\n else:\n delay = np.zeros(i) + self.synDelayLoc[j]\n delays[cellindex][X].append(delay)\n np.random.set_state(randomstate)\n if RANK == 0:\n print('found delays in %.2f seconds' % (time()-tic))\n return delays", - "docstring": "Create and load arrays of connection delays per connection on this rank\n\n Get random normally distributed synaptic delays,\n returns dict of nested list of same shape as SpCells.\n\n Delays are rounded to dt.\n\n This function takes no kwargs.\n\n\n Parameters\n ----------\n None\n\n\n Returns\n -------\n dict\n output[cellindex][populationname][layerindex]`, np.array of\n delays per connection.\n\n\n See also\n --------\n numpy.random.normal" - }, - { - "code": "def retain_all(self, items):\n check_not_none(items, \"Value can't be None\")\n data_items = []\n for item in items:\n check_not_none(item, \"Value can't be None\")\n data_items.append(self._to_data(item))\n return self._encode_invoke(queue_compare_and_retain_all_codec, data_list=data_items)", - "docstring": "Removes the items which are not contained in the specified collection. In other words, only the items that\n are contained in the specified collection will be retained.\n\n :param items: (Collection), collection which includes the elements to be retained in this set.\n :return: (bool), ``true`` if this queue changed as a result of the call." - }, - { - "code": "def abort(self):\n if self._state == _State.CLOSED:\n self._invalid_state(\"abort() called\")\n return\n self._force_close(None)", - "docstring": "Immediately close the stream, without sending remaining buffers or\n performing a proper shutdown." - }, - { - "code": "def encode(self, value):\n value = self.serialize(value)\n if self.encoding:\n value = value.encode(self.encoding)\n return value", - "docstring": "Encode value." - }, - { - "code": "def vm_action(name, kwargs=None, call=None):\n if call != 'action':\n raise SaltCloudSystemExit(\n 'The vm_action function must be called with -a or --action.'\n )\n if kwargs is None:\n kwargs = {}\n action = kwargs.get('action', None)\n if action is None:\n raise SaltCloudSystemExit(\n 'The vm_action function must have an \\'action\\' provided.'\n )\n server, user, password = _get_xml_rpc()\n auth = ':'.join([user, password])\n vm_id = int(get_vm_id(kwargs={'name': name}))\n response = server.one.vm.action(auth, action, vm_id)\n data = {\n 'action': 'vm.action.' 
+ six.text_type(action),\n 'actioned': response[0],\n 'vm_id': response[1],\n 'error_code': response[2],\n }\n return data", - "docstring": "Submits an action to be performed on a given virtual machine.\n\n .. versionadded:: 2016.3.0\n\n name\n The name of the VM to action.\n\n action\n The action to be performed on the VM. Available options include:\n - boot\n - delete\n - delete-recreate\n - hold\n - poweroff\n - poweroff-hard\n - reboot\n - reboot-hard\n - release\n - resched\n - resume\n - shutdown\n - shutdown-hard\n - stop\n - suspend\n - undeploy\n - undeploy-hard\n - unresched\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -a vm_action my-vm action='release'" - }, - { - "code": "def add_listener(self, on_lifecycle_change):\n id = str(uuid.uuid4())\n self._listeners[id] = on_lifecycle_change\n return id", - "docstring": "Add a listener object to listen for lifecycle events.\n\n :param on_lifecycle_change: (Function), function to be called when LifeCycle state is changed.\n :return: (str), id of the listener." - }, - { - "code": "def get_as_datadict(self):\n return dict(type=self.__class__.__name__, tags=list(self.tags))", - "docstring": "Get information about this object as a dictionary. Used by WebSocket interface to pass some\n relevant information to client applications." - }, - { - "code": "def rytov_sc(radius=5e-6, sphere_index=1.339, medium_index=1.333,\n wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80),\n center=(39.5, 39.5), radius_sampling=42):\n r\n r_ryt, n_ryt = correct_rytov_sc_input(radius_sc=radius,\n sphere_index_sc=sphere_index,\n medium_index=medium_index,\n radius_sampling=radius_sampling)\n qpi = mod_rytov.rytov(radius=r_ryt,\n sphere_index=n_ryt,\n medium_index=medium_index,\n wavelength=wavelength,\n pixel_size=pixel_size,\n grid_size=grid_size,\n center=center,\n radius_sampling=radius_sampling)\n qpi[\"sim radius\"] = radius\n qpi[\"sim index\"] = sphere_index\n qpi[\"sim model\"] = \"rytov-sc\"\n return qpi", - "docstring": "r\"\"\"Field behind a dielectric sphere, systematically corrected Rytov\n\n This method implements a correction of\n :func:`qpsphere.models.rytov`, where the\n `radius` :math:`r_\\text{Ryt}` and the `sphere_index`\n :math:`n_\\text{Ryt}` are corrected using\n the approach described in :cite:`Mueller2018` (eqns. 3,4, and 5).\n\n .. math::\n\n n_\\text{Ryt-SC} &= n_\\text{Ryt} + n_\\text{med} \\cdot\n \\left( a_n x^2 + b_n x + c_n \\right)\n\n r_\\text{Ryt-SC} &= r_\\text{Ryt} \\cdot\n \\left( a_r x^2 +b_r x + c_r \\right)\n\n &\\text{with} x = \\frac{n_\\text{Ryt}}{n_\\text{med}} - 1\n\n The correction factors are given in\n :data:`qpsphere.models.mod_rytov_sc.RSC_PARAMS`.\n\n Parameters\n ----------\n radius: float\n Radius of the sphere [m]\n sphere_index: float\n Refractive index of the sphere\n medium_index: float\n Refractive index of the surrounding medium\n wavelength: float\n Vacuum wavelength of the imaging light [m]\n pixel_size: float\n Pixel size [m]\n grid_size: tuple of floats\n Resulting image size in x and y [px]\n center: tuple of floats\n Center position in image coordinates [px]\n radius_sampling: int\n Number of pixels used to sample the sphere radius when\n computing the Rytov field. 
The default value of 42\n pixels is a reasonable number for single-cell analysis.\n\n Returns\n -------\n qpi: qpimage.QPImage\n Quantitative phase data set" - }, - { - "code": "def fmt_duration(secs):\n return ' '.join(fmt.human_duration(secs, 0, precision=2, short=True).strip().split())", - "docstring": "Format a duration in seconds." - }, - { - "code": "def getVersion(init_file):\n try:\n return os.environ['BUILDBOT_VERSION']\n except KeyError:\n pass\n try:\n cwd = os.path.dirname(os.path.abspath(init_file))\n fn = os.path.join(cwd, 'VERSION')\n with open(fn) as f:\n return f.read().strip()\n except IOError:\n pass\n version = getVersionFromArchiveId()\n if version is not None:\n return version\n try:\n p = Popen(['git', 'describe', '--tags', '--always'], stdout=PIPE, stderr=STDOUT, cwd=cwd)\n out = p.communicate()[0]\n if (not p.returncode) and out:\n v = gitDescribeToPep440(str(out))\n if v:\n return v\n except OSError:\n pass\n try:\n return mTimeVersion(init_file)\n except Exception:\n return \"latest\"", - "docstring": "Return BUILDBOT_VERSION environment variable, content of VERSION file, git\n tag or 'latest'" - }, - { - "code": "def _close(self, e):\n self.stop()\n self.sock.close()\n self.closed = True\n self.close_cb(e)", - "docstring": "Really close the transport with a reason.\n\n e -- reason the socket is being closed." - }, - { - "code": "def get_neighbors_of_site_with_index(struct, n, approach=\"min_dist\", delta=0.1, \\\n cutoff=10.0):\n if approach == \"min_dist\":\n return MinimumDistanceNN(tol=delta, cutoff=cutoff).get_nn(\n struct, n)\n elif approach == \"voronoi\":\n return VoronoiNN(tol=delta, cutoff=cutoff).get_nn(\n struct, n)\n elif approach == \"min_OKeeffe\":\n return MinimumOKeeffeNN(tol=delta, cutoff=cutoff).get_nn(\n struct, n)\n elif approach == \"min_VIRE\":\n return MinimumVIRENN(tol=delta, cutoff=cutoff).get_nn(\n struct, n)\n else:\n raise RuntimeError(\"unsupported neighbor-finding method ({}).\".format(\n approach))", - "docstring": "Returns the neighbors of a given site using a specific neighbor-finding\n method.\n\n Args:\n struct (Structure): input structure.\n n (int): index of site in Structure object for which motif type\n is to be determined.\n approach (str): type of neighbor-finding approach, where\n \"min_dist\" will use the MinimumDistanceNN class,\n \"voronoi\" the VoronoiNN class, \"min_OKeeffe\" the\n MinimumOKeeffe class, and \"min_VIRE\" the MinimumVIRENN class.\n delta (float): tolerance involved in neighbor finding.\n cutoff (float): (large) radius to find tentative neighbors.\n\n Returns: neighbor sites." - }, - { - "code": "def replace_values(in_m, out_m, map_from=(), map_to=()):\n for link in in_m.match():\n new_link = list(link)\n if map_from:\n if link[ORIGIN] in map_from: new_link[ORIGIN] = map_to[map_from.index(link[ORIGIN])]\n new_link[ATTRIBUTES] = link[ATTRIBUTES].copy()\n out_m.add(*new_link)\n return", - "docstring": "Make a copy of a model with one value replaced with another" - }, - { - "code": "def run(cmd, filename=None, threads=True, verbose=False):\n _run(threads, verbose, 'run', filename, cmd)", - "docstring": "Similar to profile.run ." 
- }, - { - "code": "def dumps(x, float_bits=DEFAULT_FLOAT_BITS):\n with lock:\n if float_bits == 32:\n encode_func[float] = encode_float32\n elif float_bits == 64:\n encode_func[float] = encode_float64\n else:\n raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)\n r = []\n encode_func[type(x)](x, r)\n return b''.join(r)", - "docstring": "Dump data structure to str.\n\n Here float_bits is either 32 or 64." - }, - { - "code": "def _machine_bytes():\n machine_hash = hashlib.md5()\n if PY3:\n machine_hash.update(socket.gethostname().encode())\n else:\n machine_hash.update(socket.gethostname())\n return machine_hash.digest()[0:3]", - "docstring": "Get the machine portion of an ObjectId." - }, - { - "code": "def _lookup_global(self, symbol):\n assert symbol.parts\n namespace = self.namespaces\n if len(symbol.parts) == 1:\n namespace = self.namespaces[None]\n try:\n return self._lookup_namespace(symbol, namespace)\n except Error as orig_exc:\n try:\n namespace = self.namespaces[None]\n return self._lookup_namespace(symbol, namespace)\n except Error:\n raise orig_exc", - "docstring": "Helper for lookup_symbol that only looks up global variables.\n\n Args:\n symbol: Symbol" - }, - { - "code": "def serialize_attrs(self, *args):\n cls = type(self)\n result = {}\n for a in args:\n if hasattr(cls, a) and a not in cls.attrs_forbidden_for_serialization():\n val = getattr(self, a)\n if is_list_like(val):\n result[a] = list(val)\n else:\n result[a] = val\n return result", - "docstring": "Converts and instance to a dictionary with only the specified\n attributes as keys\n\n Args:\n *args (list): The attributes to serialize\n\n Examples:\n\n >>> customer = Customer.create(name=\"James Bond\", email=\"007@mi.com\",\n phone=\"007\", city=\"London\")\n >>> customer.serialize_attrs('name', 'email')\n {'name': u'James Bond', 'email': u'007@mi.com'}" - }, - { - "code": "def find_one(self, **kwargs):\n future = TracebackFuture()\n def handle_response(result, error):\n if error:\n future.set_exception(error)\n else:\n instance = self.__entity()\n instance.map_dict(result)\n future.set_result(instance)\n self.__collection.find_one(kwargs, callback=handle_response)\n return future", - "docstring": "Returns future.\n\n Executes collection's find_one method based on keyword args\n maps result ( dict to instance ) and return future\n\n Example::\n\n manager = EntityManager(Product)\n product_saved = yield manager.find_one(_id=object_id)" - }, - { - "code": "def full(self, external=False):\n return self.fs.url(self.filename, external=external) if self.filename else None", - "docstring": "Get the full image URL in respect with ``max_size``" - }, - { - "code": "def get_factory(self, factory, default='File'):\n name = default\n try:\n is_node = issubclass(factory, SCons.Node.FS.Base)\n except TypeError:\n pass\n else:\n if is_node:\n try: name = factory.__name__\n except AttributeError: pass\n else: factory = None\n if not factory:\n factory = getattr(self.fs, name)\n return factory", - "docstring": "Return a factory function for creating Nodes for this\n construction environment." - }, - { - "code": "def to_json(self):\n self.logger.debug(\"Returning json info\")\n individual_info = {\n 'family_id': self.family,\n 'id':self.individual_id, \n 'sex':str(self.sex), \n 'phenotype': str(self.phenotype), \n 'mother': self.mother, \n 'father': self.father,\n 'extra_info': self.extra_info\n }\n return individual_info", - "docstring": "Return the individual info in a dictionary for json." 
- }, - { - "code": "def promote_pipeline(conf, args):\n src = conf.config['instances'][args.src_instance]\n src_url = api.build_pipeline_url(build_instance_url(src))\n src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])\n verify_ssl = src.get('verify_ssl', True)\n export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)\n dest = conf.config['instances'][args.dest_instance]\n dest_url = api.build_pipeline_url(build_instance_url(dest))\n dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])\n dest_pipeline_id = args.dest_pipeline_id\n if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:\n api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)\n else:\n create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)\n dest_pipeline_id = create_json['info']['pipelineId']\n result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)\n if args.start_dest:\n api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)\n return result", - "docstring": "Export a pipeline from a lower environment and import into higher environment." - }, - { - "code": "def check_node(self, tup_tree, nodename, required_attrs=None,\n optional_attrs=None, allowed_children=None,\n allow_pcdata=False):\n if name(tup_tree) != nodename:\n raise CIMXMLParseError(\n _format(\"Unexpected element {0!A} (expecting element {1!A})\",\n name(tup_tree), nodename),\n conn_id=self.conn_id)\n tt_attrs = {}\n if attrs(tup_tree) is not None:\n tt_attrs = attrs(tup_tree).copy()\n if required_attrs:\n for attr in required_attrs:\n if attr not in tt_attrs:\n raise CIMXMLParseError(\n _format(\"Element {0!A} missing required attribute \"\n \"{1!A} (only has attributes {2!A})\",\n name(tup_tree), attr, attrs(tup_tree).keys()),\n conn_id=self.conn_id)\n del tt_attrs[attr]\n if optional_attrs:\n for attr in optional_attrs:\n if attr in tt_attrs:\n del tt_attrs[attr]\n if tt_attrs:\n raise CIMXMLParseError(\n _format(\"Element {0!A} has invalid attribute(s) {1!A}\",\n name(tup_tree), tt_attrs.keys()),\n conn_id=self.conn_id)\n if allowed_children is not None:\n invalid_children = []\n for child in kids(tup_tree):\n if name(child) not in allowed_children:\n invalid_children.append(name(child))\n if invalid_children:\n if not allowed_children:\n allow_txt = \"no child elements are allowed\"\n else:\n allow_txt = _format(\"allowed are child elements {0!A}\",\n allowed_children)\n raise CIMXMLParseError(\n _format(\"Element {0!A} has invalid child element(s) \"\n \"{1!A} ({2})\",\n name(tup_tree), set(invalid_children), allow_txt),\n conn_id=self.conn_id)\n if not allow_pcdata:\n for child in tup_tree[2]:\n if isinstance(child, six.string_types):\n if child.lstrip(' \\t\\n') != '':\n raise CIMXMLParseError(\n _format(\"Element {0!A} has unexpected non-blank \"\n \"text content {1!A}\",\n name(tup_tree), child),\n conn_id=self.conn_id)", - "docstring": "Check static local constraints on a tuple tree node.\n\n The node must have the given nodename.\n\n Required_attrs is a list/tuple of attribute names that must be present.\n None means the same as an empty list: No attributes are required.\n\n Optional_attrs is a list/tuple of attribute names that may be present.\n None means the same as an empty list: No attributes are 
optional.\n\n Present attributes is a list/tuple of attributes that are neither\n required nor optional, are rejected.\n\n If allowed_children is not None, it is a list/tuple where the node may\n have children of the given types. It can be [] for nodes that may not\n have any children. If it's None, no validation of the children is\n performed.\n\n If allow_pcdata is True, then non-whitespace text nodes are allowed as\n children. (Whitespace text nodes are always allowed as children.)" - }, - { - "code": "def splitFile(inputFileName, linePerFile, outPrefix):\n nbTmpFile = 1\n nbLine = 0\n tmpFile = None\n try:\n with open(inputFileName, \"r\") as inputFile:\n for line in inputFile:\n row = line.rstrip(\"\\r\\n\").split(\" \")\n nbLine += 1\n if tmpFile is None:\n try:\n tmpFile = open(\n outPrefix + \"_tmp.list%d\" % nbTmpFile,\n \"w\",\n )\n except IOError:\n msg = \"tmp.list%d: can't write file\" % nbTmpFile\n raise ProgramError(msg)\n print >>tmpFile, \" \".join(row[:2])\n if nbLine == linePerFile:\n nbLine = 0\n nbTmpFile += 1\n tmpFile.close()\n try:\n tmpFile = open(\n outPrefix + \"_tmp.list%d\" % nbTmpFile,\n \"w\",\n )\n except IOError:\n msg = \"tmp.list%d: can't write file\" % nbTmpFile\n raise ProgramError(msg)\n tmpFile.close()\n if nbLine == 0:\n file_name = outPrefix + \"_tmp.list{}\".format(nbTmpFile)\n if os.path.isfile(file_name):\n os.remove(file_name)\n nbTmpFile -= 1\n except IOError:\n msg = \"%s: no such file\" % inputFileName\n raise ProgramError(msg)\n return nbTmpFile", - "docstring": "Split a file.\n\n :param inputFileName: the name of the input file.\n :param linePerFile: the number of line per file (after splitting).\n :param outPrefix: the prefix of the output files.\n\n :type inputFileName: str\n :type linePerFile: int\n :type outPrefix: str\n\n :returns: the number of created temporary files.\n\n Splits a file (``inputFileName`` into multiple files containing at most\n ``linePerFile`` lines." - }, - { - "code": "def send(self, cmd, *payload):\n if not self._sock:\n raise ConnectionClosed(\"Connection closed\")\n msg = json.dumps(dict(cmd=cmd, payload=payload)) + '\\n'\n try:\n self._sock.sendall(msg)\n except socket.error:\n e_type, e_value, e_tb = sys.exc_info()\n self.close()\n raise e_type, e_value, e_tb", - "docstring": "Send a command message to the other end.\n\n :param cmd: The command to send to the other end.\n :param payload: The command payload. Note that all elements\n of the payload must be serializable to JSON." - }, - { - "code": "def _write(self, request):\n with sw(\"serialize_request\"):\n request_str = request.SerializeToString()\n with sw(\"write_request\"):\n with catch_websocket_connection_errors():\n self._sock.send(request_str)", - "docstring": "Actually serialize and write the request." 
- }, - { - "code": "def _fits_surface(self, width, height):\n assert(width > 0 and height > 0)\n if self.rot and (width > self.width or height > self.height):\n width, height = height, width\n if width > self.width or height > self.height:\n return False\n else:\n return True", - "docstring": "Test surface is big enough to place a rectangle\n\n Arguments:\n width (int, float): Rectangle width\n height (int, float): Rectangle height\n\n Returns:\n boolean: True if it could be placed, False otherwise" - }, - { - "code": "def _sitelist(self, matrix):\n _list = []\n for item in matrix:\n sites = []\n if isinstance(matrix[item], list):\n sites = matrix[item]\n elif isinstance(matrix[item], dict):\n sites = matrix[item]['site']\n for site in sites:\n if len(site.keys()) > 4:\n continue\n domain = self.params.get('domain')\n if domain:\n if domain in site['url']:\n _list.append(site['url'])\n else:\n _list.append(site['url'])\n return _list", - "docstring": "Returns a list of sites from a SiteMatrix, optionally filtered\n by 'domain' param" - }, - { - "code": "def prepare_callable(self, fn, partial=False):\n notes, keyword_notes = self.get_annotations(fn)\n return self.prepare_notes(*notes, __partial=partial, **keyword_notes)", - "docstring": "Prepare arguments required to apply function." - }, - { - "code": "async def helo(\n self, hostname: str = None, timeout: DefaultNumType = _default\n ) -> SMTPResponse:\n if hostname is None:\n hostname = self.source_address\n async with self._command_lock:\n response = await self.execute_command(\n b\"HELO\", hostname.encode(\"ascii\"), timeout=timeout\n )\n self.last_helo_response = response\n if response.code != SMTPStatus.completed:\n raise SMTPHeloError(response.code, response.message)\n return response", - "docstring": "Send the SMTP HELO command.\n Hostname to send for this command defaults to the FQDN of the local\n host.\n\n :raises SMTPHeloError: on unexpected server response code" - }, - { - "code": "def login(self, return_to=None, force_authn=False, is_passive=False, set_nameid_policy=True, name_id_value_req=None):\n authn_request = OneLogin_Saml2_Authn_Request(self.__settings, force_authn, is_passive, set_nameid_policy, name_id_value_req)\n self.__last_request = authn_request.get_xml()\n self.__last_request_id = authn_request.get_id()\n saml_request = authn_request.get_request()\n parameters = {'SAMLRequest': saml_request}\n if return_to is not None:\n parameters['RelayState'] = return_to\n else:\n parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)\n security = self.__settings.get_security_data()\n if security.get('authnRequestsSigned', False):\n parameters['SigAlg'] = security['signatureAlgorithm']\n parameters['Signature'] = self.build_request_signature(saml_request, parameters['RelayState'], security['signatureAlgorithm'])\n return self.redirect_to(self.get_sso_url(), parameters)", - "docstring": "Initiates the SSO process.\n\n :param return_to: Optional argument. The target URL the user should be redirected to after login.\n :type return_to: string\n\n :param force_authn: Optional argument. When true the AuthNRequest will set the ForceAuthn='true'.\n :type force_authn: bool\n\n :param is_passive: Optional argument. When true the AuthNRequest will set the Ispassive='true'.\n :type is_passive: bool\n\n :param set_nameid_policy: Optional argument. When true the AuthNRequest will set a nameIdPolicy element.\n :type set_nameid_policy: bool\n\n :param name_id_value_req: Optional argument. 
Indicates to the IdP the subject that should be authenticated\n :type name_id_value_req: string\n\n :returns: Redirection URL\n :rtype: string" - }, - { - "code": "def json_get(parsed_json, key):\n if key not in parsed_json:\n raise ValueError(\"JSON does not contain a {} field\".format(key))\n return parsed_json[key]", - "docstring": "Retrieves the key from a parsed_json dictionary, or raises an exception if the\n key is not present" - }, - { - "code": "def get_next_state(self, state, ret, oper):\n if oper == fw_const.FW_CR_OP:\n return self.get_next_create_state(state, ret)\n else:\n return self.get_next_del_state(state, ret)", - "docstring": "Returns the next state for a create or delete operation." - }, - { - "code": "def stop(self):\n self.logger.info('Stopping client fuzzer')\n self._target_control_thread.stop()\n self.target.signal_mutated()\n super(ClientFuzzer, self).stop()", - "docstring": "Stop the fuzzing session" - }, - { - "code": "def setCursorSize(self, p):\n 'sets width based on diagonal corner p'\n self.cursorBox = BoundingBox(self.cursorBox.xmin, self.cursorBox.ymin, p.x, p.y)\n self.cursorBox.w = max(self.cursorBox.w, self.canvasCharWidth)\n self.cursorBox.h = max(self.cursorBox.h, self.canvasCharHeight)", - "docstring": "sets width based on diagonal corner p" - }, - { - "code": "def makeMet(segID, N, CA, C, O, geo):\n CA_CB_length=geo.CA_CB_length\n C_CA_CB_angle=geo.C_CA_CB_angle\n N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle\n CB_CG_length=geo.CB_CG_length\n CA_CB_CG_angle=geo.CA_CB_CG_angle\n N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle\n CG_SD_length=geo.CG_SD_length\n CB_CG_SD_angle=geo.CB_CG_SD_angle\n CA_CB_CG_SD_diangle=geo.CA_CB_CG_SD_diangle\n SD_CE_length=geo.SD_CE_length\n CG_SD_CE_angle=geo.CG_SD_CE_angle\n CB_CG_SD_CE_diangle=geo.CB_CG_SD_CE_diangle\n carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)\n CB= Atom(\"CB\", carbon_b, 0.0 , 1.0, \" \",\" CB\", 0,\"C\")\n carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)\n CG= Atom(\"CG\", carbon_g, 0.0, 1.0, \" \", \" CG\", 0, \"C\")\n sulfur_d= calculateCoordinates(CA, CB, CG, CG_SD_length, CB_CG_SD_angle, CA_CB_CG_SD_diangle)\n SD= Atom(\"SD\", sulfur_d, 0.0, 1.0, \" \", \" SD\", 0, \"S\")\n carbon_e= calculateCoordinates(CB, CG, SD, SD_CE_length, CG_SD_CE_angle, CB_CG_SD_CE_diangle)\n CE= Atom(\"CE\", carbon_e, 0.0, 1.0, \" \", \" CE\", 0, \"C\")\n res= Residue((' ', segID, ' '), \"MET\", ' ')\n res.add(N)\n res.add(CA)\n res.add(C)\n res.add(O)\n res.add(CB)\n res.add(CG)\n res.add(SD)\n res.add(CE)\n return res", - "docstring": "Creates a Methionine residue" - }, - { - "code": "def _get_tls_object(self, ssl_params):\n if ssl_params is None:\n return None\n if not ssl_params[\"verify\"] and ssl_params[\"ca_certs\"]:\n self.warning(\n \"Incorrect configuration: trying to disable server certificate validation, \"\n \"while also specifying a capath. No validation will be performed. 
Fix your \"\n \"configuration to remove this warning\"\n )\n validate = ssl.CERT_REQUIRED if ssl_params[\"verify\"] else ssl.CERT_NONE\n if ssl_params[\"ca_certs\"] is None or os.path.isfile(ssl_params[\"ca_certs\"]):\n tls = ldap3.core.tls.Tls(\n local_private_key_file=ssl_params[\"key\"],\n local_certificate_file=ssl_params[\"cert\"],\n ca_certs_file=ssl_params[\"ca_certs\"],\n version=ssl.PROTOCOL_SSLv23,\n validate=validate,\n )\n elif os.path.isdir(ssl_params[\"ca_certs\"]):\n tls = ldap3.core.tls.Tls(\n local_private_key_file=ssl_params[\"key\"],\n local_certificate_file=ssl_params[\"cert\"],\n ca_certs_path=ssl_params[\"ca_certs\"],\n version=ssl.PROTOCOL_SSLv23,\n validate=validate,\n )\n else:\n raise ConfigurationError(\n 'Invalid path {} for ssl_ca_certs: no such file or directory'.format(ssl_params['ca_certs'])\n )\n return tls", - "docstring": "Return a TLS object to establish a secure connection to a server" - }, - { - "code": "def _do_highlight(content, query, tag='em'):\n for term in query:\n term = term.decode('utf-8')\n for match in re.findall('[^A-Z]+', term):\n match_re = re.compile(match, re.I)\n content = match_re.sub('<%s>%s' % (tag, term, tag), content)\n return content", - "docstring": "Highlight `query` terms in `content` with html `tag`.\n\n This method assumes that the input text (`content`) does not contain\n any special formatting. That is, it does not contain any html tags\n or similar markup that could be screwed up by the highlighting.\n\n Required arguments:\n `content` -- Content to search for instances of `text`\n `text` -- The text to be highlighted" - }, - { - "code": "async def do_teardown_request(\n self,\n exc: Optional[BaseException],\n request_context: Optional[RequestContext]=None,\n ) -> None:\n request_ = (request_context or _request_ctx_stack.top).request\n functions = self.teardown_request_funcs[None]\n blueprint = request_.blueprint\n if blueprint is not None:\n functions = chain(functions, self.teardown_request_funcs[blueprint])\n for function in functions:\n await function(exc=exc)\n await request_tearing_down.send(self, exc=exc)", - "docstring": "Teardown the request, calling the teardown functions.\n\n Arguments:\n exc: Any exception not handled that has caused the request\n to teardown.\n request_context: The request context, optional as Flask\n omits this argument." - }, - { - "code": "def is_gzippable(self, path):\n if not getattr(settings, 'BAKERY_GZIP', False):\n return False\n whitelist = getattr(\n settings,\n 'GZIP_CONTENT_TYPES',\n DEFAULT_GZIP_CONTENT_TYPES\n )\n return mimetypes.guess_type(path)[0] in whitelist", - "docstring": "Returns a boolean indicating if the provided file path is a candidate\n for gzipping." - }, - { - "code": "def transcript_associated_plot (self):\n keys = OrderedDict()\n keys['Exonic Rate'] = { 'name': 'Exonic', 'color': '\n keys['Intronic Rate'] = { 'name': 'Intronic', 'color': '\n keys['Intergenic Rate'] = { 'name': 'Intergenic', 'color': '\n pconfig = {\n 'id': 'rna_seqc_position_plot',\n 'title': 'RNA-SeQC: Transcript-associated reads',\n 'ylab': 'Ratio of Reads',\n 'cpswitch': False,\n 'ymax': 1,\n 'ymin': 0,\n 'tt_decimals': 3,\n 'cpswitch_c_active': False\n }\n self.add_section (\n name = 'Transcript-associated reads',\n anchor = 'Transcript_associated',\n helptext = 'All of the above rates are per mapped read. Exonic Rate is the fraction mapping within exons. '\n 'Intronic Rate is the fraction mapping within introns. 
'\n 'Intergenic Rate is the fraction mapping in the genomic space between genes. ',\n plot = bargraph.plot(self.rna_seqc_metrics, keys, pconfig)\n )", - "docstring": "Plot a bargraph showing the Transcript-associated reads" - }, - { - "code": "def interpolate(x, scale=None, output_size=None, mode='linear', align_corners=None):\n from .function_bases import interpolate as interpolate_base\n import math\n if scale is None and output_size is None:\n raise ValueError('Either scale or output_size must be given')\n elif output_size is None:\n output_size = [int(math.floor(s * d))\n for d, s in zip(x.shape[-len(scale):], scale)]\n if align_corners is None:\n if mode == 'linear':\n align_corners = True\n else:\n align_corners = False\n return interpolate_base(x, output_size, mode, align_corners)", - "docstring": "Resize an ND array with interpolation.\n\n Scaling factors for spatial dimensions are determined by either\n ``scale`` or ``output_size``.\n\n ``nd = len(scale)`` or ``nd = len(output_size)`` determines the number of\n spatial dimensions, and the last ``nd`` dimensions of the input ``x`` are \n considered as the spatial dimensions to be resized.\n\n\n If ``scale`` is given, the ``output_size`` is calculated by\n ``output_size[i] = floor(scale[i] * x.shape[i - len(scale)])``.\n\n Example:\n\n .. code-block:: python\n\n import numpy as np\n import nnabla as nn\n import nnabla.functions as F\n\n x_data = np.random.rand(64, 3, 224, 224)\n x = nn.Variable.from_numpy_array(x_data)\n\n # Resize by scales\n y = F.interpolate(x, scale=(2, 2), mode='linear')\n print(y.shape) # (64, 3, 448, 448)\n y.forward()\n print(y.d) # Print output\n\n # Resize to a size\n y2 = F.interpolate(x, output_size=(320, 257), mode='linear')\n print(y2.shape) # (64, 3, 320, 257)\n y2.forward()\n print(y2.d) # Print output\n\n Args:\n x(~nnabla.Variable): N-D array with an arbitrary number of dimensions.\n scale(tuple of ints): Scale factors along axes. The default is\n ``None``, and if this is omitted, ``output_size`` must be specified.\n output_size(tuple of ints): The output sizes for axes. If this is\n given, the scale factors are determined by the output sizes and the\n input sizes. The default is ``None``, and if this is omitted,\n ``scale`` must be specified.\n mode(str): Interpolation mode chosen from ('linear'|'nearest').\n The default is 'linear'.\n align_corners(bool): If true, the corner pixels of input and output\n arrays are aligned, such that the output corner pixels have the\n same values with the input corner pixels.\n The default is ``None``, and it becomes ``True`` if mode is\n 'linear', otherwise ``False``.\n\n Returns:\n ~nnabla.Variable: N-D array." 
- }, - { - "code": "def at(self, row, col):\n if not (isinstance(row, int) and isinstance(col, int)):\n raise TypeError(row, col)\n return self._values[row][col]", - "docstring": "Return the value at the given cell position.\n\n Args:\n row (int): zero-based row number\n col (int): zero-based column number\n Returns:\n cell value\n Raises:\n TypeError: if ``row`` or ``col`` is not an ``int``\n IndexError: if the position is out of range" - }, - { - "code": "def updateEditorGeometry(self, editor, option, index):\n super(WidgetDelegate, self).updateEditorGeometry(editor, option, index)\n editor.setGeometry(option.rect)\n if self.keep_editor_size:\n esh = editor.sizeHint()\n osh = option.rect.size()\n w = osh.width() if osh.width() > esh.width() else esh.width()\n h = osh.height() if osh.height() > esh.height() else esh.height()\n editor.resize(w, h)", - "docstring": "Make sure the editor is the same size as the widget\n\n By default it can get smaller because does not expand over viewport size.\n This will make sure it will resize to the same size as the widget.\n\n :param editor: the editor to update\n :type editor: :class:`QtGui.QWidget`\n :param option: the options for painting\n :type option: QtGui.QStyleOptionViewItem\n :param index: the index to paint\n :type index: QtCore.QModelIndex\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def convert_the_args(raw_args):\n if not raw_args:\n return \"\"\n if isinstance(raw_args,dict):\n out_args = \", \".join([ \"{}={}\".format(k,v) for k,v in raw_args.iteritems() ])\n elif isinstance(raw_args,(list,tuple)):\n new_list = []\n for x in raw_args:\n if isinstance(x,basestring):\n new_list.append(x)\n elif isinstance(x,dict):\n new_list.append( \", \".join([ \"{}={}\".format(k,v) for k,v in x.iteritems() ]) )\n else:\n raise ValueError(\"Error preparing the getters\")\n out_args = \", \".join(new_list)\n else:\n raise ValueError(\"Couldn't recognize list of getters\")\n return out_args", - "docstring": "Function used to convert the arguments of methods" - }, - { - "code": "def load_forth_commands(self, help_dir):\n try:\n help_file_path = os.path.join(help_dir, 'roboforth.txt')\n commands, help_text = self.parse_help_text(help_file_path)\n except IOError:\n print(self.style.warn('Warning: ',\n 'Failed to load ROBOFORTH help.'))\n return\n self.commands['forth'] = commands\n self.help['forth'] = '\\n'.join([self.style.theme('Forth Commands'),\n help_text])", - "docstring": "Load completion list for ROBOFORTH commands." - }, - { - "code": "def from_callback(cls, cb, transf_cbs, nx, nparams=0, pre_adj=None,\n **kwargs):\n be = Backend(kwargs.pop('backend', None))\n x = be.real_symarray('x', nx)\n p = be.real_symarray('p', nparams)\n try:\n transf = [(transf_cbs[idx][0](xi),\n transf_cbs[idx][1](xi))\n for idx, xi in enumerate(x)]\n except TypeError:\n transf = zip(_map2(transf_cbs[0], x), _map2(transf_cbs[1], x))\n try:\n exprs = cb(x, p, be)\n except TypeError:\n exprs = _ensure_3args(cb)(x, p, be)\n return cls(x, _map2l(pre_adj, exprs), transf, p, backend=be, **kwargs)", - "docstring": "Generate a TransformedSys instance from a callback\n\n Parameters\n ----------\n cb : callable\n Should have the signature ``cb(x, p, backend) -> list of exprs``.\n The callback ``cb`` should return *untransformed* expressions.\n transf_cbs : pair or iterable of pairs of callables\n Callables for forward- and backward-transformations. 
Each\n callable should take a single parameter (expression) and\n return a single expression.\n nx : int\n Number of unkowns.\n nparams : int\n Number of parameters.\n pre_adj : callable, optional\n To tweak expression prior to transformation. Takes a\n sinlge argument (expression) and return a single argument\n rewritten expression.\n \\\\*\\\\*kwargs :\n Keyword arguments passed on to :class:`TransformedSys`. See also\n :class:`SymbolicSys` and :class:`pyneqsys.NeqSys`.\n\n Examples\n --------\n >>> import sympy as sp\n >>> transformed = TransformedSys.from_callback(lambda x, p, be: [\n ... x[0]*x[1] - p[0],\n ... be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2\n ... ], (sp.log, sp.exp), 2, 1)\n ..." - }, - { - "code": "def file2abspath(filename, this_file=__file__):\n return os.path.abspath(\n os.path.join(os.path.dirname(os.path.abspath(this_file)), filename))", - "docstring": "generate absolute path for the given file and base dir" - }, - { - "code": "def connect(\n self,\n host_or_hosts,\n port_or_ports=7051,\n rpc_timeout=None,\n admin_timeout=None,\n ):\n self.client = kudu.connect(\n host_or_hosts,\n port_or_ports,\n rpc_timeout_ms=rpc_timeout,\n admin_timeout_ms=admin_timeout,\n )", - "docstring": "Pass-through connection interface to the Kudu client\n\n Parameters\n ----------\n host_or_hosts : string or list of strings\n If you have multiple Kudu masters for HA, pass a list\n port_or_ports : int or list of int, default 7051\n If you pass multiple host names, pass multiple ports\n rpc_timeout : kudu.TimeDelta\n See Kudu client documentation for details\n admin_timeout : kudu.TimeDelta\n See Kudu client documentation for details\n\n Returns\n -------\n None" - }, - { - "code": "def write_moc_fits(moc, filename, **kwargs):\n tbhdu = write_moc_fits_hdu(moc)\n prihdr = fits.Header()\n prihdu = fits.PrimaryHDU(header=prihdr)\n hdulist = fits.HDUList([prihdu, tbhdu])\n hdulist.writeto(filename, **kwargs)", - "docstring": "Write a MOC as a FITS file.\n\n Any additional keyword arguments are passed to the\n astropy.io.fits.HDUList.writeto method." - }, - { - "code": "def train_df(self, df, drop=False):\n if (len(df) == 0) or (len(self) == 0):\n return df\n for sc in self:\n sc.train_df(df)\n return df", - "docstring": "Train scales from a dataframe" - }, - { - "code": "def initialize_extensions(shell, extensions):\n try:\n iter(extensions)\n except TypeError:\n pass\n else:\n for ext in extensions:\n try:\n shell.extension_manager.load_extension(ext)\n except:\n ipy_utils.warn.warn(\n \"Error in loading extension: %s\" % ext +\n \"\\nCheck your config files in %s\" % ipy_utils.path.get_ipython_dir())\n shell.showtraceback()", - "docstring": "Partial copy of `InteractiveShellApp.init_extensions` from IPython." - }, - { - "code": "def hide(self):\n self.tk.withdraw()\n self._visible = False\n if self._modal:\n self.tk.grab_release()", - "docstring": "Hide the window." - }, - { - "code": "def inspiral_range_psd(psd, snr=8, mass1=1.4, mass2=1.4, horizon=False):\n mass1 = units.Quantity(mass1, 'solMass').to('kg')\n mass2 = units.Quantity(mass2, 'solMass').to('kg')\n mtotal = mass1 + mass2\n mchirp = (mass1 * mass2) ** (3/5.) / mtotal ** (1/5.)\n fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')\n prefactor = (\n (1.77**2 * 5 * constants.c ** (1/3.) *\n (mchirp * constants.G / constants.c ** 2) ** (5/3.)) /\n (96 * pi ** (4/3.) * snr ** 2)\n )\n integrand = 1 / psd * psd.frequencies ** (-7/3.) 
* prefactor\n integrand = integrand[psd.frequencies.value < fisco.value]\n if integrand.f0.value == 0.0:\n integrand[0] = 0.0\n if horizon:\n integrand *= 2.26 ** 2\n return integrand.to('Mpc^2 / Hz')", - "docstring": "Compute the inspiral sensitive distance PSD from a GW strain PSD\n\n Parameters\n ----------\n psd : `~gwpy.frequencyseries.FrequencySeries`\n the instrumental power-spectral-density data\n\n snr : `float`, optional\n the signal-to-noise ratio for which to calculate range,\n default: `8`\n\n mass1 : `float`, `~astropy.units.Quantity`, optional\n the mass (`float` assumed in solar masses) of the first binary\n component, default: `1.4`\n\n mass2 : `float`, `~astropy.units.Quantity`, optional\n the mass (`float` assumed in solar masses) of the second binary\n component, default: `1.4`\n\n horizon : `bool`, optional\n if `True`, return the maximal 'horizon' sensitive distance, otherwise\n return the angle-averaged range, default: `False`\n\n Returns\n -------\n rspec : `~gwpy.frequencyseries.FrequencySeries`\n the calculated inspiral sensitivity PSD [Mpc^2 / Hz]" - }, - { - "code": "def sort_vid_split(vs):\n match = var_re.match(vs)\n if match is None:\n raise ValueError('Invalid variable string: {}'.format(str(vs)))\n else:\n return match.groups()", - "docstring": "Split a valid variable string into its variable sort and id.\n\n Examples:\n >>> sort_vid_split('h3')\n ('h', '3')\n >>> sort_vid_split('ref-ind12')\n ('ref-ind', '12')" - }, - { - "code": "def watch(logger_name, level=DEBUG, out=stdout):\n watcher = Watcher(logger_name)\n watcher.watch(level, out)\n return watcher", - "docstring": "Quick wrapper for using the Watcher.\n\n :param logger_name: name of logger to watch\n :param level: minimum log level to show (default INFO)\n :param out: where to send output (default stdout)\n :return: Watcher instance" - }, - { - "code": "def EXPIRING_TOKEN_LIFESPAN(self):\n try:\n val = settings.EXPIRING_TOKEN_LIFESPAN\n except AttributeError:\n val = timedelta(days=30)\n return val", - "docstring": "Return the allowed lifespan of a token as a TimeDelta object.\n\n Defaults to 30 days." - }, - { - "code": "def log_error(msg, logger=\"TaskLogger\"):\n tasklogger = get_tasklogger(logger)\n tasklogger.error(msg)\n return tasklogger", - "docstring": "Log an ERROR message\n\n Convenience function to log a message to the default Logger\n\n Parameters\n ----------\n msg : str\n Message to be logged\n logger : str, optional (default: \"TaskLogger\")\n Unique name of the logger to retrieve\n\n Returns\n -------\n logger : TaskLogger" - }, - { - "code": "def set_residual(self, pores=[], throats=[], overwrite=False):\n r\n Ps = self._parse_indices(pores)\n if overwrite:\n self['pore.residual'] = False\n self['pore.residual'][Ps] = True\n Ts = self._parse_indices(throats)\n if overwrite:\n self['throat.residual'] = False\n self['throat.residual'][Ts] = True", - "docstring": "r\"\"\"\n Specify locations of any residual invader. These locations are set\n to invaded at the start of the simulation.\n\n Parameters\n ----------\n pores : array_like\n The pores locations that are to be filled with invader at the\n beginning of the simulation.\n\n throats : array_like\n The throat locations that are to be filled with invader at the\n beginning of the simulation.\n\n overwrite : boolean\n If ``True`` then all existing inlet locations will be removed and\n then the supplied locations will be added. If ``False``, then\n supplied locations are added to any already existing locations." 
- }, - { - "code": "def add_manager(self, manager):\n select_action = 'add_manager'\n self._update_scope_project_team(select_action=select_action, user=manager, user_type='manager')", - "docstring": "Add a single manager to the scope.\n\n :param manager: single username to be added to the scope list of managers\n :type manager: basestring\n :raises APIError: when unable to update the scope manager" - }, - { - "code": "def get_py_dtypes(data_frame):\n df_py_dtypes = data_frame.dtypes.map(get_py_dtype).to_frame('dtype').copy()\n df_py_dtypes.loc[df_py_dtypes.dtype == object, 'dtype'] = \\\n (df_py_dtypes.loc[df_py_dtypes.dtype == object].index\n .map(lambda c: str if data_frame[c]\n .map(lambda v: isinstance(v, str)).all() else object))\n df_py_dtypes.insert(0, 'i', range(df_py_dtypes.shape[0]))\n df_py_dtypes.index.name = 'column'\n return df_py_dtypes", - "docstring": "Return a `pandas.DataFrame` containing Python type information for the\n columns in `data_frame`.\n\n Args:\n\n data_frame (pandas.DataFrame) : Data frame containing data columns.\n\n Returns:\n\n (pandas.DataFrame) : Data frame indexed by the column names from\n `data_frame`, with the columns `'i'` and `'dtype'` indicating the\n index and Python type of the corresponding `data_frame` column,\n respectively." - }, - { - "code": "def keypress(self, viewer, event, data_x, data_y):\n keyname = event.key\n chname = self.get_channel_name(viewer)\n self.logger.debug(\"key press (%s) in channel %s\" % (\n keyname, chname))\n if keyname == 'Z':\n self.ds.raise_tab('Zoom')\n elif keyname == 'I':\n self.ds.raise_tab('Info')\n elif keyname == 'H':\n self.ds.raise_tab('Header')\n elif keyname == 'C':\n self.ds.raise_tab('Contents')\n elif keyname == 'D':\n self.ds.raise_tab('Dialogs')\n elif keyname == 'F':\n self.build_fullscreen()\n elif keyname == 'f':\n self.toggle_fullscreen()\n elif keyname == 'm':\n self.maximize()\n elif keyname == '<':\n self.collapse_pane('left')\n elif keyname == '>':\n self.collapse_pane('right')\n elif keyname == 'n':\n self.next_channel()\n elif keyname == 'J':\n self.cycle_workspace_type()\n elif keyname == 'k':\n self.add_channel_auto()\n elif keyname == 'K':\n self.remove_channel_auto()\n elif keyname == 'f1':\n self.show_channel_names()\n elif keyname in ('up',):\n self.prev_img()\n elif keyname in ('down',):\n self.next_img()\n elif keyname in ('left',):\n self.prev_channel()\n elif keyname in ('right',):\n self.next_channel()\n return True", - "docstring": "Key press event in a channel window." 
- }, - { - "code": "def enter_position(self):\n state = self.state\n dlg = wx.TextEntryDialog(self, 'Enter new position', 'Position')\n dlg.SetValue(\"%f %f\" % (state.lat, state.lon))\n if dlg.ShowModal() == wx.ID_OK:\n latlon = dlg.GetValue().split()\n dlg.Destroy()\n state.lat = float(latlon[0])\n state.lon = float(latlon[1])\n self.re_center(state.width/2,state.height/2, state.lat, state.lon)\n self.redraw_map()", - "docstring": "enter new position" - }, - { - "code": "def return_dat(self, chan, begsam, endsam):\n dat = empty((len(chan), endsam - begsam))\n dat.fill(NaN)\n stc, all_stamp = _read_stc(self._filename.with_suffix('.stc'))\n all_erd = all_stamp['segment_name'].astype('U')\n all_beg = all_stamp['start_stamp']\n all_end = all_stamp['end_stamp']\n try:\n begrec = where((all_end >= begsam))[0][0]\n endrec = where((all_beg < endsam))[0][-1]\n except IndexError:\n return dat\n for rec in range(begrec, endrec + 1):\n begpos_rec = max(begsam, all_beg[rec])\n endpos_rec = min(endsam, all_end[rec] + 1)\n d1 = begpos_rec - begsam\n d2 = endpos_rec - begsam\n erd_file = (Path(self.filename) / all_erd[rec]).with_suffix('.erd')\n try:\n dat_rec = _read_erd(erd_file, begpos_rec, endpos_rec)\n dat[:, d1:d2] = dat_rec[chan, :]\n except (FileNotFoundError, PermissionError):\n lg.warning('{} does not exist'.format(erd_file))\n return dat", - "docstring": "Read the data based on begsam and endsam.\n\n Parameters\n ----------\n chan : list of int\n list of channel indeces\n begsam : int\n index of the first sample\n endsam :\n index of the last sample\n\n Returns\n -------\n ndarray\n 2-d matrix with data (might contain NaN)\n\n Notes\n -----\n The sample numbering is not based on the samples in the files (i.e.\n the first sample of the first file is NOT the first sample of the\n dataset) because it depends on the stamps in the STC file. Usually, the\n recording starts and after a few millisecond (maybe one second), the\n actual acquisition starts. STC takes the offset into account. This has\n the counterintuitive result that if you call read_data, the first few\n hundreds samples are nan." - }, - { - "code": "def _get_num_tokens_from_first_line(line: str) -> Optional[int]:\n fields = line.split(' ')\n if 1 <= len(fields) <= 2:\n try:\n int_fields = [int(x) for x in fields]\n except ValueError:\n return None\n else:\n num_tokens = max(int_fields)\n logger.info('Recognized a header line in the embedding file with number of tokens: %d',\n num_tokens)\n return num_tokens\n return None", - "docstring": "This function takes in input a string and if it contains 1 or 2 integers, it assumes the\n largest one it the number of tokens. Returns None if the line doesn't match that pattern." - }, - { - "code": "def _maybe_nest_bare_single(items_by_key, parallel):\n if (parallel == \"multi-parallel\" and\n (sum([1 for x in items_by_key.values() if not _is_nested_item(x)]) >=\n sum([1 for x in items_by_key.values() if _is_nested_item(x)]))):\n out = {}\n for k, v in items_by_key.items():\n out[k] = [v]\n return out\n else:\n return items_by_key", - "docstring": "Nest single inputs to avoid confusing single items and lists like files." 
- }, - { - "code": "def to_unicode(text, charset=None):\n\tif not isinstance(text, str):\n\t\tif isinstance(text, Exception):\n\t\t\ttry:\n\t\t\t\treturn unicode(text)\n\t\t\texcept UnicodeError:\n\t\t\t\treturn ' '.join([to_unicode(arg) for arg in text.args])\n\t\treturn unicode(text)\n\tif charset:\n\t\treturn unicode(text, charset, 'replace')\n\telse:\n\t\ttry:\n\t\t\treturn unicode(text, 'utf-8')\n\t\texcept UnicodeError:\n\t\t\treturn unicode(text, locale.getpreferredencoding(), 'replace')", - "docstring": "Convert a `str` object to an `unicode` object.\n\n\tIf `charset` is given, we simply assume that encoding for the text,\n\tbut we'll use the \"replace\" mode so that the decoding will always\n\tsucceed.\n\tIf `charset` is ''not'' specified, we'll make some guesses, first\n\ttrying the UTF-8 encoding, then trying the locale preferred encoding,\n\tin \"replace\" mode. This differs from the `unicode` builtin, which\n\tby default uses the locale preferred encoding, in 'strict' mode,\n\tand is therefore prompt to raise `UnicodeDecodeError`s.\n\n\tBecause of the \"replace\" mode, the original content might be altered.\n\tIf this is not what is wanted, one could map the original byte content\n\tby using an encoding which maps each byte of the input to an unicode\n\tcharacter, e.g. by doing `unicode(text, 'iso-8859-1')`." - }, - { - "code": "def get_info(self):\n info_response = self.send_command(\"show info\")\n if not info_response:\n return {}\n def convert_camel_case(string):\n return all_cap_re.sub(\n r'\\1_\\2',\n first_cap_re.sub(r'\\1_\\2', string)\n ).lower()\n return dict(\n (convert_camel_case(label), value)\n for label, value in [\n line.split(\": \")\n for line in info_response.split(\"\\n\")\n ]\n )", - "docstring": "Parses the output of a \"show info\" HAProxy command and returns a\n simple dictionary of the results." - }, - { - "code": "def set_float(val):\n out = None\n if not val in (None, ''):\n try:\n out = float(val)\n except ValueError:\n return None\n if numpy.isnan(out):\n out = default\n return out", - "docstring": "utility to set a floating value,\n useful for converting from strings" - }, - { - "code": "def _get_params(self):\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", - "docstring": "return the value of the parameters." - }, - { - "code": "def start_index(self):\n paginator = self.paginator\n if paginator.count == 0:\n return 0\n elif self.number == 1:\n return 1\n return (\n (self.number - 2) * paginator.per_page + paginator.first_page + 1)", - "docstring": "Return the 1-based index of the first item on this page." 
- }, - { - "code": "def set_window_focus_callback(window, cbfun):\n window_addr = ctypes.cast(ctypes.pointer(window),\n ctypes.POINTER(ctypes.c_long)).contents.value\n if window_addr in _window_focus_callback_repository:\n previous_callback = _window_focus_callback_repository[window_addr]\n else:\n previous_callback = None\n if cbfun is None:\n cbfun = 0\n c_cbfun = _GLFWwindowfocusfun(cbfun)\n _window_focus_callback_repository[window_addr] = (cbfun, c_cbfun)\n cbfun = c_cbfun\n _glfw.glfwSetWindowFocusCallback(window, cbfun)\n if previous_callback is not None and previous_callback[0] != 0:\n return previous_callback[0]", - "docstring": "Sets the focus callback for the specified window.\n\n Wrapper for:\n GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* window, GLFWwindowfocusfun cbfun);" - }, - { - "code": "def run_example(path):\n cmd = \"{0} {1}\".format(sys.executable, path)\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = proc.communicate()\n if proc.returncode:\n print(res[1].decode())\n return proc.returncode", - "docstring": "Returns returncode of example" - }, - { - "code": "def quality_to_bitmap(quality):\n if quality not in QUALITIES:\n raise InvalidChordException(\n \"Unsupported chord quality shorthand: '%s' \"\n \"Did you mean to reduce extended chords?\" % quality)\n return np.array(QUALITIES[quality])", - "docstring": "Return the bitmap for a given quality.\n\n Parameters\n ----------\n quality : str\n Chord quality name.\n\n Returns\n -------\n bitmap : np.ndarray\n Bitmap representation of this quality (12-dim)." - }, - { - "code": "def get_item_lookup_session_for_bank(self, bank_id):\n if not self.supports_item_lookup():\n raise errors.Unimplemented()\n return sessions.ItemLookupSession(bank_id, runtime=self._runtime)", - "docstring": "Gets the ``OsidSession`` associated with the item lookup service for the given bank.\n\n arg: bank_id (osid.id.Id): the ``Id`` of the bank\n return: (osid.assessment.ItemLookupSession) - ``an\n _item_lookup_session``\n raise: NotFound - ``bank_id`` not found\n raise: NullArgument - ``bank_id`` is ``null``\n raise: OperationFailed - ``unable to complete request``\n raise: Unimplemented - ``supports_item_lookup()`` or\n ``supports_visible_federation()`` is ``false``\n *compliance: optional -- This method must be implemented if\n ``supports_item_lookup()`` and ``supports_visible_federation()``\n are ``true``.*" - }, - { - "code": "def generate_root(self, key, nonce):\n params = {\n 'key': key,\n 'nonce': nonce,\n }\n api_path = '/v1/sys/generate-root/update'\n response = self._adapter.put(\n url=api_path,\n json=params,\n )\n return response.json()", - "docstring": "Enter a single master key share to progress the root generation attempt.\n\n If the threshold number of master key shares is reached, Vault will complete the root generation and issue the\n new token. Otherwise, this API must be called multiple times until that threshold is met. The attempt nonce must\n be provided with each call.\n\n Supported methods:\n PUT: /sys/generate-root/update. 
Produces: 200 application/json\n\n :param key: Specifies a single master key share.\n :type key: str | unicode\n :param nonce: The nonce of the attempt.\n :type nonce: str | unicode\n :return: The JSON response of the request.\n :rtype: dict" - }, - { - "code": "def upload_marcxml(self, marcxml, mode):\n if mode not in [\"-i\", \"-r\", \"-c\", \"-a\", \"-ir\"]:\n raise NameError, \"Incorrect mode \" + str(mode)\n if self.local:\n (code, marcxml_filepath) = tempfile.mkstemp(prefix=\"upload_%s\" % \\\n time.strftime(\"%Y%m%d_%H%M%S_\",\n time.localtime()))\n marcxml_file_d = os.fdopen(code, \"w\")\n marcxml_file_d.write(marcxml)\n marcxml_file_d.close()\n return task_low_level_submission(\"bibupload\", \"\", mode, marcxml_filepath)\n else:\n params = urllib.urlencode({'file': marcxml,\n 'mode': mode})\n opener = urllib2.build_opener()\n opener.addheaders = [('User-Agent', CFG_USER_AGENT)]\n return opener.open(self.server_url + \"/batchuploader/robotupload\", params,)", - "docstring": "Uploads a record to the server\n\n Parameters:\n marcxml - *str* the XML to upload.\n mode - *str* the mode to use for the upload.\n \"-i\" insert new records\n \"-r\" replace existing records\n \"-c\" correct fields of records\n \"-a\" append fields to records\n \"-ir\" insert record or replace if it exists" - }, - { - "code": "def compute_video_metrics_from_predictions(predictions, decode_hparams):\n all_results = {}\n ssim_all_decodes, psnr_all_decodes = [], []\n for single_decode in predictions:\n args = get_zipped_dataset_from_predictions(single_decode)\n psnr_single, ssim_single = compute_one_decoding_video_metrics(*args)\n psnr_all_decodes.append(psnr_single)\n ssim_all_decodes.append(ssim_single)\n psnr_all_decodes = np.array(psnr_all_decodes)\n ssim_all_decodes = np.array(ssim_all_decodes)\n all_results.update({\"PSNR\": psnr_all_decodes, \"SSIM\": ssim_all_decodes})\n return compute_all_metrics_statistics(all_results)", - "docstring": "Computes metrics from predictions.\n\n Args:\n predictions: list of list of dicts.\n outer length: num_decodes, inner_length: num_samples\n decode_hparams: Decode hparams. instance of HParams.\n Returns:\n statistics: dict of Tensors, key being the metric with each Tensor\n having the shape (num_samples, num_frames)." - }, - { - "code": "def clear(self):\r\n for act in self.actions():\r\n act.setParent(None)\r\n act.deleteLater()\r\n for lbl in self.actionLabels():\r\n lbl.close()\r\n lbl.deleteLater()", - "docstring": "Clears out all the actions and items from this toolbar." - }, - { - "code": "def timeslice_generator(self):\n slice_id = 0\n while slice_id < self.n_timeslices:\n blob = self.get_blob(slice_id)\n yield blob\n slice_id += 1", - "docstring": "Uses slice ID as iterator" - }, - { - "code": "def fillkeys(Recs):\n keylist, OutRecs = [], []\n for rec in Recs:\n for key in list(rec.keys()):\n if key not in keylist:\n keylist.append(key)\n for rec in Recs:\n for key in keylist:\n if key not in list(rec.keys()):\n rec[key] = \"\"\n OutRecs.append(rec)\n return OutRecs, keylist", - "docstring": "reconciles keys of dictionaries within Recs." 
- }, - { - "code": "def MultifactorSchedule(history=None,\n factors=\"constant * linear_warmup * rsqrt_decay\",\n constant=0.1,\n warmup_steps=100,\n decay_factor=0.5,\n steps_per_decay=20000):\n del history\n cache_args = (factors, constant, warmup_steps)\n if cache_args in _memoized_multifactor_schedules:\n return _memoized_multifactor_schedules[cache_args]\n factors = [n.strip() for n in factors.split(\"*\")]\n def learning_rate(step):\n ret = 1.0\n for name in factors:\n if name == \"constant\":\n ret *= constant\n elif name == \"linear_warmup\":\n ret *= np.minimum(1.0, step / warmup_steps)\n elif name == \"rsqrt_decay\":\n ret /= np.sqrt(np.maximum(step, warmup_steps))\n elif name == \"decay_every\":\n ret *= (decay_factor ** (step//steps_per_decay))\n else:\n raise ValueError(\"Unknown factor %s.\" % name)\n return ret\n _memoized_multifactor_schedules[cache_args] = learning_rate\n return learning_rate", - "docstring": "Factor-based learning rate schedule.\n\n Interprets factors in the factors string which can consist of:\n * constant: interpreted as the constant value,\n * linear_warmup: interpreted as linear warmup until warmup_steps,\n * rsqrt_decay: divide by square root of max(step, warmup_steps)\n * decay_every: Every k steps decay the learning rate by decay_factor.\n\n Args:\n history: the history of training and evaluation (History object).\n factors: a string with factors separated by \"*\" that defines the schedule.\n constant: float, the starting constant for the learning rate schedule.\n warmup_steps: how many steps to warm up for in the warmup schedule.\n decay_factor: The amount to decay the learning rate by.\n steps_per_decay: How often to decay the learning rate.\n\n Returns:\n a function learning_rate(step): float -> float, the step-dependent lr." 
- }, - { - "code": "def get_instance(self, payload):\n return DialogueInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )", - "docstring": "Build an instance of DialogueInstance\n\n :param dict payload: Payload response from the API\n\n :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance\n :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance" - }, - { - "code": "def pcolor(text, color, indent=0):\n r\n esc_dict = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n \"none\": -1,\n }\n if not isinstance(text, str):\n raise RuntimeError(\"Argument `text` is not valid\")\n if not isinstance(color, str):\n raise RuntimeError(\"Argument `color` is not valid\")\n if not isinstance(indent, int):\n raise RuntimeError(\"Argument `indent` is not valid\")\n color = color.lower()\n if color not in esc_dict:\n raise ValueError(\"Unknown color {color}\".format(color=color))\n if esc_dict[color] != -1:\n return \"\\033[{color_code}m{indent}{text}\\033[0m\".format(\n color_code=esc_dict[color], indent=\" \" * indent, text=text\n )\n return \"{indent}{text}\".format(indent=\" \" * indent, text=text)", - "docstring": "r\"\"\"\n Return a string that once printed is colorized.\n\n :param text: Text to colorize\n :type text: string\n\n :param color: Color to use, one of :code:`'black'`, :code:`'red'`,\n :code:`'green'`, :code:`'yellow'`, :code:`'blue'`,\n :code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or\n :code:`'none'` (case insensitive)\n :type color: string\n\n :param indent: Number of spaces to prefix the output with\n :type indent: integer\n\n :rtype: string\n\n :raises:\n * RuntimeError (Argument \\`color\\` is not valid)\n\n * RuntimeError (Argument \\`indent\\` is not valid)\n\n * RuntimeError (Argument \\`text\\` is not valid)\n\n * ValueError (Unknown color *[color]*)" - }, - { - "code": "def get_formset(self, data=None, queryset=None):\n if not self.can_submit:\n return None\n FormSet = self.get_formset_class()\n if queryset is None:\n queryset = self.get_queryset()\n if FormSet:\n if data:\n queryset = self._add_formset_id(data, queryset)\n return FormSet(data, queryset=queryset)", - "docstring": "Returns an instantiated FormSet if available.\n If `self.can_submit` is False then no formset\n is returned." - }, - { - "code": "async def proposal(self):\n proposals = await aionationstates.wa.proposals()\n for proposal in proposals:\n if (proposal.name == self.proposal_name):\n return proposal\n raise aionationstates.NotFound", - "docstring": "Get the proposal in question.\n\n Actually just the first proposal with the same name, but the\n chance of a collision is tiny.\n\n Returns\n -------\n awaitable of :class:`aionationstates.Proposal`\n The proposal submitted.\n\n Raises\n ------\n aionationstates.NotFound\n If the proposal has since been withdrawn or promoted." 
- }, - { - "code": "def _fields_list_to_dict(fields):\n for key in fields:\n assert isinstance(key, (str,unicode))\n return dict([[key, 1] for key in fields])", - "docstring": "Takes a list of field names and returns a matching dictionary.\n\n [\"a\", \"b\"] becomes {\"a\": 1, \"b\": 1}\n\n and\n\n [\"a.b.c\", \"d\", \"a.c\"] becomes {\"a.b.c\": 1, \"d\": 1, \"a.c\": 1}" - }, - { - "code": "def select_with_index(\n self,\n selector=IndexedElement,\n transform=identity):\n if self.closed():\n raise ValueError(\"Attempt to call select_with_index() on a \"\n \"closed Queryable.\")\n if not is_callable(selector):\n raise TypeError(\"select_with_index() parameter selector={0} is \"\n \"not callable\".format(repr(selector)))\n if not is_callable(transform):\n raise TypeError(\"select_with_index() parameter item_selector={0} is \"\n \"not callable\".format(repr(selector)))\n return self._create(itertools.starmap(selector, enumerate(imap(transform, iter(self)))))", - "docstring": "Transforms each element of a sequence into a new form, incorporating\n the index of the element.\n\n Each element is transformed through a selector function which accepts\n the element value and its zero-based index in the source sequence. The\n generated sequence is lazily evaluated.\n\n Note: This method uses deferred execution.\n\n Args:\n selector: A binary function mapping the index of a value in\n the source sequence and the element value itself to the\n corresponding value in the generated sequence. The two\n positional arguments of the selector function are the zero-\n based index of the current element and the value of the current\n element. The return value should be the corresponding value in\n the result sequence. The default selector produces an IndexedElement\n containing the index and the element giving this function\n similar behaviour to the built-in enumerate().\n\n Returns:\n A Queryable whose elements are the result of invoking the selector\n function on each element of the source sequence\n\n Raises:\n ValueError: If this Queryable has been closed.\n TypeError: If selector is not callable." - }, - { - "code": "def demographics(self):\n body = {\n \"audience_definition\": self.audience_definition,\n \"targeting_inputs\": self.targeting_inputs\n }\n resource = self.RESOURCE_DEMOGRAPHICS.format(account_id=self.account.id)\n response = Request(\n self.account.client, self.METHOD,\n resource, headers=self.HEADERS, body=json.dumps(body)).perform()\n return response.body['data']", - "docstring": "Get the demographic breakdown for an input targeting criteria" - }, - { - "code": "def _expand_var(self, in_string, available_variables):\n instances = self._get_instances(in_string)\n for instance in instances:\n for name, value in available_variables.items():\n variable_string = self._get_variable_string(name)\n if instance == variable_string:\n in_string = in_string.replace(variable_string, value)\n return in_string", - "docstring": "Expand variable to its corresponding value in_string\n\n :param string variable: variable name\n :param value: value to replace with\n :param string in_string: the string to replace in" - }, - { - "code": "def pid(self):\n if hasattr(self.subprocess, 'proc'):\n return self.subprocess.proc.pid\n return self.subprocess.pid", - "docstring": "The process' PID." 
- }, - { - "code": "def _row_heights2pys(self):\n for row, tab in self.code_array.dict_grid.row_heights:\n if row < self.code_array.shape[0] and \\\n tab < self.code_array.shape[2]:\n height = self.code_array.dict_grid.row_heights[(row, tab)]\n height_strings = map(repr, [row, tab, height])\n self.pys_file.write(u\"\\t\".join(height_strings) + u\"\\n\")", - "docstring": "Writes row_heights to pys file\n\n Format: \\t\\t\\n" - }, - { - "code": "def add_family(self, major_number):\n keys = ['unreleased_bugfix', 'unreleased_feature']\n if major_number == 0 and self.config.releases_unstable_prehistory:\n keys = ['unreleased']\n self[major_number] = {key: [] for key in keys}", - "docstring": "Expand to a new release line with given ``major_number``.\n\n This will flesh out mandatory buckets like ``unreleased_bugfix`` and do\n other necessary bookkeeping." - }, - { - "code": "def reset(self, **kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])\n self.command = self.COMMAND_RESET", - "docstring": "Reset all of the motor parameter attributes to their default value.\n This will also have the effect of stopping the motor." - }, - { - "code": "def get_unit_by_name(self, unit_name: str) -> typing.Optional['BaseUnit']:\n VALID_STR.validate(unit_name, 'get_unit_by_name')\n for unit in self.units:\n if unit.unit_name == unit_name:\n return unit\n return None", - "docstring": "Gets a unit from its name\n\n Args:\n unit_name: unit name\n\n Returns:" - }, - { - "code": "def create_db_info():\n result = {}\n result['instrument'] = ''\n result['uuid'] = ''\n result['tags'] = {}\n result['type'] = ''\n result['mode'] = ''\n result['observation_date'] = \"\"\n result['origin'] = {}\n return result", - "docstring": "Create metadata structure" - }, - { - "code": "def on_info_click(self, event):\n def on_close(event, wind):\n wind.Close()\n wind.Destroy()\n event.Skip()\n wind = wx.PopupTransientWindow(self, wx.RAISED_BORDER)\n if self.auto_save.GetValue():\n info = \"'auto-save' is currently selected. Temperature bounds will be saved when you click 'next' or 'back'.\"\n else:\n info = \"'auto-save' is not selected. Temperature bounds will only be saved when you click 'save'.\"\n text = wx.StaticText(wind, -1, info)\n box = wx.StaticBox(wind, -1, 'Info:')\n boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)\n boxSizer.Add(text, 5, wx.ALL | wx.CENTER)\n exit_btn = wx.Button(wind, wx.ID_EXIT, 'Close')\n wind.Bind(wx.EVT_BUTTON, lambda evt: on_close(evt, wind), exit_btn)\n boxSizer.Add(exit_btn, 5, wx.ALL | wx.CENTER)\n wind.SetSizer(boxSizer)\n wind.Layout()\n wind.Popup()", - "docstring": "Show popup info window when user clicks \"?\"" - }, - { - "code": "def roles_accepted(*accepted_rolenames):\n def decorator(method):\n @functools.wraps(method)\n def wrapper(*args, **kwargs):\n role_set = set([str(n) for n in accepted_rolenames])\n _verify_and_add_jwt()\n try:\n MissingRoleError.require_condition(\n not current_rolenames().isdisjoint(role_set),\n \"This endpoint requires one of the following roles: {}\",\n [', '.join(role_set)],\n )\n return method(*args, **kwargs)\n finally:\n remove_jwt_data_from_app_context()\n return wrapper\n return decorator", - "docstring": "This decorator ensures that any uses accessing the decorated route have one\n of the needed roles to access it. 
If an @auth_required decorator is not\n supplied already, this decorator will implicitly check @auth_required first" - }, - { - "code": "def register_metric_descriptor(self, oc_md):\n metric_type = self.get_metric_type(oc_md)\n with self._md_lock:\n if metric_type in self._md_cache:\n return self._md_cache[metric_type]\n descriptor = self.get_metric_descriptor(oc_md)\n project_name = self.client.project_path(self.options.project_id)\n sd_md = self.client.create_metric_descriptor(project_name, descriptor)\n with self._md_lock:\n self._md_cache[metric_type] = sd_md\n return sd_md", - "docstring": "Register a metric descriptor with stackdriver." - }, - { - "code": "def _complete_type_chain(self, symbol, fullsymbol):\n target, targmod = self._get_chain_parent_symbol(symbol, fullsymbol)\n if target is None:\n return {}\n result = {}\n if symbol != \"\":\n if self.context.el_call != \"sub\":\n for mkey in target.members:\n if self._symbol_in(symbol, mkey):\n result[mkey] = target.members[mkey]\n for ekey in target.executables:\n if (self._symbol_in(symbol, ekey)):\n if self.context.el_call == \"sub\":\n if (isinstance(target.executables[ekey], Subroutine)):\n result[ekey] = target.executables[ekey]\n else:\n if (isinstance(target.executables[ekey], Function)):\n result[ekey] = target.executables[ekey]\n else:\n if self.context.el_call != \"sub\":\n result.update(target.members)\n subdict = {k: target.executables[k] for k in target.executables\n if isinstance(target.executables[k].target, Function)}\n result.update(subdict)\n else:\n subdict = {k: target.executables[k] for k in target.executables\n if isinstance(target.executables[k].target, Subroutine)}\n result.update(subdict)\n return result", - "docstring": "Suggests completion for the end of a type chain." 
- }, - { - "code": "def arccalibration(wv_master,\n xpos_arc,\n naxis1_arc,\n crpix1,\n wv_ini_search,\n wv_end_search,\n wvmin_useful,\n wvmax_useful,\n error_xpos_arc,\n times_sigma_r,\n frac_triplets_for_sum,\n times_sigma_theil_sen,\n poly_degree_wfit,\n times_sigma_polfilt,\n times_sigma_cook,\n times_sigma_inclusion,\n geometry=None,\n debugplot=0):\n ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \\\n gen_triplets_master(wv_master=wv_master, geometry=geometry,\n debugplot=debugplot)\n list_of_wvfeatures = arccalibration_direct(\n wv_master=wv_master,\n ntriplets_master=ntriplets_master,\n ratios_master_sorted=ratios_master_sorted,\n triplets_master_sorted_list=triplets_master_sorted_list,\n xpos_arc=xpos_arc,\n naxis1_arc=naxis1_arc,\n crpix1=crpix1,\n wv_ini_search=wv_ini_search,\n wv_end_search=wv_end_search,\n wvmin_useful=wvmin_useful,\n wvmax_useful=wvmax_useful,\n error_xpos_arc=error_xpos_arc,\n times_sigma_r=times_sigma_r,\n frac_triplets_for_sum=frac_triplets_for_sum,\n times_sigma_theil_sen=times_sigma_theil_sen,\n poly_degree_wfit=poly_degree_wfit,\n times_sigma_polfilt=times_sigma_polfilt,\n times_sigma_cook=times_sigma_cook,\n times_sigma_inclusion=times_sigma_inclusion,\n geometry=geometry,\n debugplot=debugplot)\n return list_of_wvfeatures", - "docstring": "Performs arc line identification for arc calibration.\n\n This function is a wrapper of two functions, which are responsible\n of computing all the relevant information concerning the triplets\n generated from the master table and the actual identification\n procedure of the arc lines, respectively.\n\n The separation of those computations in two different functions\n helps to avoid the repetition of calls to the first function when\n calibrating several arcs using the same master table.\n\n Parameters\n ----------\n wv_master : 1d numpy array, float\n Array with wavelengths corresponding to the master table\n (Angstroms).\n xpos_arc : 1d numpy array, float\n Location of arc lines (pixels).\n naxis1_arc : int\n NAXIS1 for arc spectrum.\n crpix1 : float\n CRPIX1 value to be employed in the wavelength calibration.\n wv_ini_search : float\n Minimum expected wavelength in spectrum.\n wv_end_search : float\n Maximum expected wavelength in spectrum.\n wvmin_useful : float\n If not None, this value is used to clip detected lines below it.\n wvmax_useful : float\n If not None, this value is used to clip detected lines above it.\n error_xpos_arc : float\n Error in arc line position (pixels).\n times_sigma_r : float\n Times sigma to search for valid line position ratios.\n frac_triplets_for_sum : float\n Fraction of distances to different triplets to sum when\n computing the cost function.\n times_sigma_theil_sen : float\n Number of times the (robust) standard deviation around the\n linear fit (using the Theil-Sen method) to reject points.\n poly_degree_wfit : int\n Degree for polynomial fit to wavelength calibration.\n times_sigma_polfilt : float\n Number of times the (robust) standard deviation around the\n polynomial fit to reject points.\n times_sigma_cook : float\n Number of times the standard deviation of Cook's distances\n to detect outliers. 
If zero, this method of outlier detection\n is ignored.\n times_sigma_inclusion : float\n Number of times the (robust) standard deviation around the\n polynomial fit to include a new line in the set of identified\n lines.\n geometry : tuple (4 integers) or None\n x, y, dx, dy values employed to set the window geometry.\n debugplot : int\n Determines whether intermediate computations and/or plots\n are displayed. The valid codes are defined in\n numina.array.display.pause_debugplot.\n\n Returns\n -------\n list_of_wvfeatures : list (of WavecalFeature instances)\n A list of size equal to the number of identified lines, which\n elements are instances of the class WavecalFeature, containing\n all the relevant information concerning the line\n identification." - }, - { - "code": "def get_event(self):\n if self.event_queue.qsize() == 0:\n return None\n evt = self.event_queue.get()\n while isinstance(evt, win_layout.WinLayout):\n win_layout.set_layout(evt, self.set_layout)\n if self.event_queue.qsize() == 0:\n return None\n evt = self.event_queue.get()\n return evt", - "docstring": "return next event or None" - }, - { - "code": "def _colorize(val, color):\n if termcolor is not None:\n val = termcolor.colored(val, color)\n elif colorama is not None:\n val = TERMCOLOR2COLORAMA[color] + val + colorama.Style.RESET_ALL\n return val", - "docstring": "Colorize a string using termcolor or colorama.\n\n If any of them are available." - }, - { - "code": "def get_helper(name=None, quiet=True, **kwargs):\n from helpme.defaults import HELPME_CLIENT\n if name is not None:\n HELPME_CLIENT = name\n if HELPME_CLIENT == 'github': from .github import Helper;\n elif HELPME_CLIENT == 'uservoice': from .uservoice import Helper\n elif HELPME_CLIENT == 'discourse': from .discourse import Helper\n else: from .github import Helper\n Helper.name = HELPME_CLIENT\n Helper.quiet = quiet\n return Helper()", - "docstring": "get the correct helper depending on the environment variable\n HELPME_CLIENT\n\n quiet: if True, suppress most output about the client (e.g. speak)" - }, - { - "code": "def confirm_cw_log(self, account, region, vpcname):\n try:\n cw = self.session.client('logs', region)\n token = None\n log_groups = []\n while True:\n result = cw.describe_log_groups() if not token else cw.describe_log_groups(nextToken=token)\n token = result.get('nextToken')\n log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])\n if not token:\n break\n if vpcname not in log_groups:\n cw.create_log_group(logGroupName=vpcname)\n cw_vpc = VPC.get(vpcname)\n cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)\n self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))\n auditlog(\n event='vpc_flow_logs.create_cw_log_group',\n actor=self.ns,\n data={\n 'account': account.account_name,\n 'region': region,\n 'log_group_name': vpcname,\n 'vpc': vpcname\n }\n )\n return True\n except Exception:\n self.log.exception('Failed creating log group for {}/{}/{}.'.format(\n account,\n region, vpcname\n ))", - "docstring": "Create a new CloudWatch log group based on the VPC Name if none exists. 
Returns `True` if successful\n\n Args:\n account (:obj:`Account`): Account to create the log group in\n region (`str`): Region to create the log group in\n vpcname (`str`): Name of the VPC the log group is for\n\n Returns:\n `bool`" - }, - { - "code": "def count(self):\n return (\n self\n .mapPartitions(lambda p: [sum(1 for _ in p)])\n .reduce(operator.add)\n )", - "docstring": "Count elements per RDD.\n\n Creates a new RDD stream where each RDD has a single entry that\n is the count of the elements.\n\n :rtype: DStream" - }, - { - "code": "def guard(ctx, opts=\"\"):\n return test(ctx, include_slow=True, loop_on_fail=True, opts=opts)", - "docstring": "Execute all tests and then watch for changes, re-running." - }, - { - "code": "def _visible_layers_count(self):\n treeroot = QgsProject.instance().layerTreeRoot()\n return len([lyr for lyr in treeroot.findLayers() if lyr.isVisible()])", - "docstring": "Calculate the number of visible layers in the legend.\n\n .. versionadded: 3.1\n\n :returns: Count of layers that are actually visible.\n :rtype: int" - }, - { - "code": "def _walk(self, target, visitor):\n visited = set()\n def walk(current):\n if current not in visited:\n visited.add(current)\n keep_going = visitor(current)\n if keep_going:\n for dependency in self.dependencies(current):\n walk(dependency)\n walk(target)", - "docstring": "Walks the dependency graph for the given target.\n\n :param target: The target to start the walk from.\n :param visitor: A function that takes a target and returns `True` if its dependencies should\n also be visited." - }, - { - "code": "def accel_zoom_out(self, *args):\n for term in self.get_notebook().iter_terminals():\n term.decrease_font_size()\n return True", - "docstring": "Callback to zoom out." - }, - { - "code": "def get_code(self):\n stub = []\n for child in self.variables:\n stub.extend(child.get_code())\n if (\n (len(self.variables) > 0)\n and (len(self.children) > 0)\n and (not isinstance(self, ClassNode))\n ):\n stub.append(\"\")\n for child in self.children:\n stub.extend(child.get_code())\n return stub", - "docstring": "Get the stub code for this node.\n\n The stub code for a node consists of the type annotations of its variables,\n followed by the prototypes of its functions/methods and classes.\n\n :sig: () -> List[str]\n :return: Lines of stub code for this node."
- }, - { - "code": "def get_authorization_query_session_for_vault(self, vault_id):\n if not self.supports_authorization_query():\n raise errors.Unimplemented()\n return sessions.AuthorizationQuerySession(vault_id, runtime=self._runtime)", - "docstring": "Gets the ``OsidSession`` associated with the authorization query service for the given vault.\n\n arg: vault_id (osid.id.Id): the ``Id`` of the vault\n return: (osid.authorization.AuthorizationQuerySession) - ``an\n _authorization_query_session``\n raise: NotFound - ``vault_id`` not found\n raise: NullArgument - ``vault_id`` is ``null``\n raise: OperationFailed - ``unable to complete request``\n raise: Unimplemented - ``supports_authorization_query()`` or\n ``supports_visible_federation()`` is ``false``\n *compliance: optional -- This method must be implemented if\n ``supports_authorization_query()`` and\n ``supports_visible_federation()`` are ``true``.*" - }, - { - "code": "def plot_pseudosections(self, column, filename=None, return_fig=False):\n assert column in self.data.columns\n g = self.data.groupby('frequency')\n fig, axes = plt.subplots(\n 4, 2,\n figsize=(15 / 2.54, 20 / 2.54),\n sharex=True, sharey=True\n )\n for ax, (key, item) in zip(axes.flat, g):\n fig, ax, cb = PS.plot_pseudosection_type2(\n item, ax=ax, column=column\n )\n ax.set_title('f: {} Hz'.format(key))\n fig.tight_layout()\n if filename is not None:\n fig.savefig(filename, dpi=300)\n if return_fig:\n return fig\n else:\n plt.close(fig)", - "docstring": "Create a multi-plot with one pseudosection for each frequency.\n\n Parameters\n ----------\n column : string\n which column to plot\n filename : None|string\n output filename. If set to None, do not write to file. Default:\n None\n return_fig : bool\n if True, return the generated figure object. Default: False\n\n Returns\n -------\n fig : None|matplotlib.Figure\n if return_fig is set to True, return the generated Figure object" - }, - { - "code": "def make_menu(self):\n menu = wx.Menu()\n item = menu.Append(-1, \"Recent Searches\")\n item.Enable(False)\n for __id, txt in enumerate(self.search_history):\n menu.Append(__id, txt)\n return menu", - "docstring": "Creates the search menu" - }, - { - "code": "def clear(self):\n for field in self.__privfields__:\n delattr(self, field)\n setattr(self, field, MPI(0))", - "docstring": "delete and re-initialize all private components to zero" - }, - { - "code": "def view_set(method_name):\n def view_set(value, context, **_params):\n method = getattr(context[\"view\"], method_name)\n return _set(method, context[\"key\"], value, (), {})\n return view_set", - "docstring": "Creates a setter that will call the view method with the context's\n key as first parameter and the value as second parameter.\n @param method_name: the name of a method belonging to the view.\n @type method_name: str" - }, - { - "code": "def get_filter(cls, mimetype):\n filters = ' '.join(\n ['*%s' % ext for ext in mimetypes.guess_all_extensions(mimetype)])\n return '%s (%s)' % (mimetype, filters)", - "docstring": "Returns a filter string for the file dialog. The filter is based\n on the mime type.\n\n :param mimetype: path from which the filter must be derived.\n :return: Filter string" - }, - { - "code": "def cfset_to_set(cfset):\n count = cf.CFSetGetCount(cfset)\n buffer = (c_void_p * count)()\n cf.CFSetGetValues(cfset, byref(buffer))\n return set([cftype_to_value(c_void_p(buffer[i])) for i in range(count)])", - "docstring": "Convert CFSet to python set." 
- }, - { - "code": "def log_status (self, checked, in_progress, queue, duration, num_urls):\n msg = _n(\"%2d thread active\", \"%2d threads active\", in_progress) % \\\n in_progress\n self.write(u\"%s, \" % msg)\n msg = _n(\"%5d link queued\", \"%5d links queued\", queue) % queue\n self.write(u\"%s, \" % msg)\n msg = _n(\"%4d link\", \"%4d links\", checked) % checked\n self.write(u\"%s\" % msg)\n msg = _n(\"%3d URL\", \"%3d URLs\", num_urls) % num_urls\n self.write(u\" in %s checked, \" % msg)\n msg = _(\"runtime %s\") % strformat.strduration_long(duration)\n self.writeln(msg)\n self.flush()", - "docstring": "Write status message to file descriptor." - }, - { - "code": "def info(cls, name, message, *args):\n cls.getLogger(name).info(message, *args)", - "docstring": "Convenience function to log a message at the INFO level.\n\n :param name: The name of the logger instance in the VSG namespace (VSG.)\n :param message: A message format string.\n :param args: The arguments that are merged into msg using the string formatting operator.\n :..note: The native logger's `kwargs` are not used in this function." - }, - { - "code": "def get_exposure(image, filters=None):\n def _get_exposure(histogram):\n total = sum(histogram)\n range_offset = len(histogram) / 4\n dark = float(sum(histogram[0:range_offset])) / total\n light = float(sum(histogram[-range_offset:])) / total\n return PictureExposure.UnderExposed if dark > 0.5 and light < 0.5 \\\n else PictureExposure.OverExposed if dark < 0.5 and light > 0.5 \\\n else PictureExposure.NormallyExposed\n FILTER_SETTINGS = {\n ColorComponentFilter.Red: ('RGB', 0, 256),\n ColorComponentFilter.Green: ('RGB', 256, 512),\n ColorComponentFilter.Blue: ('RGB', 512, 768),\n ColorComponentFilter.Grey: ('L', 0, 256)\n }\n exposures = collections.defaultdict(int)\n for exposure in [ _get_exposure(image.convert(mode).histogram()[start_index:end_index])\n for (mode, start_index, end_index) in [ FILTER_SETTINGS[filtr]\n for filtr in filters or [\n ColorComponentFilter.Red,\n ColorComponentFilter.Green,\n ColorComponentFilter.Blue,\n ColorComponentFilter.Grey\n ] ] ]:\n exposures[exposure] += 1\n return sorted(exposures.iterkeys(), key=lambda k: exposures[k], reverse=True)[0]", - "docstring": "Determine the exposure of a photo, which can be under-exposed,\n normally exposed or over-exposed.\n\n\n @param image: a Python Imaging Library (PIL) object to determine the\n exposure.\n\n @param filters: a list of ``ColorComponentFilter`` filter or ``None``\n to use all the filters.\n\n\n @return: an ``ExposureStatus`` instance that represents the exposure\n of the given PIL object." - }, - { - "code": "def bin_range_strings(bins, fmt=':g'):\n return [('{' + fmt + '}-{' + fmt + '}').format(i, j)\n for i, j in zip(bins, bins[1:])]", - "docstring": "Given a list of bins, make a list of strings of those bin ranges\n\n Parameters\n ----------\n bins : list_like\n List of anything, usually values of bin edges\n\n Returns\n -------\n bin_ranges : list\n List of bin ranges\n\n >>> bin_range_strings((0, 0.5, 1))\n ['0-0.5', '0.5-1']" - }, - { - "code": "def cancel_merge_when_pipeline_succeeds(self, **kwargs):\n path = ('%s/%s/cancel_merge_when_pipeline_succeeds' %\n (self.manager.path, self.get_id()))\n server_data = self.manager.gitlab.http_put(path, **kwargs)\n self._update_attrs(server_data)", - "docstring": "Cancel merge when the pipeline succeeds.\n\n Args:\n **kwargs: Extra options to send to the server (e.g. 
sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabMROnBuildSuccessError: If the server could not handle the\n request" - }, - { - "code": "def has(self, key):\n if not self.options.enabled:\n return CACHE_DISABLED\n ret = key in self._dict.keys() and not self._dict[key].is_expired()\n logger.debug('has({}) == {}'.format(repr(key), ret))\n return ret", - "docstring": "See if a key is in the cache\n\n Returns CACHE_DISABLED if the cache is disabled\n\n :param key: key to search for" - }, - { - "code": "def get_variation_from_key(self, experiment_key, variation_key):\n variation_map = self.variation_key_map.get(experiment_key)\n if variation_map:\n variation = variation_map.get(variation_key)\n if variation:\n return variation\n else:\n self.logger.error('Variation key \"%s\" is not in datafile.' % variation_key)\n self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION_ERROR))\n return None\n self.logger.error('Experiment key \"%s\" is not in datafile.' % experiment_key)\n self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n return None", - "docstring": "Get variation given experiment and variation key.\n\n Args:\n experiment: Key representing parent experiment of variation.\n variation_key: Key representing the variation.\n\n Returns\n Object representing the variation." - }, - { - "code": "def face_adjacency_tree(self):\n segment_bounds = np.column_stack((\n self.vertices[self.face_adjacency_edges].min(axis=1),\n self.vertices[self.face_adjacency_edges].max(axis=1)))\n tree = util.bounds_tree(segment_bounds)\n return tree", - "docstring": "An R-tree of face adjacencies.\n\n Returns\n --------\n tree: rtree.index\n Where each edge in self.face_adjacency has a\n rectangular cell" - }, - { - "code": "def clear(self):\n all_keys = self._find_keys_raw(settings.THUMBNAIL_KEY_PREFIX)\n if all_keys:\n self._delete_raw(*all_keys)", - "docstring": "Brutely clears the key value store for keys with THUMBNAIL_KEY_PREFIX\n prefix. Use this in emergency situations. Normally you would probably\n want to use the ``cleanup`` method instead." - }, - { - "code": "def off(self, event, handler):\n event_hook = self.get_or_create(event)\n event_hook.unsubscribe(handler)\n return self", - "docstring": "Detaches the handler from the specified event.\n\n @param event: event to detach the handler to. Any object can be passed\n as event, but string is preferable. If qcore.EnumBase\n instance is passed, its name is used as event key.\n @param handler: event handler.\n @return: self, so calls like this can be chained together." 
- }, - { - "code": "def serialize(self, data, fmt='%10.7E'):\n gmf_set_nodes = []\n for gmf_set in data:\n gmf_set_node = Node('gmfSet')\n if gmf_set.investigation_time:\n gmf_set_node['investigationTime'] = str(\n gmf_set.investigation_time)\n gmf_set_node['stochasticEventSetId'] = str(\n gmf_set.stochastic_event_set_id)\n gmf_set_node.nodes = gen_gmfs(gmf_set)\n gmf_set_nodes.append(gmf_set_node)\n gmf_container = Node('gmfCollection')\n gmf_container[SM_TREE_PATH] = self.sm_lt_path\n gmf_container[GSIM_TREE_PATH] = self.gsim_lt_path\n gmf_container.nodes = gmf_set_nodes\n with open(self.dest, 'wb') as dest:\n nrml.write([gmf_container], dest, fmt)", - "docstring": "Serialize a collection of ground motion fields to XML.\n\n :param data:\n An iterable of \"GMF set\" objects.\n Each \"GMF set\" object should:\n\n * have an `investigation_time` attribute\n * have an `stochastic_event_set_id` attribute\n * be iterable, yielding a sequence of \"GMF\" objects\n\n Each \"GMF\" object should:\n\n * have an `imt` attribute\n * have an `sa_period` attribute (only if `imt` is 'SA')\n * have an `sa_damping` attribute (only if `imt` is 'SA')\n * have a `event_id` attribute (to indicate which rupture\n contributed to this gmf)\n * be iterable, yielding a sequence of \"GMF node\" objects\n\n Each \"GMF node\" object should have:\n\n * a `gmv` attribute (to indicate the ground motion value\n * `lon` and `lat` attributes (to indicate the geographical location\n of the ground motion field)" - }, - { - "code": "def _generate(self, size=None):\n \"Generates a new word\"\n corpus_letters = list(self.vectors.keys())\n current_letter = random.choice(corpus_letters)\n if size is None:\n size = int(random.normalvariate(self.avg, self.std_dev))\n letters = [current_letter]\n for _ in range(size):\n if current_letter not in corpus_letters:\n break\n found_letter = self.vectors[current_letter].choose()\n letters.append(found_letter)\n current_letter = found_letter\n return ''.join(letters)", - "docstring": "Generates a new word" - }, - { - "code": "def max_sparse_hyperplane_size(tree):\n if tree.is_leaf:\n return 0\n else:\n return max(\n tree.hyperplane.shape[1],\n max_sparse_hyperplane_size(tree.left_child),\n max_sparse_hyperplane_size(tree.right_child),\n )", - "docstring": "Determine the most number on non zeros in a hyperplane entry" - }, - { - "code": "def verify(path):\n valid = False\n try:\n h5 = h5py.File(path, mode=\"r\")\n except (OSError, IsADirectoryError):\n pass\n else:\n if (\"file_format\" in h5.attrs and\n h5.attrs[\"file_format\"].lower() == \"hyperspy\" and\n \"Experiments\" in h5):\n valid = True\n return valid", - "docstring": "Verify that `path` has the HyperSpy file format" - }, - { - "code": "def restart_in_venv(venv, base, site_packages, args):\n if base and not os.path.isabs(venv) and not venv.startswith('~'):\n base = os.path.expanduser(base)\n if os.path.isabs(base):\n venv = os.path.join(base, venv)\n if venv.startswith('~'):\n venv = os.path.expanduser(venv)\n if not os.path.exists(venv):\n try:\n import virtualenv\n except ImportError:\n print('The virtual environment does not exist: %s' % venv)\n print('and virtualenv is not installed, so a new environment cannot be created')\n sys.exit(3)\n print('Creating new virtualenv environment in %s' % venv)\n virtualenv.logger = logger\n logger.indent += 2\n virtualenv.create_environment(venv, site_packages=site_packages)\n if sys.platform == 'win32':\n python = os.path.join(venv, 'Scripts', 'python.exe')\n if not os.path.exists(python):\n 
python = os.path.join(venv, 'bin', 'python.exe')\n else:\n python = os.path.join(venv, 'bin', 'python')\n if not os.path.exists(python):\n python = venv\n if not os.path.exists(python):\n raise BadCommand('Cannot find virtual environment interpreter at %s' % python)\n base = os.path.dirname(os.path.dirname(python))\n file = os.path.join(os.path.dirname(__file__), 'runner.py')\n if file.endswith('.pyc'):\n file = file[:-1]\n proc = subprocess.Popen(\n [python, file] + args + [base, '___VENV_RESTART___'])\n proc.wait()\n sys.exit(proc.returncode)", - "docstring": "Restart this script using the interpreter in the given virtual environment" - }, - { - "code": "def silent(cmd, **kwargs):\n return call(cmd, shell=True, stdout=NULL, stderr=NULL, **kwargs)", - "docstring": "Calls the given shell command. Output will not be displayed. Returns the\n status code.\n\n **Examples**:\n ::\n auxly.shell.silent(\"ls\")" - }, - { - "code": "def delete_object(self, cont, obj):\n try:\n self.conn.delete_object(cont, obj)\n return True\n except Exception as exc:\n log.error('There was an error::')\n if hasattr(exc, 'code') and hasattr(exc, 'msg'):\n log.error(' Code: %s: %s', exc.code, exc.msg)\n log.error(' Content: \\n%s', getattr(exc, 'read', lambda: six.text_type(exc))())\n return False", - "docstring": "Delete a file from Swift" - }, - { - "code": "def reset(self):\n self.output_file.remove()\n self.log_file.remove()\n self.stderr_file.remove()\n self.start_lockfile.remove()\n self.qerr_file.remove()\n self.qout_file.remove()\n if self.mpiabort_file.exists:\n self.mpiabort_file.remove()\n self.set_status(self.S_INIT, msg=\"Reset on %s\" % time.asctime())\n self.num_restarts = 0\n self.set_qjob(None)\n self.work.finalized = False\n self.flow.finalized = False\n return 0", - "docstring": "Reset the task status. Mainly used if we made a silly mistake in the initial\n setup of the queue manager and we want to fix it and rerun the task.\n\n Returns:\n 0 on success, 1 if reset failed." - }, - { - "code": "def parse_response(fields, records):\n data = [i['values']['data'] for i in records]\n return [\n {fields[idx]: row for idx, row in enumerate(d)}\n for d in data\n ]", - "docstring": "Parse an API response into usable objects.\n\n Args:\n fields (list[str]): List of strings indicating the fields that\n are represented in the records, in the order presented in\n the records.::\n\n [\n 'number1',\n 'number2',\n 'number3',\n 'first_name',\n 'last_name',\n 'company',\n 'street',\n 'city',\n 'state',\n 'zip',\n ]\n\n records (list[dict]): A really crappy data structure representing\n records as returned by Five9::\n\n [\n {\n 'values': {\n 'data': [\n '8881234567',\n None,\n None,\n 'Dave',\n 'Lasley',\n 'LasLabs Inc',\n None,\n 'Las Vegas',\n 'NV',\n '89123',\n ]\n }\n }\n ]\n\n Returns:\n list[dict]: List of parsed records." 
- }, - { - "code": "def connect(self, *args):\n self.log(\"Connect \", args, lvl=verbose)\n try:\n sock = args[0]\n ip = args[1]\n if sock not in self._sockets:\n self.log(\"New client connected:\", ip, lvl=debug)\n clientuuid = str(uuid4())\n self._sockets[sock] = Socket(ip, clientuuid)\n self._clients[clientuuid] = Client(\n sock=sock,\n ip=ip,\n clientuuid=clientuuid,\n )\n self.log(\"Client connected:\", clientuuid, lvl=debug)\n else:\n self.log(\"Old IP reconnected!\", lvl=warn)\n except Exception as e:\n self.log(\"Error during connect: \", e, type(e), lvl=critical)", - "docstring": "Registers new sockets and their clients and allocates uuids" - }, - { - "code": "def rnumlistwithoutreplacement(min, max):\n if checkquota() < 1:\n raise Exception(\"Your www.random.org quota has already run out.\")\n requestparam = build_request_parameterNR(min, max)\n request = urllib.request.Request(requestparam)\n request.add_header('User-Agent', 'randomwrapy/0.1 very alpha')\n opener = urllib.request.build_opener()\n numlist = opener.open(request).read()\n return numlist.split()", - "docstring": "Returns a randomly ordered list of the integers between min and max" - }, - { - "code": "def stats():\n import re\n import sys\n import math\n values = []\n for line in sys.stdin:\n values.extend(map(float, re.findall(r'\\d+\\.?\\d+', line)))\n mean = sum(values) / len(values)\n variance = sum((val - mean) ** 2 for val in values) / len(values)\n print '%3i items; mean: %10.5f; std-dev: %10.5f' % (\n len(values), mean, math.sqrt(variance))", - "docstring": "Read a stream of floats and give summary statistics" - }, - { - "code": "def generate_json_docs(module, pretty_print=False, user=None):\n indent = None\n separators = (',', ':')\n if pretty_print:\n indent = 4\n separators = (',', ': ')\n module_doc_dict = generate_doc_dict(module, user)\n json_str = json.dumps(module_doc_dict,\n indent=indent,\n separators=separators)\n return json_str", - "docstring": "Return a JSON string format of a Pale module's documentation.\n\n This string can either be printed out, written to a file, or piped to some\n other tool.\n\n This method is a shorthand for calling `generate_doc_dict` and passing\n it into a json serializer.\n\n The user argument is optional. If included, it expects the user to be an object with an \"is_admin\"\n boolean attribute. Any endpoint protected with a \"@requires_permission\" decorator will require\n user.is_admin == True to display documentation on that endpoint." 
- }, - { - "code": "def _pypsa_bus_timeseries(network, buses, timesteps):\n slack_bus = '_'.join(\n ['Bus', network.mv_grid.station.__repr__(side='mv')])\n v_set_dict = {bus: 1 for bus in buses if bus != slack_bus}\n control_deviation = network.config[\n 'grid_expansion_allowed_voltage_deviations'][\n 'hv_mv_trafo_control_deviation']\n if control_deviation != 0:\n control_deviation_ts = \\\n network.timeseries.timesteps_load_feedin_case.case.apply(\n lambda _: control_deviation if _ == 'feedin_case'\n else -control_deviation)\n else:\n control_deviation_ts = 0\n slack_voltage_pu = control_deviation_ts + 1 + \\\n network.config[\n 'grid_expansion_allowed_voltage_deviations'][\n 'hv_mv_trafo_offset']\n v_set_dict.update({slack_bus: slack_voltage_pu})\n v_set_df = pd.DataFrame(v_set_dict, index=timesteps)\n return v_set_df", - "docstring": "Time series in PyPSA compatible format for bus instances\n\n Set all buses except for the slack bus to voltage of 1 pu (it is assumed\n this setting is entirely ignored during solving the power flow problem).\n This slack bus is set to an operational voltage which is typically greater\n than nominal voltage plus a control deviation.\n The control deviation is always added positively to the operational voltage.\n For example, the operational voltage (offset) is set to 1.025 pu plus the\n control deviation of 0.015 pu. This adds up to a set voltage of the slack\n bus of 1.04 pu.\n\n .. warning::\n\n Voltage settings for the slack bus defined by this function assume the\n feedin case (reverse power flow case) as the worst-case for the power\n system. Thus, the set point for the slack is always greater 1.\n\n\n Parameters\n ----------\n network : Network\n The eDisGo grid topology model overall container\n timesteps : array_like\n Timesteps is an array-like object with entries of type\n :pandas:`pandas.Timestamp` specifying which time steps\n to export to pypsa representation and use in power flow analysis.\n buses : list\n Buses names\n\n Returns\n -------\n :pandas:`pandas.DataFrame`\n Time series table in PyPSA format" - }, - { - "code": "def save_map(self, map_path, map_data):\n return self._client.send(save_map=sc_pb.RequestSaveMap(\n map_path=map_path, map_data=map_data))", - "docstring": "Save a map into temp dir so create game can access it in multiplayer." - }, - { - "code": "def get_requirement_files(args=None):\n if args and args.input_filename:\n return [args.input_filename]\n paths = []\n for regex in settings.REQUIREMENTS_SOURCE_GLOBS:\n paths.extend(glob.glob(regex))\n return paths", - "docstring": "Get the \"best\" requirements file we can find" - }, - { - "code": "def avail_sizes(call=None):\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The avail_sizes function must be called with '\n '-f or --function, or with the --list-sizes option'\n )\n rcode, items = query(command='my/packages')\n if rcode not in VALID_RESPONSE_CODES:\n return {}\n return key_list(items=items)", - "docstring": "get list of available packages\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt-cloud --list-sizes" - }, - { - "code": "def _check_status(sdp_state):\n try:\n errval = \"error\"\n errdict = dict(state=\"unknown\", reason=\"unknown\")\n if sdp_state.current_state == \"unknown\":\n errdict['reason'] = 'database not initialised.'\n LOG.debug('Current state is unknown;')\n LOG.debug('Target state is %s;', sdp_state.target_state)\n LOG.debug('Current state timestamp is %s;',\n sdp_state.current_timestamp)\n elif sdp_state.current_state is None:\n errdict['reason'] = 'Master Controller Services may have died.'\n LOG.debug('Current state is NONE;')\n LOG.debug('Target state is %s;', sdp_state.target_state)\n LOG.debug('Current state timestamp is %s;',\n sdp_state.current_timestamp)\n elif sdp_state.target_state is None:\n errdict['reason'] = 'Master Controller Services may have died.'\n LOG.debug('Current state is %s;',\n sdp_state.current_state)\n LOG.debug('Target state is NONE;')\n LOG.debug('Current state timestamp is %s;',\n sdp_state.current_timestamp)\n LOG.debug('Target state timestamp is %s;',\n sdp_state.target_timestamp)\n elif sdp_state.current_timestamp is None:\n errdict['reason'] = 'Master Controller Services may have died.'\n LOG.debug('Current state is %s;',\n sdp_state.current_state)\n LOG.debug('Target state is %s;', sdp_state.target_state)\n LOG.debug('Current state timestamp is NONE')\n LOG.debug('Target state timestamp is %s;',\n sdp_state.target_timestamp)\n elif sdp_state.target_timestamp is None:\n errdict['reason'] = 'Master Controller Services may have died.'\n LOG.debug('Current state is %s;',\n sdp_state.current_state)\n LOG.debug('Target state is %s;', sdp_state.target_state)\n LOG.debug('Current state timestamp is %s;',\n sdp_state.current_timestamp)\n LOG.debug('Target state timestamp is NONE')\n elif sdp_state.current_timestamp < sdp_state.target_timestamp:\n errdict['reason'] = \\\n 'Timestamp for Master Controller Services is stale.'\n LOG.debug('Current state is %s;',\n sdp_state.current_state)\n LOG.debug('Target state is %s;', sdp_state.target_state)\n LOG.debug('Current state timestamp is %s;',\n sdp_state.current_timestamp)\n LOG.debug('Target state timestamp is %s;',\n sdp_state.target_timestamp)\n else:\n errval = \"okay\"\n except ConnectionError as err:\n errdict['reason'] = err\n LOG.debug('Connection Error %s', err)\n return errval, errdict", - "docstring": "SDP Status check.\n\n Do all the tests to determine, if the SDP state is\n \"broken\", what could be the cause, and return a\n suitable status message to be sent back by the calling\n function." - }, - { - "code": "def pearson_correlation_coefficient(predictions, labels, weights_fn=None):\n del weights_fn\n _, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions,\n labels)\n return pearson, tf.constant(1.0)", - "docstring": "Calculate pearson correlation coefficient.\n\n Args:\n predictions: The raw predictions.\n labels: The actual labels.\n weights_fn: Weighting function.\n\n Returns:\n The pearson correlation coefficient." 
- }, - { - "code": "def url_to_image(url, flag=cv2.IMREAD_COLOR):\n resp = urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, flag)\n return image", - "docstring": "download the image, convert it to a NumPy array, and then read\n it into OpenCV format" - }, - { - "code": "def traverse_until_fixpoint(predicate, tree):\n old_tree = None\n tree = simplify(tree)\n while tree and old_tree != tree:\n old_tree = tree\n tree = tree.traverse(predicate)\n if not tree:\n return None\n tree = simplify(tree)\n return tree", - "docstring": "Traverses the tree again and again until it is not modified." - }, - { - "code": "def isFullPreferenceOrder(self, candList):\n for cand1 in candList: \n if cand1 not in self.wmgMap.keys():\n return False\n for cand2 in candList:\n if cand1 == cand2:\n continue\n if cand2 not in self.wmgMap[cand1].keys():\n return False\n return True", - "docstring": "Returns True if the underlying weighted majority graph contains a comparision between every\n pair of candidate and returns False otherwise.\n\n :ivar list candList: Contains integer representations of each candidate." - }, - { - "code": "def tasktiger_processor(logger, method_name, event_dict):\n if g['current_tasks'] is not None and not g['current_task_is_batch']:\n event_dict['task_id'] = g['current_tasks'][0].id\n return event_dict", - "docstring": "TaskTiger structlog processor.\n\n Inject the current task id for non-batch tasks." - }, - { - "code": "def set_help(self):\r\n\t\tif not (self.field.help_text and self.attrs.get(\"_help\")):\r\n\t\t\treturn\r\n\t\tself.values[\"help\"] = HELP_TEMPLATE.format(self.field.help_text)", - "docstring": "Set help text markup." - }, - { - "code": "def percolating_continua(target, phi_crit, tau,\n volume_fraction='pore.volume_fraction',\n bulk_property='pore.intrinsic_conductivity'):\n r\n sigma = target[bulk_property]\n phi = target[volume_fraction]\n diff_phi = _sp.clip(phi - phi_crit, a_min=0, a_max=_sp.inf)\n sigma_eff = sigma*(diff_phi)**tau\n return sigma_eff", - "docstring": "r'''\n Calculates the effective property of a continua using percolation theory\n\n Parameters\n ----------\n target : OpenPNM Object\n The object for which these values are being calculated. This\n controls the length of the calculated array, and also provides\n access to other necessary thermofluid properties.\n\n volume_fraction : string\n The dictionary key in the Phase object containing the volume fraction\n of the conducting component\n\n bulk_property : string\n The dictionary key in the Phase object containing the intrinsic\n property of the conducting component\n\n phi_crit : float\n The volume fraction below which percolation does NOT occur\n\n tau : float\n The exponent of the percolation relationship\n\n Notes\n -----\n This model uses the following standard percolation relationship:\n\n .. 
math::\n\n \\sigma_{effective}=\\sigma_{bulk}(\\phi - \\phi_{critical})^\\lambda" - }, - { - "code": "def get(self, mac):\n data = {\n self._FORMAT_F: 'json',\n self._SEARCH_F: mac\n }\n response = self.__decode_str(self.__call_api(self.__url, data), 'utf-8')\n if len(response) > 0:\n return self.__parse(response)\n raise EmptyResponseException()", - "docstring": "Get data from API as instance of ResponseModel.\n\n Keyword arguments:\n mac -- MAC address or OUI for searching" - }, - { - "code": "def _draw_text(self, pos, text, font, **kw):\n self.drawables.append((pos, text, font, kw))", - "docstring": "Remember a single drawable tuple to paint later." - }, - { - "code": "def _rgevolve_leadinglog(self, scale_out):\n self._check_initial()\n return rge.smeft_evolve_leadinglog(C_in=self.C_in,\n scale_in=self.scale_in,\n scale_out=scale_out)", - "docstring": "Compute the leading logarithmic approximation to the solution\n of the SMEFT RGEs from the initial scale to `scale_out`.\n Returns a dictionary with parameters and Wilson coefficients.\n Much faster but less precise that `rgevolve`." - }, - { - "code": "def modify_instance_attribute(self, instance_id, attribute, value):\n if attribute == 'disableApiTermination':\n if isinstance(value, bool):\n if value:\n value = 'true'\n else:\n value = 'false'\n params = {'InstanceId' : instance_id,\n 'Attribute' : attribute,\n 'Value' : value}\n return self.get_status('ModifyInstanceAttribute', params, verb='POST')", - "docstring": "Changes an attribute of an instance\n\n :type instance_id: string\n :param instance_id: The instance id you wish to change\n\n :type attribute: string\n :param attribute: The attribute you wish to change.\n\n * AttributeName - Expected value (default)\n * instanceType - A valid instance type (m1.small)\n * kernel - Kernel ID (None)\n * ramdisk - Ramdisk ID (None)\n * userData - Base64 encoded String (None)\n * disableApiTermination - Boolean (true)\n * instanceInitiatedShutdownBehavior - stop|terminate\n * rootDeviceName - device name (None)\n\n :type value: string\n :param value: The new value for the attribute\n\n :rtype: bool\n :return: Whether the operation succeeded or not" - }, - { - "code": "def device_status(self):\n return {\n 'active': self.device['active'],\n 'offline': self.device['offline'],\n 'last_update': self.last_update,\n 'battery_level': self.battery_level,\n }", - "docstring": "Status of device." - }, - { - "code": "def get_all_tags(self, filters=None):\n params = {}\n if filters:\n self.build_filter_params(params, filters)\n return self.get_list('DescribeTags', params,\n [('item', Tag)], verb='POST')", - "docstring": "Retrieve all the metadata tags associated with your account.\n\n :type filters: dict\n :param filters: Optional filters that can be used to limit\n the results returned. Filters are provided\n in the form of a dictionary consisting of\n filter names as the key and filter values\n as the value. The set of allowable filter\n names/values is dependent on the request\n being performed. 
Check the EC2 API guide\n for details.\n\n :rtype: dict\n :return: A dictionary containing metadata tags" - }, - { - "code": "def _get_initial_residual(self, x0):\n if x0 is None:\n Mlr = self.linear_system.Mlb\n else:\n r = self.linear_system.b - self.linear_system.A*x0\n Mlr = self.linear_system.Ml*r\n PMlr, self.UMlr = self.projection.apply_complement(Mlr, return_Ya=True)\n MPMlr = self.linear_system.M*PMlr\n MPMlr_norm = utils.norm(PMlr, MPMlr, ip_B=self.linear_system.ip_B)\n return MPMlr, PMlr, MPMlr_norm", - "docstring": "Return the projected initial residual.\n\n Returns :math:`MPM_l(b-Ax_0)`." - }, - { - "code": "def _read_pidfile(self):\n if self.pidfile is None:\n return None\n if not os.path.isfile(self.pidfile):\n return None\n with open(self.pidfile, 'r') as fp:\n try:\n pid = int(fp.read())\n except ValueError:\n self._emit_warning('Empty or broken pidfile {pidfile}; '\n 'removing'.format(pidfile=self.pidfile))\n pid = None\n if pid is not None and psutil.pid_exists(pid):\n return pid\n else:\n os.remove(self.pidfile)\n return None", - "docstring": "Read the PID file and check to make sure it's not stale." - }, - { - "code": "def reassign_label(cls, destination_cluster, label):\n conn = Qubole.agent(version=Cluster.api_version)\n data = {\n \"destination_cluster\": destination_cluster,\n \"label\": label\n }\n return conn.put(cls.rest_entity_path + \"/reassign-label\", data)", - "docstring": "Reassign a label from one cluster to another.\n\n Args:\n `destination_cluster`: id/label of the cluster to move the label to\n\n `label`: label to be moved from the source cluster" - }, - { - "code": "def _afterpoint(string):\n if _isnumber(string):\n if _isint(string):\n return -1\n else:\n pos = string.rfind(\".\")\n pos = string.lower().rfind(\"e\") if pos < 0 else pos\n if pos >= 0:\n return len(string) - pos - 1\n else:\n return -1\n else:\n return -1", - "docstring": "Symbols after a decimal point, -1 if the string lacks the decimal point.\n\n >>> _afterpoint(\"123.45\")\n 2\n >>> _afterpoint(\"1001\")\n -1\n >>> _afterpoint(\"eggs\")\n -1\n >>> _afterpoint(\"123e45\")\n 2" - }, - { - "code": "def wait_for(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n timeout = kwargs.pop('timeout', TIMEOUT)\n start = None\n while True:\n try:\n return func(*args, **kwargs)\n except AssertionError:\n if not start:\n start = time()\n if time() - start < timeout:\n sleep(CHECK_EVERY)\n continue\n else:\n raise\n return wrapped", - "docstring": "A decorator to invoke a function, retrying on assertion errors for a\n specified time interval.\n\n Adds a kwarg `timeout` to `func` which is a number of seconds to try\n for (default 15)." - }, - { - "code": "def _process_outgoing_msg(self, sink_iter):\n LOG.debug('NetworkController processing outgoing request list.')\n from ryu.services.protocols.bgp.model import (\n FlexinetOutgoingRoute)\n while self.is_connected:\n for outgoing_msg in sink_iter:\n if not self.is_connected:\n self._socket.close()\n return\n if isinstance(outgoing_msg, FlexinetOutgoingRoute):\n rpc_msg = _create_prefix_notification(outgoing_msg, self)\n else:\n raise NotImplementedError(\n 'Do not handle out going message of type %s' %\n outgoing_msg.__class__)\n if rpc_msg:\n self._sendall(rpc_msg)\n self.pause(0)\n if self.green_in:\n self.green_in.kill()", - "docstring": "For every message we construct a corresponding RPC message to be\n sent over the given socket inside given RPC session.\n\n This function should be launched in a new green thread as\n it loops forever." 
- }, - { - "code": "def get_color(self):\n self.get_status()\n try:\n self.color = self.data['color']\n self.mode = self.data['mode']\n except TypeError:\n self.color = 0\n self.mode = ''\n return {'color': self.color, 'mode': self.mode}", - "docstring": "Get current color." - }, - { - "code": "def from_json(cls, s):\n d = json.loads(s)\n return get_dict_handler(d[\"type\"])(d)", - "docstring": "Restores the object from the given JSON.\n\n :param s: the JSON string to parse\n :type s: str\n :return: the" - }, - { - "code": "def to_esri_wgs_json(self):\n try:\n return ('{ \"xmin\" : %s, '\n '\"ymin\" : %s, '\n '\"xmax\" : %s, '\n '\"ymax\" : %s, '\n '\"spatialReference\" : {\"wkid\" : %d} }'\n % (self.left,\n self.bottom,\n self.right,\n self.top,\n self.wkid))\n except ValueError:\n raise Exception('One or more values could not be cast to a number. '\n 'Four bounding points must be real numbers. '\n 'WKID must be an integer.')", - "docstring": "Convert Viewbox object to a JSON string that can be used\n by the ESRI World Geocoding Service as a parameter." - }, - { - "code": "def build(self, input_path, output_paths):\n for output in output_paths:\n shutil.copy(input_path, output_paths)", - "docstring": "Should be extended by subclasses to actually do stuff. By default\n this will copy `input` over every file in the `outputs` list." - }, - { - "code": "def all_files(self):\n if self.files_command and not self.files:\n cmd = self.files_command\n files = shell_out(cmd, timeout=0, chroot=self.chroot)\n self.files = files.splitlines()\n return self.files", - "docstring": "Returns a list of files known by the package manager" - }, - { - "code": "def list_function_versions(FunctionName,\n region=None, key=None, keyid=None, profile=None):\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n vers = []\n for ret in __utils__['boto3.paged_call'](conn.list_versions_by_function,\n FunctionName=FunctionName):\n vers.extend(ret['Versions'])\n if not bool(vers):\n log.warning('No versions found')\n return {'Versions': vers}\n except ClientError as e:\n return {'error': __utils__['boto3.get_error'](e)}", - "docstring": "List the versions available for the given function.\n\n Returns list of function versions\n\n CLI Example:\n\n .. 
code-block:: yaml\n\n versions:\n - {...}\n - {...}" - }, - { - "code": "def get_DataFrame(self, data=False, with_unit=True, with_population=True):\n for key in self.__dict__:\n if (key is 'unit') and not with_unit:\n continue\n if (key is 'population') and not with_population:\n continue\n if type(self.__dict__[key]) is pd.DataFrame:\n if data:\n yield getattr(self, key)\n else:\n yield key", - "docstring": "Yields all panda.DataFrames or there names\n\n Notes\n -----\n For IOSystem this does not include the DataFrames in the extensions.\n\n Parameters\n ----------\n data : boolean, optional\n If True, returns a generator which yields the DataFrames.\n If False, returns a generator which\n yields only the names of the DataFrames\n\n with_unit: boolean, optional\n If True, includes the 'unit' DataFrame\n If False, does not include the 'unit' DataFrame.\n The method than only yields the numerical data tables\n\n with_population: boolean, optional\n If True, includes the 'population' vector\n If False, does not include the 'population' vector.\n\n Returns\n -------\n DataFrames or string generator, depending on parameter data" - }, - { - "code": "def read(self, file_or_path):\n if file_or_path in self._cached_templates:\n return self._cached_templates[file_or_path]\n if is_filelike(file_or_path):\n template = file_or_path.read()\n dirname = None\n else:\n with open(file_or_path, 'r') as f:\n template = f.read()\n dirname = os.path.dirname(file_or_path)\n template = self._engine(template,\n dirname=dirname,\n tolerant=self._tolerant)\n self._cached_templates[file_or_path] = template\n return template", - "docstring": "Read template from cache or file." - }, - { - "code": "def get(self, thread_uuid, uuid):\n members = (v for v in self.list(thread_uuid) if v.get('userUuid') == uuid)\n for i in members:\n self.log.debug(i)\n return i\n return None", - "docstring": "Get one thread member." - }, - { - "code": "def cliques(self, xg):\n g = nx.DiGraph()\n for (x,y) in self.merged_ontology.get_graph().edges():\n g.add_edge(x,y)\n for (x,y) in xg.edges():\n g.add_edge(x,y)\n g.add_edge(y,x)\n return list(strongly_connected_components(g))", - "docstring": "Return all equivalence set cliques, assuming each edge in the xref graph is treated as equivalent,\n and all edges in ontology are subClassOf\n\n Arguments\n ---------\n xg : Graph\n an xref graph\n\n Returns\n -------\n list of sets" - }, - { - "code": "def DeleteInstance(self, context, ports):\n resource_details = self._parse_remote_model(context)\n res = self.command_wrapper.execute_command_with_connection(\n context,\n self.destroy_virtual_machine_command.DeleteInstance,\n resource_details.vm_uuid,\n resource_details.fullname)\n return set_command_result(result=res, unpicklable=False)", - "docstring": "Destroy Vm Command, will only destroy the vm and will not remove the resource\n\n :param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on\n :param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!" 
- }, - { - "code": "def ensure_iam(self, publisher=None):\n topic = self.get_topic_param()\n client = self.session.client('pubsub', 'v1', 'projects.topics')\n policy = client.execute_command('getIamPolicy', {'resource': topic})\n policy.pop('etag')\n found = False\n for binding in policy.get('bindings', {}):\n if binding['role'] != 'roles/pubsub.publisher':\n continue\n if publisher in binding['members']:\n return\n found = binding\n if not found:\n policy.setdefault(\n 'bindings', {'members': [publisher], 'role': 'roles/pubsub.publisher'})\n else:\n found['members'].append(publisher)\n client.execute_command('setIamPolicy', {'resource': topic, 'body': {'policy': policy}})", - "docstring": "Ensure the given identities are in the iam role bindings for the topic." - }, - { - "code": "async def serve(self) -> None:\n LOGGER.debug('RevRegBuilder.serve >>>')\n assert self.external\n file_pid = join(self._dir_tails_sentinel, '.pid')\n if isfile(file_pid):\n with open(file_pid, 'r') as fh_pid:\n pid = int(fh_pid.read())\n try:\n kill(pid, 0)\n except ProcessLookupError:\n remove(file_pid)\n LOGGER.info('RevRegBuilder removed derelict .pid file')\n except PermissionError:\n LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)\n LOGGER.debug('RevRegBuilder.serve <<<')\n return\n else:\n LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)\n LOGGER.debug('RevRegBuilder.serve <<<')\n return\n pid = getpid()\n with open(file_pid, 'w') as pid_fh:\n print(str(pid), file=pid_fh)\n file_stop = join(self._dir_tails_sentinel, '.stop')\n while True:\n if isfile(file_stop):\n remove(file_stop)\n remove(file_pid)\n break\n p_pending = [join(self._dir_tails_sentinel, d) for d in listdir(self._dir_tails_sentinel)\n if isdir(join(self._dir_tails_sentinel, d))]\n p_pending = [p for p in p_pending if [s for s in listdir(p) if s.startswith('.')]]\n if p_pending:\n pdir = basename(p_pending[0])\n rr_id = pdir\n rr_size = int([s for s in listdir(p_pending[0]) if s.startswith('.')][0][1:])\n open(join(p_pending[0], '.in-progress'), 'w').close()\n await self.create_rev_reg(rr_id, rr_size or None)\n rmtree(p_pending[0])\n await asyncio.sleep(1)\n LOGGER.debug('RevRegBuilder.serve <<<')", - "docstring": "Write pidfile to sentinel directory if need be, and wait for sentinels\n to shut down or build revocation registry and tails file." - }, - { - "code": "def verify_email(self, action_token, signed_data):\n try:\n action = \"verify-email\"\n user = get_user_by_action_token(action, action_token)\n if not user or not user.signed_data_match(signed_data, action):\n raise mocha_exc.AppError(\"Verification Invalid!\")\n else:\n user.set_email_verified(True)\n flash_success(\"Account verified. You can now login\")\n username = user.username\n if user.login_method == \"email\":\n username = user.email\n return redirect(self.login, username=username)\n except Exception as e:\n logging.exception(e)\n flash_error(\"Verification Failed!\")\n return redirect(self.login)", - "docstring": "Verify email account, in which a link was sent to" - }, - { - "code": "def CompleteBreakpoint(self, breakpoint_id):\n with self._lock:\n self._completed.add(breakpoint_id)\n if breakpoint_id in self._active:\n self._active.pop(breakpoint_id).Clear()", - "docstring": "Marks the specified breaking as completed.\n\n Appends the ID to set of completed breakpoints and clears it.\n\n Args:\n breakpoint_id: breakpoint ID to complete." 
- }, - { - "code": "def create_nation_fixtures(self):\n SHP_SLUG = \"cb_{}_us_state_500k\".format(self.YEAR)\n DOWNLOAD_PATH = os.path.join(self.DOWNLOAD_DIRECTORY, SHP_SLUG)\n shape = shapefile.Reader(\n os.path.join(DOWNLOAD_PATH, \"{}.shp\".format(SHP_SLUG))\n )\n fields = shape.fields[1:]\n field_names = [f[0] for f in fields]\n features = []\n for shp in shape.shapeRecords():\n state = dict(zip(field_names, shp.record))\n geodata = {\n \"type\": \"Feature\",\n \"geometry\": shp.shape.__geo_interface__,\n \"properties\": {\n \"state\": state[\"STATEFP\"],\n \"name\": state[\"NAME\"],\n },\n }\n features.append(geodata)\n Geometry.objects.update_or_create(\n division=self.NATION,\n subdivision_level=self.STATE_LEVEL,\n simplification=self.THRESHOLDS[\"nation\"],\n source=os.path.join(\n self.SHP_SOURCE_BASE.format(self.YEAR), SHP_SLUG\n )\n + \".zip\",\n series=self.YEAR,\n defaults={\n \"topojson\": self.toposimplify(\n geojson.FeatureCollection(features),\n self.THRESHOLDS[\"nation\"],\n )\n },\n )\n geo, created = Geometry.objects.update_or_create(\n division=self.NATION,\n subdivision_level=self.COUNTY_LEVEL,\n simplification=self.THRESHOLDS[\"nation\"],\n source=os.path.join(\n self.SHP_SOURCE_BASE.format(self.YEAR), SHP_SLUG\n )\n + \".zip\",\n series=self.YEAR,\n defaults={\"topojson\": self.get_state_county_shps(\"00\")},\n )\n tqdm.write(\"Nation\\n\")\n tqdm.write(\n self.TQDM_PREFIX\n + \"> FIPS {} @ ~{}kb \".format(\n \"00\", round(len(json.dumps(geo.topojson)) / 1000)\n )\n )\n tqdm.write(self.style.SUCCESS(\"Done.\\n\"))", - "docstring": "Create national US and State Map" - }, - { - "code": "def CreateAllStaticECMWFRAPIDFiles(in_drainage_line,\n river_id,\n length_id,\n slope_id,\n next_down_id,\n in_catchment,\n catchment_river_id,\n rapid_output_folder,\n kfac_celerity=1000.0/3600.0,\n kfac_formula_type=3,\n kfac_length_units=\"km\",\n lambda_k=0.35,\n x_value=0.3,\n nhdplus=False,\n taudem_network_connectivity_tree_file=None,\n file_geodatabase=None):\n CreateAllStaticRAPIDFiles(in_drainage_line,\n river_id,\n length_id,\n slope_id,\n next_down_id,\n rapid_output_folder,\n kfac_celerity,\n kfac_formula_type,\n kfac_length_units,\n lambda_k,\n x_value,\n nhdplus,\n taudem_network_connectivity_tree_file,\n file_geodatabase)\n rapid_connect_file = os.path.join(rapid_output_folder, 'rapid_connect.csv')\n CreateAllStaticECMWFFiles(in_catchment,\n catchment_river_id,\n rapid_output_folder,\n rapid_connect_file,\n file_geodatabase)", - "docstring": "This creates all of the static RAPID files and ECMWF grid weight tables.\n\n Parameters\n ----------\n in_drainage_line: str\n Path to the stream network (i.e. Drainage Line) shapefile.\n river_id: str\n The name of the field with the river ID\n (Ex. 'HydroID', 'COMID', or 'LINKNO').\n length_id: str\n The field name containging the length of the river segment\n (Ex. 'LENGTHKM' or 'Length').\n slope_id: str\n The field name containging the slope of the river segment\n (Ex. 'Avg_Slope' or 'Slope').\n next_down_id: str\n The name of the field with the river ID of the next downstream\n river segment (Ex. 'NextDownID' or 'DSLINKNO').\n in_catchment: str\n Path to the Catchment shapefile.\n catchment_river_id: str\n The name of the field with the river ID (Ex. 
'DrainLnID' or 'LINKNO').\n rapid_output_folder: str\n The path to the folder where all of the RAPID output will be generated.\n kfac_celerity: float, optional\n The flow wave celerity for the watershed in meters per second.\n 1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown.\n kfac_formula_type: int, optional\n An integer representing the formula type to use when calculating kfac.\n Default is 3.\n kfac_length_units: str, optional\n The units for the length_id field. Supported types are \"m\" for meters\n and \"km\" for kilometers. Default is \"km\".\n lambda_k: float, optional\n The value for lambda given from RAPID after the calibration process.\n Default is 0.35.\n x_value: float, optional\n Value for the muskingum X parameter [0-0.5].Default is 0.3.\n nhdplus: bool, optional\n If True, the drainage line is from the NHDPlus dataset with the\n VAA fields COMID, FROMNODE, TONODE, and DIVERGENCE. Default is False.\n taudem_network_connectivity_tree_file: str, optional\n If set, the connectivity file will be generated from the\n TauDEM connectivity tree file.\n file_geodatabase: str, optional\n Path to the file geodatabase. If you use this option,\n in_drainage_line is the name of the stream network feature class\n (WARNING: Not always stable with GDAL).\n\n\n Example::\n\n from RAPIDpy.gis.workflow import CreateAllStaticECMWFRAPIDFiles\n\n CreateAllStaticECMWFRAPIDFiles(\n in_drainage_line=\"/path/to/drainage_line.shp\",\n river_id=\"HydroID\",\n length_id=\"LENGTHKM\",\n slope_id=\"SLOPE\",\n next_down_id=\"NextDownID\",\n in_catchment=\"/path/to/catchment.shp\",\n catchment_river_id=\"DrainLnID\",\n rapid_output_folder=\"/path/to/rapid/output\",\n )" - }, - { - "code": "def pvt(bars):\n trend = ((bars['close'] - bars['close'].shift(1)) /\n bars['close'].shift(1)) * bars['volume']\n return trend.cumsum()", - "docstring": "Price Volume Trend" - }, - { - "code": "def path_yield(path):\n for part in (x for x in path.strip(SEP).split(SEP) if x not in (None, '')):\n yield part", - "docstring": "Yield on all path parts." - }, - { - "code": "def to_representation(self, instance):\n if self.id_only():\n return instance.pk\n pk = getattr(instance, 'pk', None)\n if not settings.ENABLE_SERIALIZER_OBJECT_CACHE or pk is None:\n return self._to_representation(instance)\n else:\n if pk not in self.obj_cache:\n self.obj_cache[pk] = self._to_representation(instance)\n return self.obj_cache[pk]", - "docstring": "Modified to_representation method. Optionally may cache objects.\n\n Arguments:\n instance: A model instance or data object.\n Returns:\n Instance ID if the serializer is meant to represent its ID.\n Otherwise, a tagged data dict representation." 
- }, - { - "code": "def _writeBlock(block, blockID):\n with open(\"blockIDs.txt\", \"a\") as fp:\n fp.write(\"blockID: \" + str(blockID) + \"\\n\")\n sentences = \"\"\n for sentence in block:\n sentences += sentence+\",\"\n fp.write(\"block sentences: \"+sentences[:-1]+\"\\n\")\n fp.write(\"\\n\")", - "docstring": "writes the block to a file with the id" - }, - { - "code": "def autofocus_stack(fieldstack, nm, res, ival, roi=None,\n metric=\"average gradient\", padding=True,\n same_dist=False, ret_ds=False, ret_grads=False,\n num_cpus=_cpu_count, copy=True):\n dopt = list()\n grad = list()\n M = fieldstack.shape[0]\n stackargs = list()\n for s in range(M):\n stackargs.append([fieldstack[s].copy(copy), nm, res, ival,\n roi, metric, padding, True, True, 1])\n p = mp.Pool(num_cpus)\n result = p.map_async(_autofocus_wrapper, stackargs).get()\n p.close()\n p.terminate()\n p.join()\n newstack = np.zeros(fieldstack.shape, dtype=fieldstack.dtype)\n for s in range(M):\n field, ds, gs = result[s]\n dopt.append(ds)\n grad.append(gs)\n newstack[s] = field\n if same_dist:\n davg = np.average(dopt)\n newstack = refocus_stack(fieldstack, davg, nm, res,\n num_cpus=num_cpus, copy=copy,\n padding=padding)\n ret_list = [newstack]\n if ret_ds:\n ret_list += [dopt]\n if ret_grads:\n ret_list += [grad]\n if len(ret_list) == 1:\n return ret_list[0]\n else:\n return tuple(ret_list)", - "docstring": "Numerical autofocusing of a stack using the Helmholtz equation.\n\n\n Parameters\n ----------\n fieldstack : 2d or 3d ndarray\n Electric field is BG-Corrected, i.e. Field = EX/BEx\n nm : float\n Refractive index of medium.\n res : float\n Size of wavelength in pixels.\n ival : tuple of floats\n Approximate interval to search for optimal focus in px.\n metric : str\n see `autofocus_field`.\n padding : bool\n Perform padding with linear ramp from edge to average\n to reduce ringing artifacts.\n\n .. versionchanged:: 0.1.4\n improved padding value and padding location\n ret_dopt : bool\n Return optimized distance and gradient plotting data.\n same_dist : bool\n Refocus entire sinogram with one distance.\n red_ds : bool\n Return the autofocusing distances in pixels. Defaults to False.\n If sam_dist is True, still returns autofocusing distances\n of first pass. The used refocusing distance is the\n average.\n red_grads : bool\n Return the computed gradients as a list.\n copy : bool\n If False, overwrites input array.\n\n\n Returns\n -------\n The focused field (and the refocussing distance + data if d is None)" - }, - { - "code": "def _get_unrestricted_qvm(name: str, noisy: bool,\n n_qubits: int = 34,\n connection: ForestConnection = None,\n qvm_type: str = 'qvm') -> QuantumComputer:\n topology = nx.complete_graph(n_qubits)\n return _get_qvm_with_topology(name=name, connection=connection,\n topology=topology,\n noisy=noisy,\n requires_executable=False,\n qvm_type=qvm_type)", - "docstring": "A qvm with a fully-connected topology.\n\n This is obviously the least realistic QVM, but who am I to tell users what they want.\n\n :param name: The name of this QVM\n :param noisy: Whether to construct a noisy quantum computer\n :param n_qubits: 34 qubits ought to be enough for anybody.\n :param connection: The connection to use to talk to external services\n :param qvm_type: The type of QVM. 
Either 'qvm' or 'pyqvm'.\n :return: A pre-configured QuantumComputer" - }, - { - "code": "def close(self: Any) -> None:\n if self._file_obj is not None:\n self._file_obj.close()\n self._file_obj = None", - "docstring": "Close any files linked to this object" - }, - { - "code": "def get_codomain(self, key):\n return [v for k, v in self.all if k == key]", - "docstring": "RETURN AN ARRAY OF OBJECTS THAT key MAPS TO" - }, - { - "code": "def complex_dtype(dtype, default=None):\n dtype, dtype_in = np.dtype(dtype), dtype\n if is_complex_floating_dtype(dtype):\n return dtype\n try:\n complex_base_dtype = TYPE_MAP_R2C[dtype.base]\n except KeyError:\n if default is not None:\n return default\n else:\n raise ValueError('no complex counterpart exists for `dtype` {}'\n ''.format(dtype_repr(dtype_in)))\n else:\n return np.dtype((complex_base_dtype, dtype.shape))", - "docstring": "Return complex counterpart of ``dtype`` if existing, else ``default``.\n\n Parameters\n ----------\n dtype :\n Real or complex floating point data type. It can be given in any\n way the `numpy.dtype` constructor understands.\n default :\n Object to be returned if no complex counterpart is found for\n ``dtype``, except for ``None``, in which case an error is raised.\n\n Returns\n -------\n complex_dtype : `numpy.dtype`\n The complex counterpart of ``dtype``.\n\n Raises\n ------\n ValueError\n if there is no complex counterpart to the given data type and\n ``default == None``.\n\n Examples\n --------\n Convert scalar dtypes:\n\n >>> complex_dtype(float)\n dtype('complex128')\n >>> complex_dtype('float32')\n dtype('complex64')\n >>> complex_dtype(complex)\n dtype('complex128')\n\n Dtypes with shape are also supported:\n\n >>> complex_dtype(np.dtype((float, (3,))))\n dtype(('<c16', (3,)))\n >>> complex_dtype(('float32', (3,)))\n dtype(('<c8', (3,)))\n\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>> from reda.plotters import matplot\n >>> a = np.arange(4)\n >>> b = np.arange(3) + 3\n >>> def sum(a, b):\n ... return a + b\n >>> x, y = np.meshgrid(a, b)\n >>> c = sum(x, y)\n >>> fig, (ax1, ax2) = plt.subplots(1, 2)\n >>> im = ax1.pcolormesh(x, y, c)\n >>> _ = plt.colorbar(im, ax=ax1)\n >>> _ = ax1.set_title(\"plt.pcolormesh\")\n >>> _, _ = matplot(x, y, c, ax=ax2)\n >>> _ = ax2.set_title(\"reda.plotters.matplot\")\n >>> fig.show()\n\n Note\n ----\n Only works for equidistant data at the moment." 
- }, - { - "code": "def _diffrsp_app(self,xmlfile=None, **kwargs):\n loglevel = kwargs.get('loglevel', self.loglevel)\n self.logger.log(loglevel, 'Computing diffuse repsonce for component %s.',\n self.name)\n srcmdl_file = self.files['srcmdl']\n if xmlfile is not None:\n srcmdl_file = self.get_model_path(xmlfile)\n kw = dict(evfile=self.files['ft1'],\n scfile=self.data_files['scfile'],\n irfs = self.config['gtlike']['irfs'],\n evtype = self.config['selection']['evtype'],\n srcmdl = srcmdl_file)\n run_gtapp('gtdiffrsp', self.logger, kw, loglevel=loglevel)\n return", - "docstring": "Compute the diffuse response" - }, - { - "code": "def get_parser_class():\n global distro\n if distro == 'Linux':\n Parser = parser.LinuxParser\n if not os.path.exists(Parser.get_command()[0]):\n Parser = parser.UnixIPParser\n elif distro in ['Darwin', 'MacOSX']:\n Parser = parser.MacOSXParser\n elif distro == 'Windows':\n Parser = parser.WindowsParser\n else:\n Parser = parser.NullParser\n Log.error(\"Unknown distro type '%s'.\" % distro)\n Log.debug(\"Distro detected as '%s'\" % distro)\n Log.debug(\"Using '%s'\" % Parser)\n return Parser", - "docstring": "Returns the parser according to the system platform" - }, - { - "code": "def CleanAff4Clients(self):\n inactive_client_ttl = config.CONFIG[\"DataRetention.inactive_client_ttl\"]\n if not inactive_client_ttl:\n self.Log(\"TTL not set - nothing to do...\")\n return\n exception_label = config.CONFIG[\n \"DataRetention.inactive_client_ttl_exception_label\"]\n index = client_index.CreateClientIndex(token=self.token)\n client_urns = index.LookupClients([\".\"])\n deadline = rdfvalue.RDFDatetime.Now() - inactive_client_ttl\n deletion_count = 0\n for client_group in collection.Batch(client_urns, 1000):\n inactive_client_urns = []\n for client in aff4.FACTORY.MultiOpen(\n client_group,\n mode=\"r\",\n aff4_type=aff4_grr.VFSGRRClient,\n token=self.token):\n if exception_label in client.GetLabelsNames():\n continue\n if client.Get(client.Schema.LAST) < deadline:\n inactive_client_urns.append(client.urn)\n aff4.FACTORY.MultiDelete(inactive_client_urns, token=self.token)\n deletion_count += len(inactive_client_urns)\n self.HeartBeat()\n self.Log(\"Deleted %d inactive clients.\" % deletion_count)", - "docstring": "Cleans up old client data from aff4." 
- }, - { - "code": "def __update_display_items_model(self, display_items_model: ListModel.FilteredListModel, data_group: typing.Optional[DataGroup.DataGroup], filter_id: typing.Optional[str]) -> None:\n with display_items_model.changes():\n if data_group is not None:\n display_items_model.container = data_group\n display_items_model.filter = ListModel.Filter(True)\n display_items_model.sort_key = None\n display_items_model.filter_id = None\n elif filter_id == \"latest-session\":\n display_items_model.container = self.document_model\n display_items_model.filter = ListModel.EqFilter(\"session_id\", self.document_model.session_id)\n display_items_model.sort_key = DataItem.sort_by_date_key\n display_items_model.sort_reverse = True\n display_items_model.filter_id = filter_id\n elif filter_id == \"temporary\":\n display_items_model.container = self.document_model\n display_items_model.filter = ListModel.NotEqFilter(\"category\", \"persistent\")\n display_items_model.sort_key = DataItem.sort_by_date_key\n display_items_model.sort_reverse = True\n display_items_model.filter_id = filter_id\n elif filter_id == \"none\":\n display_items_model.container = self.document_model\n display_items_model.filter = ListModel.Filter(False)\n display_items_model.sort_key = DataItem.sort_by_date_key\n display_items_model.sort_reverse = True\n display_items_model.filter_id = filter_id\n else:\n display_items_model.container = self.document_model\n display_items_model.filter = ListModel.EqFilter(\"category\", \"persistent\")\n display_items_model.sort_key = DataItem.sort_by_date_key\n display_items_model.sort_reverse = True\n display_items_model.filter_id = None", - "docstring": "Update the data item model with a new container, filter, and sorting.\n\n This is called when the data item model is created or when the user changes\n the data group or sorting settings." - }, - { - "code": "def is_vhost_alive(self, vhost):\n return self._api_get('/api/aliveness-test/{0}'.format(\n urllib.parse.quote_plus(vhost)\n ))", - "docstring": "Declares a test queue, then publishes and consumes a message.\n Intended for use by monitoring tools.\n\n :param vhost: The vhost name to check\n :type vhost: str" - }, - { - "code": "def _build_params_from_kwargs(self, **kwargs):\n api_methods = self.get_api_params()\n required_methods = self.get_api_required_params()\n ret_kwargs = {}\n for key, val in kwargs.items():\n if key not in api_methods:\n warnings.warn(\n 'Passed uknown parameter [{}]'.format(key),\n Warning\n )\n continue\n if key not in required_methods and val is None:\n continue\n if type(val) != api_methods[key]['type']:\n raise ValueError(\n \"Invalid type specified to param: {}\".format(key)\n )\n if 'max_len' in api_methods[key]:\n if len(val) > api_methods[key]['max_len']:\n raise ValueError(\n \"Lenght of parameter [{}] more than \"\n \"allowed length\".format(key)\n )\n ret_kwargs[api_methods[key]['param']] = val\n for item in required_methods:\n if item not in ret_kwargs:\n raise pushalot.exc.PushalotException(\n \"Parameter [{}] required, but not set\".format(item)\n )\n return ret_kwargs", - "docstring": "Builds parameters from passed arguments\n\n Search passed parameters in available methods,\n prepend specified API key, and return dictionary\n which can be sent directly to API server.\n\n\n :param kwargs:\n :type param: dict\n :raises ValueError: If type of specified parameter doesn't match\n the expected type. 
Also raised if some basic\n validation of passed parameter fails.\n :raises PushalotException: If required parameter not set.\n :return: Dictionary with params which can be\n sent to API server\n :rtype: dict" - }, - { - "code": "def search(query, stats):\n log.debug(\"Search query: {0}\".format(query))\n issues = []\n for batch in range(MAX_BATCHES):\n response = stats.parent.session.get(\n \"{0}/rest/api/latest/search?{1}\".format(\n stats.parent.url, urllib.urlencode({\n \"jql\": query,\n \"fields\": \"summary,comment\",\n \"maxResults\": MAX_RESULTS,\n \"startAt\": batch * MAX_RESULTS})))\n data = response.json()\n log.debug(\"Batch {0} result: {1} fetched\".format(\n batch, listed(data[\"issues\"], \"issue\")))\n log.data(pretty(data))\n issues.extend(data[\"issues\"])\n if len(issues) >= data[\"total\"]:\n break\n return [Issue(issue, prefix=stats.parent.prefix) for issue in issues]", - "docstring": "Perform issue search for given stats instance" - }, - { - "code": "def _check_query(self, query, style_cols=None):\n try:\n self.sql_client.send(\n utils.minify_sql((\n 'EXPLAIN',\n 'SELECT',\n ' {style_cols}{comma}',\n ' the_geom, the_geom_webmercator',\n 'FROM ({query}) _wrap;',\n )).format(query=query,\n comma=',' if style_cols else '',\n style_cols=(','.join(style_cols)\n if style_cols else '')),\n do_post=False)\n except Exception as err:\n raise ValueError(('Layer query `{query}` and/or style column(s) '\n '{cols} are not valid: {err}.'\n '').format(query=query,\n cols=', '.join(['`{}`'.format(c)\n for c in style_cols]),\n err=err))", - "docstring": "Checks if query from Layer or QueryLayer is valid" - }, - { - "code": "def remove_rows_matching(df, column, match):\n df = df.copy()\n mask = df[column].values != match\n return df.iloc[mask, :]", - "docstring": "Return a ``DataFrame`` with rows where `column` values match `match` are removed.\n\n The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared\n to `match`, and those rows that match are removed from the DataFrame.\n\n :param df: Pandas ``DataFrame``\n :param column: Column indexer\n :param match: ``str`` match target\n :return: Pandas ``DataFrame`` filtered" - }, - { - "code": "def run_sql_query(self, sql, required=False, query_params=[]):\n try:\n cursor = self.connection.execute(sql, query_params)\n except sqlite3.OperationalError as e:\n error_message = e.message if hasattr(e, 'message') else str(e)\n logger.warn(\n \"Encountered error \\\"%s\\\" from query \\\"%s\\\" with parameters %s\",\n error_message,\n sql,\n query_params)\n raise\n results = cursor.fetchall()\n if required and not results:\n raise ValueError(\n \"No results found for query:\\n%s\\nwith parameters: %s\" % (\n sql, query_params))\n return results", - "docstring": "Given an arbitrary SQL query, run it against the database\n and return the results.\n\n Parameters\n ----------\n sql : str\n SQL query\n\n required : bool\n Raise an error if no results found in the database\n\n query_params : list\n For each '?' in the query there must be a corresponding value in\n this list." 
- }, - { - "code": "def parse(self):\n log.debug(self)\n self.parse_composite()\n self.split_line()\n self.convert_coordinates()\n self.convert_meta()\n self.make_shape()\n log.debug(self)", - "docstring": "Convert line to shape object" - }, - { - "code": "def account_xdr_object(self):\n return Xdr.types.PublicKey(Xdr.const.KEY_TYPE_ED25519,\n self.verifying_key.to_bytes())", - "docstring": "Create PublicKey XDR object via public key bytes.\n\n :return: Serialized XDR of PublicKey type." - }, - { - "code": "def default_links_factory_with_additional(additional_links):\n def factory(pid, **kwargs):\n links = default_links_factory(pid)\n for link in additional_links:\n links[link] = additional_links[link].format(pid=pid,\n scheme=request.scheme,\n host=request.host)\n return links\n return factory", - "docstring": "Generate a links generation factory with the specified additional links.\n\n :param additional_links: A dict of link names to links to be added to the\n returned object.\n :returns: A link generation factory." - }, - { - "code": "def remove(self, *args):\n args = self.prepare_args(args)\n for index in self._indexes:\n index.remove(*args)", - "docstring": "Remove the instance tied to the field from all the indexes\n\n For the parameters, seen BaseIndex.remove" - }, - { - "code": "def getRoom(self, _id):\n if SockJSRoomHandler._room.has_key(self._gcls() + _id):\n return SockJSRoomHandler._room[self._gcls() + _id]\n return None", - "docstring": "Retrieve a room from it's id" - }, - { - "code": "def translate(\n self,\n values,\n target_language=None,\n format_=None,\n source_language=None,\n customization_ids=(),\n model=None,\n ):\n single_value = False\n if isinstance(values, six.string_types):\n single_value = True\n values = [values]\n if target_language is None:\n target_language = self.target_language\n if isinstance(customization_ids, six.string_types):\n customization_ids = [customization_ids]\n data = {\n \"target\": target_language,\n \"q\": values,\n \"cid\": customization_ids,\n \"format\": format_,\n \"source\": source_language,\n \"model\": model,\n }\n response = self._connection.api_request(method=\"POST\", path=\"\", data=data)\n translations = response.get(\"data\", {}).get(\"translations\", ())\n if len(values) != len(translations):\n raise ValueError(\n \"Expected iterations to have same length\", values, translations\n )\n for value, translation in six.moves.zip(values, translations):\n translation[\"input\"] = value\n if single_value:\n return translations[0]\n else:\n return translations", - "docstring": "Translate a string or list of strings.\n\n See https://cloud.google.com/translate/docs/translating-text\n\n :type values: str or list\n :param values: String or list of strings to translate.\n\n :type target_language: str\n :param target_language: The language to translate results into. This\n is required by the API and defaults to\n the target language of the current instance.\n\n :type format_: str\n :param format_: (Optional) One of ``text`` or ``html``, to specify\n if the input text is plain text or HTML.\n\n :type source_language: str\n :param source_language: (Optional) The language of the text to\n be translated.\n\n :type customization_ids: str or list\n :param customization_ids: (Optional) ID or list of customization IDs\n for translation. 
Sets the ``cid`` parameter\n in the query.\n\n :type model: str\n :param model: (Optional) The model used to translate the text, such\n as ``'base'`` or ``'nmt'``.\n\n :rtype: str or list\n :returns: A list of dictionaries for each queried value. Each\n dictionary typically contains three keys (though not\n all will be present in all cases)\n\n * ``detectedSourceLanguage``: The detected language (as an\n ISO 639-1 language code) of the text.\n * ``translatedText``: The translation of the text into the\n target language.\n * ``input``: The corresponding input value.\n * ``model``: The model used to translate the text.\n\n If only a single value is passed, then only a single\n dictionary will be returned.\n :raises: :class:`~exceptions.ValueError` if the number of\n values and translations differ." - }, - { - "code": "def broadcast_transaction(hex_tx, blockchain_client):\n if isinstance(blockchain_client, BlockcypherClient):\n return blockcypher.broadcast_transaction(hex_tx, blockchain_client)\n elif isinstance(blockchain_client, BlockchainInfoClient):\n return blockchain_info.broadcast_transaction(hex_tx, blockchain_client)\n elif isinstance(blockchain_client, ChainComClient):\n return chain_com.broadcast_transaction(hex_tx, blockchain_client)\n elif isinstance(blockchain_client, (BitcoindClient, AuthServiceProxy)):\n return bitcoind.broadcast_transaction(hex_tx, blockchain_client)\n elif hasattr(blockchain_client, \"broadcast_transaction\"):\n return blockchain_client.broadcast_transaction( hex_tx )\n elif isinstance(blockchain_client, BlockchainClient):\n raise Exception('That blockchain interface is not supported.')\n else:\n raise Exception('A BlockchainClient object is required')", - "docstring": "Dispatches a raw hex transaction to the network." - }, - { - "code": "def handle_error(self, error):\n logging.exception(\"try to sleep if there are repeating errors.\")\n error_desc = str(error)\n now = datetime.datetime.now()\n if error_desc not in self.error_time_log:\n self.error_time_log[error_desc] = now\n return\n time_of_last_encounter = self.error_time_log[str(error)]\n time_since_last_encounter = now - time_of_last_encounter\n if time_since_last_encounter.total_seconds() > self.config.get('min_seconds_between_errors'):\n self.error_time_log[error_desc] = now\n return\n if error_desc not in self.error_sleep_log:\n time.sleep(self.config.get('sleep_seconds_on_consecutive_errors'))\n self.error_sleep_log[error_desc] = 1\n else:\n sys.exit()", - "docstring": "Try to detect repetitive errors and sleep for a while to avoid being marked as spam" - }, - { - "code": "def unregister(self, namespace, command=None):\n if not namespace:\n namespace = DEFAULT_NAMESPACE\n namespace = namespace.strip().lower()\n if namespace not in self._commands:\n self._logger.warning(\"Unknown name space: %s\", namespace)\n return False\n if command is not None:\n command = command.strip().lower()\n if command not in self._commands[namespace]:\n self._logger.warning(\n \"Unknown command: %s.%s\", namespace, command\n )\n return False\n del self._commands[namespace][command]\n if not self._commands[namespace]:\n del self._commands[namespace]\n else:\n del self._commands[namespace]\n return True", - "docstring": "Unregisters the given command. 
If command is None, the whole name space\n is unregistered.\n\n :param namespace: The command name space.\n :param command: The shell name of the command, or None\n :return: True if the command was known, else False" - }, - { - "code": "def rpc_atlas_peer_exchange(self, remote_peer, **con_info):\n conf = get_blockstack_opts()\n if not conf.get('atlas', False):\n return {'error': 'Not an atlas node', 'http_status': 404}\n client_host = con_info['client_host']\n client_port = con_info['client_port']\n peer_host = None\n peer_port = None\n LOCALHOST = ['127.0.0.1', '::1', 'localhost']\n if client_host not in LOCALHOST:\n peer_host = client_host\n peer_port = client_port\n else:\n try:\n peer_host, peer_port = url_to_host_port(remote_peer)\n assert peer_host\n assert peer_port\n except:\n return {'error': 'Invalid remote peer address', 'http_status': 400}\n peers = self.peer_exchange(peer_host, peer_port)\n return self.success_response({'peers': peers})", - "docstring": "Accept a remotely-given atlas peer, and return our list\n of healthy peers. The remotely-given atlas peer will only\n be considered if the caller is localhost; otherwise, the caller's\n socket-given information will be used. This is to prevent\n a malicious node from filling up this node's peer table with\n junk.\n\n Returns at most atlas_max_neighbors() peers\n Returns {'status': True, 'peers': ...} on success\n Returns {'error': ...} on failure" - }, - { - "code": "def map_dict(key_map, *dicts, copy=False, base=None):\n it = combine_dicts(*dicts).items()\n get = key_map.get\n return combine_dicts({get(k, k): v for k, v in it}, copy=copy, base=base)", - "docstring": "Returns a dict with new key values.\n\n :param key_map:\n A dictionary that maps the dict keys ({old key: new key}\n :type key_map: dict\n\n :param dicts:\n A sequence of dicts.\n :type dicts: dict\n\n :param copy:\n If True, it returns a deepcopy of input values.\n :type copy: bool, optional\n\n :param base:\n Base dict where combine multiple dicts in one.\n :type base: dict, optional\n\n :return:\n A unique dict with new key values.\n :rtype: dict\n\n Example::\n\n >>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})\n >>> sorted(d.items())\n [('c', 1), ('d', 2)]" - }, - { - "code": "def success(self, request, message, extra_tags='', fail_silently=False):\n add(self.target_name, request, constants.SUCCESS, message, extra_tags=extra_tags,\n fail_silently=fail_silently)", - "docstring": "Add a message with the ``SUCCESS`` level." - }, - { - "code": "def get_instance(\n self,\n name,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if \"get_instance\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"get_instance\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.get_instance,\n default_retry=self._method_configs[\"GetInstance\"].retry,\n default_timeout=self._method_configs[\"GetInstance\"].timeout,\n client_info=self._client_info,\n )\n request = cloud_redis_pb2.GetInstanceRequest(name=name)\n return self._inner_api_calls[\"get_instance\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", - "docstring": "Gets the details of a specific Redis instance.\n\n Example:\n >>> from google.cloud import redis_v1beta1\n >>>\n >>> client = redis_v1beta1.CloudRedisClient()\n >>>\n >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')\n >>>\n >>> response = client.get_instance(name)\n\n Args:\n name (str): Required. 
Redis instance resource name using the form:\n ``projects/{project_id}/locations/{location_id}/instances/{instance_id}``\n where ``location_id`` refers to a GCP region\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will not\n be retried.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n A :class:`~google.cloud.redis_v1beta1.types.Instance` instance.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid." - }, - { - "code": "def matrix_iter(self, scale=1, border=None):\n return utils.matrix_iter(self.matrix, self._version, scale, border)", - "docstring": "\\\n Returns an iterator over the matrix which includes the border.\n\n The border is returned as sequence of light modules.\n Dark modules are reported as ``0x1``, light modules have the value\n ``0x0``.\n\n The following example converts the QR Code matrix into a list of\n lists which use boolean values for the modules (True = dark module,\n False = light module)::\n\n >>> import segno\n >>> qr = segno.make('The Beatles')\n >>> size = qr.symbol_size()[0]\n >>> res = []\n >>> # Scaling factor 2, default border\n >>> for row in qr.matrix_iter(scale=2):\n >>> res.append([col == 0x1 for col in row])\n >>> size * 2 == len(res[0])\n True\n\n :param int scale: The scaling factor (default: ``1``).\n :param int border: The size of border / quiet zone or ``None`` to\n indicate the default border.\n :raises: :py:exc:`ValueError` if the scaling factor or the border is\n invalid (i.e. negative)." - }, - { - "code": "def group_theta(node_length, node_idx):\n theta = -np.pi + node_idx * 2 * np.pi / node_length\n return theta", - "docstring": "Returns an angle corresponding to a node of interest.\n\n Intended to be used for placing node group labels at the correct spot.\n\n :param float node_length: total number of nodes in the graph.\n :param int node_idx: the index of the node of interest.\n :returns: theta -- the angle of the node of interest in radians." - }, - { - "code": "def UnicodeFromCodePage(string):\n codepage = ctypes.windll.kernel32.GetOEMCP()\n try:\n return string.decode(\"cp%s\" % codepage)\n except UnicodeError:\n try:\n return string.decode(\"utf16\", \"ignore\")\n except UnicodeError:\n return string.decode(\"utf8\", \"ignore\")", - "docstring": "Attempt to coerce string into a unicode object." - }, - { - "code": "def setDashboardOverlaySceneProcess(self, ulOverlayHandle, unProcessId):\n fn = self.function_table.setDashboardOverlaySceneProcess\n result = fn(ulOverlayHandle, unProcessId)\n return result", - "docstring": "Sets the dashboard overlay to only appear when the specified process ID has scene focus" - }, - { - "code": "def previousSibling(self) -> Optional[AbstractNode]:\n parent = self.parentNode\n if parent is None:\n return None\n return parent.childNodes.item(parent.childNodes.index(self) - 1)", - "docstring": "Return the previous sibling of this node.\n\n If there is no previous sibling, return ``None``." 
- }, - { - "code": "def _advance_frame(self):\n self.current_frame += 1\n if self.current_frame == self.frame_len:\n self.current_frame = 0", - "docstring": "Sets `self.current_frame` to the next frame, looping to the\n beginning if needed." - }, - { - "code": "def attach_storage(self, server, storage, storage_type, address):\n body = {'storage_device': {}}\n if storage:\n body['storage_device']['storage'] = str(storage)\n if storage_type:\n body['storage_device']['type'] = storage_type\n if address:\n body['storage_device']['address'] = address\n url = '/server/{0}/storage/attach'.format(server)\n res = self.post_request(url, body)\n return Storage._create_storage_objs(res['server']['storage_devices'], cloud_manager=self)", - "docstring": "Attach a Storage object to a Server. Return a list of the server's storages." - }, - { - "code": "def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):\n if 'RoW' not in self:\n if key == 'RoW':\n return ['RoW'] if 'RoW' in (only or []) else []\n elif only and 'RoW' in only:\n only.pop(only.index('RoW'))\n possibles = self.topology if only is None else {k: self[k] for k in only}\n faces = self[key]\n lst = [\n (k, len(v))\n for k, v in possibles.items()\n if v and faces.issuperset(v)\n ]\n return self._finish_filter(lst, key, include_self, exclusive, biggest_first)", - "docstring": "Get all locations that are completely within this location.\n\n If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained(\"RoW\")`` returns a list with either ``RoW`` or nothing." - }, - { - "code": "def logout(request, config_loader_path=None):\n state = StateCache(request.session)\n conf = get_config(config_loader_path, request)\n client = Saml2Client(conf, state_cache=state,\n identity_cache=IdentityCache(request.session))\n subject_id = _get_subject_id(request.session)\n if subject_id is None:\n logger.warning(\n 'The session does not contain the subject id for user %s',\n request.user)\n result = client.global_logout(subject_id)\n state.sync()\n if not result:\n logger.error(\"Looks like the user %s is not logged in any IdP/AA\", subject_id)\n return HttpResponseBadRequest(\"You are not logged in any IdP/AA\")\n if len(result) > 1:\n logger.error('Sorry, I do not know how to logout from several sources. I will logout just from the first one')\n for entityid, logout_info in result.items():\n if isinstance(logout_info, tuple):\n binding, http_info = logout_info\n if binding == BINDING_HTTP_POST:\n logger.debug('Returning form to the IdP to continue the logout process')\n body = ''.join(http_info['data'])\n return HttpResponse(body)\n elif binding == BINDING_HTTP_REDIRECT:\n logger.debug('Redirecting to the IdP to continue the logout process')\n return HttpResponseRedirect(get_location(http_info))\n else:\n logger.error('Unknown binding: %s', binding)\n return HttpResponseServerError('Failed to log out')\n else:\n return finish_logout(request, logout_info)\n logger.error('Could not logout because there only the HTTP_REDIRECT is supported')\n return HttpResponseServerError('Logout Binding not supported')", - "docstring": "SAML Logout Request initiator\n\n This view initiates the SAML2 Logout request\n using the pysaml2 library to create the LogoutRequest." 
- }, - { - "code": "def _get_resource_hash(zone_name, record):\n record_data = defaultdict(int, record)\n if type(record_data['GeoLocation']) == dict:\n record_data['GeoLocation'] = \":\".join([\"{}={}\".format(k, v) for k, v in record_data['GeoLocation'].items()])\n args = [\n zone_name,\n record_data['Name'],\n record_data['Type'],\n record_data['Weight'],\n record_data['Region'],\n record_data['GeoLocation'],\n record_data['Failover'],\n record_data['HealthCheckId'],\n record_data['TrafficPolicyInstanceId']\n ]\n return get_resource_id('r53r', args)", - "docstring": "Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique\n resource IDs\n\n Args:\n zone_name (`str`): The name of the DNS Zone the record belongs to\n record (`dict`): A record dict to generate the hash from\n\n Returns:\n `str`" - }, - { - "code": "def _ref_bus_angle_constraint(self, buses, Va, xmin, xmax):\n refs = [bus._i for bus in buses if bus.type == REFERENCE]\n Varefs = array([b.v_angle for b in buses if b.type == REFERENCE])\n xmin[Va.i1 - 1 + refs] = Varefs\n xmax[Va.iN - 1 + refs] = Varefs\n return xmin, xmax", - "docstring": "Adds a constraint on the reference bus angles." - }, - { - "code": "def _structure_default(self, obj, cl):\n if cl is Any or cl is Optional:\n return obj\n msg = (\n \"Unsupported type: {0}. Register a structure hook for \"\n \"it.\".format(cl)\n )\n raise ValueError(msg)", - "docstring": "This is the fallthrough case. Everything is a subclass of `Any`.\n\n A special condition here handles ``attrs`` classes.\n\n Bare optionals end here too (optionals with arguments are unions.) We\n treat bare optionals as Any." - }, - { - "code": "def delete(self, storagemodel:object, modeldefinition = None) -> bool:\n deleted = False\n if (storagemodel.id != '') and (storagemodel.pop_receipt != '') and (not storagemodel.id is None) and (not storagemodel.pop_receipt is None):\n try:\n modeldefinition['queueservice'].delete_message(storagemodel._queuename, storagemodel.id, storagemodel.pop_receipt)\n deleted = True\n except Exception as e:\n msg = 'can not delete queue message: queue {} with message.id {!s} because {!s}'.format(storagemodel._queuename, storagemodel.id, e)\n raise AzureStorageWrapException(msg=msg)\n else:\n log.info('cant update queuemessage {} due to missing id and pop_receipt'.format(storagemodel._queuename))\n return deleted", - "docstring": "delete the message in queue" - }, - { - "code": "def get_minimum_needs(self):\n minimum_needs = OrderedDict()\n for resource in self.minimum_needs['resources']:\n if resource['Unit abbreviation']:\n name = '%s [%s]' % (\n tr(resource['Resource name']),\n resource['Unit abbreviation']\n )\n else:\n name = tr(resource['Resource name'])\n amount = resource['Default']\n minimum_needs[name] = amount\n return OrderedDict(minimum_needs)", - "docstring": "Get the minimum needed information about the minimum needs.\n\n That is the resource and the amount.\n\n :returns: minimum needs\n :rtype: OrderedDict" - }, - { - "code": "def _create_session(self, test_connection=False):\n session = consulate.Session(host=self.host, port=self.port)\n if test_connection:\n session.status.leader()\n return session", - "docstring": "Create a consulate.session object, and query for its leader to ensure\n that the connection is made.\n\n :param test_connection: call .leader() to ensure that the connection\n is valid\n :type test_connection: bool\n :return consulate.Session instance" - }, - { - "code": "def 
get_database_users(self):\n url = \"db/{0}/users\".format(self._database)\n response = self.request(\n url=url,\n method='GET',\n expected_response_code=200\n )\n return response.json()", - "docstring": "Get list of database users." - }, - { - "code": "def _reset_suffix_links(self):\n self._suffix_links_set = False\n for current, _parent in self.dfs():\n current.suffix = None\n current.dict_suffix = None\n current.longest_prefix = None", - "docstring": "Reset all suffix links in all nodes in this trie." - }, - { - "code": "def _get_os_price_id(items, os, location):\n for item in items:\n if any([utils.lookup(item,\n 'itemCategory',\n 'categoryCode') != 'os',\n utils.lookup(item,\n 'softwareDescription',\n 'referenceCode') != os]):\n continue\n for price in item['prices']:\n if not _matches_location(price, location):\n continue\n return price['id']\n raise SoftLayer.SoftLayerError(\"Could not find valid price for os: '%s'\" %\n os)", - "docstring": "Returns the price id matching." - }, - { - "code": "def equals(self, rest_object):\n if self._is_dirty:\n return False\n if rest_object is None:\n return False\n if not isinstance(rest_object, NURESTObject):\n raise TypeError('The object is not a NURESTObject %s' % rest_object)\n if self.rest_name != rest_object.rest_name:\n return False\n if self.id and rest_object.id:\n return self.id == rest_object.id\n if self.local_id and rest_object.local_id:\n return self.local_id == rest_object.local_id\n return False", - "docstring": "Compare with another object" - }, - { - "code": "def close(self):\n if self.debug:\n time.sleep(10)\n for host in self.workers:\n host.close()\n for broker in self.brokers:\n try:\n broker.close()\n except AttributeError:\n pass\n scoop.logger.info('Finished cleaning spawned subprocesses.')", - "docstring": "Subprocess cleanup." 
- }, - { - "code": "def collect_spans(ast: AST) -> List[Tuple[str, Tuple[int, int]]]:\n spans = []\n if ast.get(\"subject\", False):\n spans.extend(collect_spans(ast[\"subject\"]))\n if ast.get(\"object\", False):\n spans.extend(collect_spans(ast[\"object\"]))\n if ast.get(\"nested\", False):\n spans.extend(collect_spans(ast[\"nested\"]))\n if ast.get(\"function\", False):\n log.debug(f\"Processing function\")\n spans.append((\"Function\", ast[\"function\"][\"name_span\"]))\n log.debug(f\"Spans: {spans}\")\n if ast.get(\"args\", False):\n for idx, arg in enumerate(ast[\"args\"]):\n log.debug(f\"Arg {arg}\")\n if arg.get(\"function\", False):\n log.debug(f\"Recursing on arg function\")\n results = collect_spans(arg)\n log.debug(f\"Results {results}\")\n spans.extend(results)\n elif arg.get(\"nsarg\", False):\n log.debug(f\"Processing NSArg Arg {arg}\")\n spans.append((\"NSArg\", arg[\"span\"]))\n spans.append((\"NSPrefix\", arg[\"nsarg\"][\"ns_span\"]))\n spans.append((\"NSVal\", arg[\"nsarg\"][\"ns_val_span\"]))\n elif arg[\"type\"] == \"StrArg\":\n spans.append((\"StrArg\", arg[\"span\"]))\n log.debug(f\"Spans: {spans}\")\n return spans", - "docstring": "Collect flattened list of spans of BEL syntax types\n\n Provide simple list of BEL syntax type spans for highlighting.\n Function names, NSargs, NS prefix, NS value and StrArgs will be\n tagged.\n\n Args:\n ast: AST of BEL assertion\n\n Returns:\n List[Tuple[str, Tuple[int, int]]]: list of span objects (, (, ))" - }, - { - "code": "def plot_grid(step):\n rad = get_rprof(step, 'r')[0]\n drad = get_rprof(step, 'dr')[0]\n _, unit = step.sdat.scale(1, 'm')\n if unit:\n unit = ' ({})'.format(unit)\n fig, (ax1, ax2) = plt.subplots(2, sharex=True)\n ax1.plot(rad, '-ko')\n ax1.set_ylabel('$r$' + unit)\n ax2.plot(drad, '-ko')\n ax2.set_ylabel('$dr$' + unit)\n ax2.set_xlim([-0.5, len(rad) - 0.5])\n ax2.set_xlabel('Cell number')\n misc.saveplot(fig, 'grid', step.istep)", - "docstring": "Plot cell position and thickness.\n\n The figure is call grid_N.pdf where N is replace by the step index.\n\n Args:\n step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\n instance." - }, - { - "code": "def substitute_environ(self):\n for attr_name in dir(self):\n if attr_name.startswith('_') or attr_name.upper() != attr_name:\n continue\n orig_value = getattr(self, attr_name)\n is_required = isinstance(orig_value, Required)\n orig_type = orig_value.v_type if is_required else type(orig_value)\n env_var_name = self._ENV_PREFIX + attr_name\n env_var = os.getenv(env_var_name, None)\n if env_var is not None:\n if issubclass(orig_type, bool):\n env_var = env_var.upper() in ('1', 'TRUE')\n elif issubclass(orig_type, int):\n env_var = int(env_var)\n elif issubclass(orig_type, Path):\n env_var = Path(env_var)\n elif issubclass(orig_type, bytes):\n env_var = env_var.encode()\n setattr(self, attr_name, env_var)\n elif is_required and attr_name not in self._custom_settings:\n raise RuntimeError('The required environment variable \"{0}\" is currently not set, '\n 'you\\'ll need to run `source activate.settings.sh` '\n 'or you can set that single environment variable with '\n '`export {0}=\"\"`'.format(env_var_name))", - "docstring": "Substitute environment variables into settings." 
- }, - { - "code": "def get_timestamp_info(value):\n value, offset = _split_offset(value)\n fmt, microsec = _get_timestamp_format(value)\n dt_value = _datetime_obj_factory(value, fmt)\n return dt_value, fmt, offset, microsec", - "docstring": "Returns the datetime object, the format, the offset and the microsecond of the timestamp in input\n\n :type value: `str`" - }, - { - "code": "def load_img(name):\n fullname = name\n try:\n image = pygame.image.load(fullname)\n if image.get_alpha() is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n except pygame.error, message:\n print \"Error: couldn't load image: \", fullname\n raise SystemExit, message\n return (image, image.get_rect())", - "docstring": "Load image and return an image object" - }, - { - "code": "def list_server_certificates(path_prefix='/', region=None, key=None, keyid=None, profile=None):\n retries = 10\n sleep = 6\n conn = __utils__['boto3.get_connection']('iam', region=region, key=key, keyid=keyid,\n profile=profile)\n Items = []\n while retries:\n try:\n log.debug('Garnering list of IAM Server Certificates')\n IsTruncated = True\n while IsTruncated:\n kwargs = {'PathPrefix': path_prefix}\n ret = conn.list_server_certificates(**kwargs)\n Items += ret.get('ServerCertificateMetadataList', [])\n IsTruncated = ret.get('IsTruncated')\n kwargs.update({'Marker': ret.get('Marker')})\n return Items\n except botocore.exceptions.ParamValidationError as err:\n raise SaltInvocationError(str(err))\n except botocore.exceptions.ClientError as err:\n if retries and jmespath.search('Error.Code', err.response) == 'Throttling':\n retries -= 1\n log.debug('Throttled by AWS API, retrying in %s seconds...', sleep)\n time.sleep(sleep)\n continue\n log.error('Failed to list IAM Server Certificates: %s', err.message)\n return None", - "docstring": "Lists the server certificates stored in IAM that have the specified path prefix.\n\n .. versionadded:: ???\n\n :param path_prefix:\n The path prefix for filtering the results. For example: /company/servercerts would get\n all server certificates for which the path starts with /company/servercerts .\n This parameter is optional. If it is not included, it defaults to a slash (/), listing all\n server certificates. This parameter allows (per its regex pattern) a string of characters\n consisting of either a forward slash (/) by itself or a string that must begin and end with\n forward slashes. In addition, it can contain any ASCII character from the ! (u0021)\n through the DEL character (u007F), including most punctuation characters, digits, and upper\n and lowercased letters.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt myminion boto_iam.list_server_certificates path_prefix=/somepath/" - }, - { - "code": "def imei(self) -> str:\n num = self.random.choice(IMEI_TACS)\n num = num + str(self.random.randint(100000, 999999))\n return num + luhn_checksum(num)", - "docstring": "Generate a random IMEI.\n\n :return: IMEI." 
- }, - { - "code": "def cut_from_chain(sciobj_model):\n if _is_head(sciobj_model):\n old_pid = sciobj_model.obsoletes.did\n _cut_head_from_chain(sciobj_model)\n elif _is_tail(sciobj_model):\n old_pid = sciobj_model.obsoleted_by.did\n _cut_tail_from_chain(sciobj_model)\n else:\n old_pid = sciobj_model.obsoleted_by.did\n _cut_embedded_from_chain(sciobj_model)\n _update_sid_to_last_existing_pid_map(old_pid)", - "docstring": "Remove an object from a revision chain.\n\n The object can be at any location in the chain, including the head or tail.\n\n Preconditions:\n - The object with the pid is verified to exist and to be a member of an\n revision chain. E.g., with:\n\n d1_gmn.app.views.asserts.is_existing_object(pid)\n d1_gmn.app.views.asserts.is_in_revision_chain(pid)\n\n Postconditions:\n - The given object is a standalone object with empty obsoletes, obsoletedBy and\n seriesId fields.\n - The previously adjacent objects in the chain are adjusted to close any gap that\n was created or remove dangling reference at the head or tail.\n - If the object was the last object in the chain and the chain has a SID, the SID\n reference is shifted over to the new last object in the chain." - }, - { - "code": "def get_residuals(ds, m):\n model_spectra = get_model_spectra(ds, m)\n resid = ds.test_flux - model_spectra\n return resid", - "docstring": "Using the dataset and model object, calculate the residuals and return\n\n Parameters\n ----------\n ds: dataset object\n m: model object\n Return\n ------\n residuals: array of residuals, spec minus model spec" - }, - { - "code": "def transform(self, X, y=None):\n X = check_array(X)\n n_features = X.shape[1]\n X_transformed = np.copy(X)\n non_zero_vector = np.count_nonzero(X_transformed, axis=1)\n non_zero = np.reshape(non_zero_vector, (-1, 1))\n zero_col = np.reshape(n_features - non_zero_vector, (-1, 1))\n X_transformed = np.hstack((non_zero, X_transformed))\n X_transformed = np.hstack((zero_col, X_transformed))\n return X_transformed", - "docstring": "Transform data by adding two virtual features.\n\n Parameters\n ----------\n X: numpy ndarray, {n_samples, n_components}\n New data, where n_samples is the number of samples and n_components\n is the number of components.\n y: None\n Unused\n\n Returns\n -------\n X_transformed: array-like, shape (n_samples, n_features)\n The transformed feature set" - }, - { - "code": "def generous_parse_uri(uri):\n parse_result = urlparse(uri)\n if parse_result.scheme == '':\n abspath = os.path.abspath(parse_result.path)\n if IS_WINDOWS:\n abspath = windows_to_unix_path(abspath)\n fixed_uri = \"file://{}\".format(abspath)\n parse_result = urlparse(fixed_uri)\n return parse_result", - "docstring": "Return a urlparse.ParseResult object with the results of parsing the\n given URI. This has the same properties as the result of parse_uri.\n\n When passed a relative path, it determines the absolute path, sets the\n scheme to file, the netloc to localhost and returns a parse of the result." 
- }, - { - "code": "def list_processors(self, instance=None):\n url = '/processors'\n if instance:\n url += '/' + instance\n response = self.get_proto(path=url)\n message = rest_pb2.ListProcessorsResponse()\n message.ParseFromString(response.content)\n processors = getattr(message, 'processor')\n return iter([Processor(processor) for processor in processors])", - "docstring": "Lists the processors.\n\n Processors are returned in lexicographical order.\n\n :param Optional[str] instance: A Yamcs instance name.\n :rtype: ~collections.Iterable[.Processor]" - }, - { - "code": "def compute_reverse_dependencies(\n self, targets: Iterable[BuildTarget]\n ) -> Mapping[str, Iterable[BuildTarget]]:\n result = defaultdict(list)\n for target in targets:\n for dependency in target.dependencies:\n result[dependency].append(target)\n return result", - "docstring": "Compute the set of targets which depend on each target." - }, - { - "code": "def connect(uri, factory=pymongo.MongoClient):\n warnings.warn(\n \"do not use. Just call MongoClient directly.\", DeprecationWarning)\n return factory(uri)", - "docstring": "Use the factory to establish a connection to uri." - }, - { - "code": "def get_final(self):\n if self.prec is None:\n return self.x\n else:\n return self.prec.undo(self.x)", - "docstring": "Return the final solution in the original coordinates" - }, - { - "code": "def check_requirements():\n if not os.path.exists(REQUIREMENTS):\n sys.exit(\n ansi.error() + ' %s is missing. Please check it in.' % ansi.underline(REQUIREMENTS)\n )\n with open(REQUIREMENTS, 'r', encoding='utf-8') as f:\n dependencies = f.readlines()\n vcs = [d for d in dependencies if re.match(r'^(-e )?(git|svn|hg|bzr).*', d)]\n dependencies = list(set(dependencies) - set(vcs))\n missing = []\n try:\n pkg_resources.require(dependencies)\n except (\n pkg_resources.ContextualVersionConflict,\n pkg_resources.DistributionNotFound,\n pkg_resources.VersionConflict\n ) as error:\n missing.append(str(error))\n except pkg_resources.RequirementParseError:\n pass\n if missing:\n missing = ' missing requirement:\\n ' + os.linesep.join(missing)\n if '--env-checked' in sys.argv:\n sys.exit(ansi.error() + missing + '\\nRequirement installation failure, please check for errors in:\\n $ lore install\\n')\n else:\n print(ansi.warning() + missing)\n import lore.__main__\n lore.__main__.install_requirements(None)\n reboot('--env-checked')", - "docstring": "Make sure all listed packages from requirements.txt have been installed into the virtualenv at boot." 
- }, - { - "code": "def get_proficiency_form_for_create(self, objective_id, resource_id, proficiency_record_types):\n from dlkit.abstract_osid.id.primitives import Id as ABCId\n from dlkit.abstract_osid.type.primitives import Type as ABCType\n if not isinstance(objective_id, ABCId):\n raise errors.InvalidArgument('argument is not a valid OSID Id')\n if not isinstance(resource_id, ABCId):\n raise errors.InvalidArgument('argument is not a valid OSID Id')\n for arg in proficiency_record_types:\n if not isinstance(arg, ABCType):\n raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')\n if proficiency_record_types == []:\n obj_form = objects.ProficiencyForm(\n objective_bank_id=self._catalog_id,\n objective_id=objective_id,\n resource_id=resource_id,\n catalog_id=self._catalog_id,\n runtime=self._runtime,\n proxy=self._proxy)\n else:\n obj_form = objects.ProficiencyForm(\n objective_bank_id=self._catalog_id,\n record_types=proficiency_record_types,\n objective_id=objective_id,\n resource_id=resource_id,\n catalog_id=self._catalog_id,\n runtime=self._runtime,\n proxy=self._proxy)\n obj_form._for_update = False\n self._forms[obj_form.get_id().get_identifier()] = not CREATED\n return obj_form", - "docstring": "Gets the proficiency form for creating new proficiencies.\n\n A new form should be requested for each create transaction.\n\n arg: objective_id (osid.id.Id): the ``Id`` of the\n ``Objective``\n arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``\n arg: proficiency_record_types (osid.type.Type[]): array of\n proficiency record types\n return: (osid.learning.ProficiencyForm) - the proficiency form\n raise: NotFound - ``objective_id`` or ``resource_id`` is not\n found\n raise: NullArgument - ``objective_id, resource_id,`` or\n ``proficieny_record_types`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n raise: Unsupported - unable to get form for requested record\n types\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def parse_value(self, value):\n parsed = super(BoolField, self).parse_value(value)\n return bool(parsed) if parsed is not None else None", - "docstring": "Cast value to `bool`." - }, - { - "code": "def save_pickle(obj, outfile, protocol=2):\n with open(outfile, 'wb') as f:\n pickle.dump(obj, f, protocol=protocol)\n return outfile", - "docstring": "Save the object as a pickle file\n\n Args:\n outfile (str): Filename\n protocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2\n\n Returns:\n str: Path to pickle file" - }, - { - "code": "def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0):\n x = np.ma.masked_invalid(pd.Series(x).interpolate())\n try:\n ind = np.isfinite(x).nonzero()[0][0]\n x[ind:] = signal.savgol_filter(x[ind:], window_length, polyorder, deriv,\n delta, axis, mode, cval)\n except IndexError:\n pass\n return np.ma.masked_invalid(x)", - "docstring": "Wrapper for the scipy.signal.savgol_filter function that handles Nan values.\n\n See: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/3\n\n Returns\n -------\n y : ndarray, same shape as `x`\n The filtered data." 
- }, - { - "code": "def updateWPText(self):\n self.wpText.set_position((self.leftPos+(1.5*self.vertSize/10.0),0.97-(1.5*self.vertSize)+(0.5*self.vertSize/10.0)))\n self.wpText.set_size(self.fontSize)\n if type(self.nextWPTime) is str:\n self.wpText.set_text('%.f/%.f\\n(%.f m, ~ s)' % (self.currentWP,self.finalWP,self.wpDist))\n else:\n self.wpText.set_text('%.f/%.f\\n(%.f m, %.f s)' % (self.currentWP,self.finalWP,self.wpDist,self.nextWPTime))", - "docstring": "Updates the current waypoint and distance to it." - }, - { - "code": "def send_email_with_callback_token(user, email_token, **kwargs):\n try:\n if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS:\n email_subject = kwargs.get('email_subject',\n api_settings.PASSWORDLESS_EMAIL_SUBJECT)\n email_plaintext = kwargs.get('email_plaintext',\n api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE)\n email_html = kwargs.get('email_html',\n api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME)\n context = inject_template_context({'callback_token': email_token.key, })\n html_message = loader.render_to_string(email_html, context,)\n send_mail(\n email_subject,\n email_plaintext % email_token.key,\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS,\n [getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)],\n fail_silently=False,\n html_message=html_message,)\n else:\n logger.debug(\"Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.\")\n return False\n return True\n except Exception as e:\n logger.debug(\"Failed to send token email to user: %d.\"\n \"Possibly no email on user object. Email entered was %s\" %\n (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)))\n logger.debug(e)\n return False", - "docstring": "Sends a Email to user.email.\n\n Passes silently without sending in test environment" - }, - { - "code": "def list():\n for cls, metrics in metric_catalog.items():\n echo(white(cls.__name__))\n for metric in metrics.keys():\n echo('> {0}'.format(metric))", - "docstring": "List all known metrics" - }, - { - "code": "def memory_data():\n vm = psutil.virtual_memory()\n sw = psutil.swap_memory()\n return {\n 'virtual': {\n 'total': mark(vm.total, 'bytes'),\n 'free': mark(vm.free, 'bytes'),\n 'percent': mark(vm.percent, 'percentage')\n },\n 'swap': {\n 'total': mark(sw.total, 'bytes'),\n 'free': mark(sw.free, 'bytes'),\n 'percent': mark(sw.percent, 'percentage')\n },\n }", - "docstring": "Returns memory data." 
- }, - { - "code": "def xml_path_completion(xml_path):\n if xml_path.startswith(\"/\"):\n full_path = xml_path\n else:\n full_path = os.path.join(robosuite.models.assets_root, xml_path)\n return full_path", - "docstring": "Takes in a local xml path and returns a full path.\n if @xml_path is absolute, do nothing\n if @xml_path is not absolute, load xml that is shipped by the package" - }, - { - "code": "def add(from_user, from_id, to_user, to_id, type):\n \"adds a relation to the graph\"\n if options.users and to_user:\n G.add_node(from_user, screen_name=from_user)\n G.add_node(to_user, screen_name=to_user)\n if G.has_edge(from_user, to_user):\n weight = G[from_user][to_user]['weight'] + 1\n else:\n weight = 1\n G.add_edge(from_user, to_user, type=type, weight=weight)\n elif not options.users and to_id:\n G.add_node(from_id, screen_name=from_user, type=type)\n if to_user:\n G.add_node(to_id, screen_name=to_user)\n else:\n G.add_node(to_id)\n G.add_edge(from_id, to_id, type=type)", - "docstring": "adds a relation to the graph" - }, - { - "code": "def populateMainMenu(self, parentMenu):\n parentMenu.addAction(\"Configure\", self.configure)\n parentMenu.addAction(\"Collect garbage\", self.__collectGarbage)", - "docstring": "Populates the main menu.\n\n The main menu looks as follows:\n Plugins\n - Plugin manager (fixed item)\n - Separator (fixed item)\n - (this is the parentMenu passed)\n ...\n If no items were populated by the plugin then there will be no\n menu item shown.\n It is suggested to insert plugin configuration item here if so." - }, - { - "code": "def _add_sj_index_commands(fq1, ref_file, gtf_file):\n if _has_sj_index(ref_file):\n return \"\"\n else:\n rlength = fastq.estimate_maximum_read_length(fq1)\n cmd = \" --sjdbGTFfile %s \" % gtf_file\n cmd += \" --sjdbOverhang %s \" % str(rlength - 1)\n return cmd", - "docstring": "newer versions of STAR can generate splice junction databases on thephfly\n this is preferable since we can tailor it to the read lengths" - }, - { - "code": "def _aux_type(self, i):\n aux_type = ctypes.c_int()\n check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))\n return _DTYPE_MX_TO_NP[aux_type.value]", - "docstring": "Data-type of the array's ith aux data.\n\n Returns\n -------\n numpy.dtype\n This BaseSparseNDArray's aux data type." 
- }, - { - "code": "def _writeSuperLinks(self, superLinks, fileObject):\n for slink in superLinks:\n fileObject.write('SLINK %s %s\\n' % (\n slink.slinkNumber,\n slink.numPipes))\n for node in slink.superNodes:\n fileObject.write('NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f\\n' % (\n node.nodeNumber,\n node.groundSurfaceElev,\n node.invertElev,\n node.manholeSA,\n node.nodeInletCode,\n node.cellI,\n node.cellJ,\n node.weirSideLength,\n node.orificeDiameter))\n for pipe in slink.pipes:\n fileObject.write('PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f\\n' % (\n pipe.pipeNumber,\n pipe.xSecType,\n pipe.diameterOrHeight,\n pipe.width,\n pipe.slope,\n pipe.roughness,\n pipe.length,\n pipe.conductance,\n pipe.drainSpacing))", - "docstring": "Write SuperLinks to File Method" - }, - { - "code": "def binary_op(self, op, other, **kwargs):\n func = getattr(pandas.DataFrame, op)\n return self._inter_df_op_handler(func, other, **kwargs)", - "docstring": "Perform an operation between two objects.\n\n Note: The list of operations is as follows:\n - add\n - eq\n - floordiv\n - ge\n - gt\n - le\n - lt\n - mod\n - mul\n - ne\n - pow\n - rfloordiv\n - rmod\n - rpow\n - rsub\n - rtruediv\n - sub\n - truediv\n - __and__\n - __or__\n - __xor__\n Args:\n op: The operation. See list of operations above\n other: The object to operate against.\n\n Returns:\n A new QueryCompiler object." - }, - { - "code": "def request_token(self):\n client = OAuth1(\n client_key=self._server_cache[self.client.server].key,\n client_secret=self._server_cache[self.client.server].secret,\n callback_uri=self.callback,\n )\n request = {\"auth\": client}\n response = self._requester(\n requests.post,\n \"oauth/request_token\",\n **request\n )\n data = parse.parse_qs(response.text)\n data = {\n 'token': data[self.PARAM_TOKEN][0],\n 'token_secret': data[self.PARAM_TOKEN_SECRET][0]\n }\n return data", - "docstring": "Gets OAuth request token" - }, - { - "code": "def valuemap(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if 'value' in kwargs:\n val = kwargs['value']\n del kwargs['value']\n _f = f(*args, **kwargs)\n def valued_f(*args, **kwargs):\n result = _f(*args, **kwargs)\n s, obj, span = result\n if callable(val):\n return PegreResult(s, val(obj), span)\n else:\n return PegreResult(s, val, span)\n return valued_f\n else:\n return f(*args, **kwargs)\n return wrapper", - "docstring": "Decorator to help PEG functions handle value conversions." - }, - { - "code": "def timeparse(sval, granularity='seconds'):\n match = COMPILED_SIGN.match(sval)\n sign = -1 if match.groupdict()['sign'] == '-' else 1\n sval = match.groupdict()['unsigned']\n for timefmt in COMPILED_TIMEFORMATS:\n match = timefmt.match(sval)\n if match and match.group(0).strip():\n mdict = match.groupdict()\n if granularity == 'minutes':\n mdict = _interpret_as_minutes(sval, mdict)\n if all(v.isdigit() for v in list(mdict.values()) if v):\n return sign * sum([MULTIPLIERS[k] * int(v, 10) for (k, v) in\n list(mdict.items()) if v is not None])\n elif ('secs' not in mdict or\n mdict['secs'] is None or\n mdict['secs'].isdigit()):\n return (\n sign * int(sum([MULTIPLIERS[k] * float(v) for (k, v) in\n list(mdict.items()) if k != 'secs' and v is not None])) +\n (int(mdict['secs'], 10) if mdict['secs'] else 0))\n else:\n return sign * sum([MULTIPLIERS[k] * float(v) for (k, v) in\n list(mdict.items()) if v is not None])", - "docstring": "Parse a time expression, returning it as a number of seconds. 
If\n possible, the return value will be an `int`; if this is not\n possible, the return will be a `float`. Returns `None` if a time\n expression cannot be parsed from the given string.\n\n Arguments:\n - `sval`: the string value to parse\n\n >>> timeparse('1:24')\n 84\n >>> timeparse(':22')\n 22\n >>> timeparse('1 minute, 24 secs')\n 84\n >>> timeparse('1m24s')\n 84\n >>> timeparse('1.2 minutes')\n 72\n >>> timeparse('1.2 seconds')\n 1.2\n\n Time expressions can be signed.\n\n >>> timeparse('- 1 minute')\n -60\n >>> timeparse('+ 1 minute')\n 60\n \n If granularity is specified as ``minutes``, then ambiguous digits following\n a colon will be interpreted as minutes; otherwise they are considered seconds.\n \n >>> timeparse('1:30')\n 90\n >>> timeparse('1:30', granularity='minutes')\n 5400" - }, - { - "code": "def pdf(cls, mass, log_mode=True):\n alpha = 2.35\n a = 0.060285569480482866\n dn_dm = a * mass**(-alpha)\n if log_mode:\n return dn_dm * (mass * np.log(10))\n else:\n return dn_dm", - "docstring": "PDF for the Salpeter IMF.\n\n Value of 'a' is set to normalize the IMF to 1 between 0.1 and 100 Msun" - }, - { - "code": "def _check_pos(self, level, *tokens):\n for record in self.records:\n if all(record.levelno == level and token in record.message for token in tokens):\n return\n level_name = logging.getLevelName(level)\n msgs = [\"Tokens {} not found in {}, all was logged is...\".format(tokens, level_name)]\n for record in self.records:\n msgs.append(\" {:9s} {!r}\".format(record.levelname, record.message))\n self.test_instance.fail(\"\\n\".join(msgs))", - "docstring": "Check if the different tokens were logged in one record, assert by level." - }, - { - "code": "def rpcexec(self, payload):\n log.debug(json.dumps(payload))\n self.ws.send(json.dumps(payload, ensure_ascii=False).encode(\"utf8\"))", - "docstring": "Execute a call by sending the payload\n\n :param dict payload: Payload data\n :raises ValueError: if the server does not respond in proper JSON format\n :raises RPCError: if the server returns an error" - }, - { - "code": "def expand_path(experiment_config, key):\n if experiment_config.get(key):\n experiment_config[key] = os.path.expanduser(experiment_config[key])", - "docstring": "Change '~' to user home directory" - }, - { - "code": "def read_nmr_efg(self):\n header_pattern = r'^\\s+NMR quadrupolar parameters\\s+$\\n' \\\n r'^\\s+Cq : quadrupolar parameter\\s+Cq=e[*]Q[*]V_zz/h$\\n' \\\n r'^\\s+eta: asymmetry parameters\\s+\\(V_yy - V_xx\\)/ V_zz$\\n' \\\n r'^\\s+Q : nuclear electric quadrupole moment in mb \\(millibarn\\)$\\n' \\\n r'^-{50,}$\\n' \\\n r'^\\s+ion\\s+Cq\\(MHz\\)\\s+eta\\s+Q \\(mb\\)\\s+$\\n' \\\n r'^-{50,}\\s*$\\n'\n row_pattern = r'\\d+\\s+(?P<cq>[-]?\\d+\\.\\d+)\\s+(?P<eta>[-]?\\d+\\.\\d+)\\s+' \\\n r'(?P<nuclear_quadrupole_moment>[-]?\\d+\\.\\d+)'\n footer_pattern = r'-{50,}\\s*$'\n self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float,\n last_one_only=True, attribute_name=\"efg\")", - "docstring": "Parse the NMR Electric Field Gradient interpreted values.\n\n Returns:\n Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR.\n Each dict key/value pair corresponds to a component of the tensors." - }, - { - "code": "def less_strict_bool(x):\n if x is None:\n return False\n elif x is True or x is False:\n return x\n else:\n return strict_bool(x)", - "docstring": "Idempotent and None-safe version of strict_bool."
- }, - { - "code": "def pprint_blockers(blockers):\n pprinted = []\n for blocker in sorted(blockers, key=lambda x: tuple(reversed(x))):\n buf = [blocker[0]]\n if len(blocker) > 1:\n buf.append(' (which is blocking ')\n buf.append(', which is blocking '.join(blocker[1:]))\n buf.append(')')\n pprinted.append(''.join(buf))\n return pprinted", - "docstring": "Pretty print blockers into a sequence of strings.\n\n Results will be sorted by top-level project name. This means that if a\n project is blocking another project then the dependent project will be\n what is used in the sorting, not the project at the bottom of the\n dependency graph." - }, - { - "code": "def get_worker(*queue_names, **kwargs):\n job_class = get_job_class(kwargs.pop('job_class', None))\n queue_class = kwargs.pop('queue_class', None)\n queues = get_queues(*queue_names, **{'job_class': job_class,\n 'queue_class': queue_class})\n queue_class = queues[0].__class__\n worker_class = get_worker_class(kwargs.pop('worker_class', None))\n return worker_class(queues,\n connection=queues[0].connection,\n exception_handlers=get_exception_handlers() or None,\n job_class=job_class,\n queue_class=queue_class,\n **kwargs)", - "docstring": "Returns a RQ worker for all queues or specified ones." - }, - { - "code": "def add_entity(self,entity):\n if self.entity_layer is None:\n self.entity_layer = Centities(type=self.type)\n self.root.append(self.entity_layer.get_node())\n self.entity_layer.add_entity(entity)", - "docstring": "Adds an entity to the entity layer\n @type entity: L{Centity}\n @param entity: the entity object" - }, - { - "code": "def compose(self):\n rr = self.__reagents + self.__reactants\n if rr:\n if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr):\n raise TypeError('Queries not composable')\n r = reduce(or_, rr)\n else:\n r = MoleculeContainer()\n if self.__products:\n if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products):\n raise TypeError('Queries not composable')\n p = reduce(or_, self.__products)\n else:\n p = MoleculeContainer()\n return r ^ p", - "docstring": "get CGR of reaction\n\n reagents will be presented as unchanged molecules\n :return: CGRContainer" - }, - { - "code": "def receive_hardbounce_post(self, post_params):\n if isinstance(post_params, dict):\n required_params = ['action', 'email', 'sig']\n if not self.check_for_valid_postback_actions(required_params, post_params):\n return False\n else:\n return False\n if post_params['action'] != 'hardbounce':\n return False\n signature = post_params['sig']\n post_params = post_params.copy()\n del post_params['sig']\n if signature != get_signature_hash(post_params, self.secret):\n return False\n if 'send_id' in post_params:\n send_id = post_params['send_id']\n send_response = self.get_send(send_id)\n if not send_response.is_ok():\n return False\n send_obj = send_response.get_body()\n if not send_obj or 'email' not in send_obj:\n return False\n if 'blast_id' in post_params:\n blast_id = post_params['blast_id']\n blast_response = self.get_blast(blast_id)\n if not blast_response.is_ok():\n return False\n blast_obj = blast_response.get_body()\n if not blast_obj:\n return False\n return True", - "docstring": "Hard bounce postbacks" - }, - { - "code": "def docstr(self, prefix='', include_label=True):\n return '\\n'.join([x.docstr(prefix, include_label) for x in self])", - "docstring": "Returns the ``docstr`` of each parameter joined together." 
- }, - { - "code": "def setRootJob(self, rootJobStoreID):\n with self.writeSharedFileStream(self.rootJobStoreIDFileName) as f:\n f.write(rootJobStoreID.encode('utf-8'))", - "docstring": "Set the root job of the workflow backed by this job store\n\n :param str rootJobStoreID: The ID of the job to set as root" - }, - { - "code": "def _get_next_or_previous_by_publish_date(self, is_next, **kwargs):\n arg = \"publish_date__gt\" if is_next else \"publish_date__lt\"\n order = \"publish_date\" if is_next else \"-publish_date\"\n lookup = {arg: self.publish_date}\n concrete_model = base_concrete_model(Displayable, self)\n try:\n queryset = concrete_model.objects.published\n except AttributeError:\n queryset = concrete_model.objects.all\n try:\n return queryset(**kwargs).filter(**lookup).order_by(order)[0]\n except IndexError:\n pass", - "docstring": "Retrieves next or previous object by publish date. We implement\n our own version instead of Django's so we can hook into the\n published manager and concrete subclasses." - }, - { - "code": "def methods(self):\n done = {}\n for astroid in itertools.chain(iter((self,)), self.ancestors()):\n for meth in astroid.mymethods():\n if meth.name in done:\n continue\n done[meth.name] = None\n yield meth", - "docstring": "Iterate over all of the method defined in this class and its parents.\n\n :returns: The methods defined on the class.\n :rtype: iterable(FunctionDef)" - }, - { - "code": "def _recv_loop(self):\n required_len = BGP_MIN_MSG_LEN\n conn_lost_reason = \"Connection lost as protocol is no longer active\"\n try:\n while True:\n next_bytes = self._socket.recv(required_len)\n if len(next_bytes) == 0:\n conn_lost_reason = 'Peer closed connection'\n break\n self.data_received(next_bytes)\n except socket.error as err:\n conn_lost_reason = 'Connection to peer lost: %s.' % err\n except bgp.BgpExc as ex:\n conn_lost_reason = 'Connection to peer lost, reason: %s.' % ex\n except Exception as e:\n LOG.debug(traceback.format_exc())\n conn_lost_reason = str(e)\n finally:\n self.connection_lost(conn_lost_reason)", - "docstring": "Sits in tight loop collecting data received from peer and\n processing it." - }, - { - "code": "def _on_leave(self, *args):\n if self.__clicked:\n self.config(foreground=self._clicked_color)\n else:\n self.config(foreground=self._normal_color)\n self.config(cursor=\"\")", - "docstring": "Set the text color to either the normal color when not clicked or the clicked color when clicked." - }, - { - "code": "def add_letter(self, letter):\n assert isinstance(letter, str)\n assert len(letter) == 1\n self.text = self.text[:self.cursor] + letter + self.text[self.cursor:]\n self.cursor += 1", - "docstring": "Add a letter at the cursor pos." 
- }, - { - "code": "def get_command(self, ctx, name):\n self.connect(ctx)\n if not hasattr(ctx, \"widget\") or name in [\"shell\"]:\n return super(Engineer, self).get_command(ctx, name)\n if name == \"--help\":\n return None\n info = ctx.widget.engineer_info(name)\n return self.make_command(ctx, name, info)", - "docstring": "get click command from engineer exposed xbahn command specs" - }, - { - "code": "def _print_status(self):\n self._clear_line()\n self._print(' ')\n if self.max_value:\n self._print_percent()\n self._print(' ')\n self._print_bar()\n else:\n self._print_throbber()\n self._print(' ')\n if self.measurement == Measurement.bytes:\n self._print_size_downloaded()\n else:\n self._print(self.current_value)\n self._print(' ')\n self._print_duration()\n self._print(' ')\n if self.measurement == Measurement.bytes:\n self._print_speed()\n self._flush()", - "docstring": "Print an entire status line including bar and stats." - }, - { - "code": "def compute_dominance_frontier(graph, domtree):\n df = {}\n for x in networkx.dfs_postorder_nodes(domtree):\n if x not in graph:\n continue\n df[x] = set()\n for y in graph.successors(x):\n if x not in domtree.predecessors(y):\n df[x].add(y)\n if x is None:\n continue\n for z in domtree.successors(x):\n if z is x:\n continue\n if z not in df:\n continue\n for y in df[z]:\n if x not in list(domtree.predecessors(y)):\n df[x].add(y)\n return df", - "docstring": "Compute a dominance frontier based on the given post-dominator tree.\n\n This implementation is based on figure 2 of paper An Efficient Method of Computing Static Single Assignment\n Form by Ron Cytron, etc.\n\n :param graph: The graph where we want to compute the dominance frontier.\n :param domtree: The dominator tree\n :returns: A dict of dominance frontier" - }, - { - "code": "def base_fields(self):\n if self.extensions_start is None:\n return len(self.fields)\n return len(self.fields[:self.extensions_start])", - "docstring": "return number of non-extended fields" - }, - { - "code": "def _discretize(a):\n arr = np.asarray(a)\n index = np.argsort(arr)\n inverse_index = np.zeros(arr.size, dtype=np.intp)\n inverse_index[index] = np.arange(arr.size, dtype=np.intp)\n arr = arr[index]\n obs = np.r_[True, arr[1:] != arr[:-1]]\n return obs.cumsum()[inverse_index] - 1", - "docstring": "Discretizes array values to class labels." - }, - { - "code": "def eval(self, expression):\n expression_wrapped = wrap_script.format(expression)\n self._libeng.engEvalString(self._ep, expression_wrapped)\n mxresult = self._libeng.engGetVariable(self._ep, 'ERRSTR__')\n error_string = self._libmx.mxArrayToString(mxresult)\n self._libmx.mxDestroyArray(mxresult)\n if error_string != \"\":\n raise RuntimeError(\"Error from MATLAB\\n{0}\".format(error_string))", - "docstring": "Evaluate `expression` in MATLAB engine.\n\n Parameters\n ----------\n expression : str\n Expression is passed to MATLAB engine and evaluated." 
- }, - { - "code": "def argparse(argv, parser, arguments):\n def add_arg(parser, arg_spec):\n parser.add_argument(arg_spec.name, help=arg_spec.help)\n return parser\n parse_request = parser \\\n .map(lambda i: ArgumentParser(description=i.description)) \\\n .combine_latest(arguments, lambda parser, arg_def: add_arg(parser,arg_def)) \\\n .last() \\\n .combine_latest(argv.to_list(), lambda parser, args: (parser,args))\n def subscribe(observer):\n def on_next(value):\n parser, args = value\n try:\n args = parser.parse_args(args)\n for key,value in vars(args).items():\n observer.on_next(Argument(key=key, value=value))\n except NameError as exc:\n observer.on_error(\"{}\\n{}\".format(exc, parser.format_help()))\n return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)\n return AnonymousObservable(subscribe)", - "docstring": "A command line argument parser.\n Parses arguments coming from the argv Observable and outputs them as\n Argument items in the output observable.\n\n Parameters\n -----------\n argv : Observable\n An Observable of strings.\n parser : Observable\n An Observable containing one Parser item.\n arguments : Observable\n An Observable containing ArgumentDef items.\n\n\n Returns\n -------\n Observable\n An Observable of Argument items." - }, - { - "code": "async def post(self, public_key):\n\t\tif settings.SIGNATURE_VERIFICATION:\n\t\t\tsuper().verify()\n\t\ttry:\n\t\t\tbody = json.loads(self.request.body)\n\t\texcept:\n\t\t\tself.set_status(400)\n\t\t\tself.write({\"error\":400, \"reason\":\"Unexpected data format. JSON required\"})\n\t\t\traise tornado.web.Finish\n\t\tif isinstance(body[\"message\"], str):\n\t\t\tmessage = json.loads(body[\"message\"])\n\t\telif isinstance(body[\"message\"], dict):\n\t\t\tmessage = body[\"message\"]\n\t\tcid = message.get(\"cid\")\n\t\treview = message.get(\"review\")\n\t\trating = message.get(\"rating\")\n\t\tcoinid = message.get(\"coinid\")\n\t\tif not all([cid, rating, review]):\n\t\t\tself.set_status(400)\n\t\t\tself.write({\"error\":400, \"reason\":\"Missed required fields\"})\n\t\tif coinid in settings.bridges.keys():\n\t\t\tself.account.blockchain.setendpoint(settings.bridges[coinid])\n\t\telse:\n\t\t\tself.set_status(400)\n\t\t\tself.write({\"error\":400, \"reason\":\"Invalid coinid\"})\n\t\t\traise tornado.web.Finish \n\t\tbuyer_address = self.account.validator[coinid](public_key)\n\t\treview = await self.account.blockchain.addreview(cid=int(cid),buyer_address=buyer_address,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tstars=int(rating), review=review)\n\t\tawait self.account.setreview(cid=cid, txid=review[\"result\"][\"txid\"], coinid=coinid)\n\t\tself.write({\"cid\":cid, \"review\":review, \"rating\":rating})", - "docstring": "Writes contents review" - }, - { - "code": "def get_country(similar=False, **kwargs):\n result_country = None\n try:\n if similar:\n for country in countries:\n if kwargs.get('name', '') in country.name:\n result_country = country\n break\n else:\n result_country = countries.get(**kwargs)\n except Exception as ex:\n msg = ('Country not found in pycountry with params introduced'\n ' - {}'.format(ex))\n logger.error(msg, params=kwargs)\n return result_country", - "docstring": "Get a country for pycountry" - }, - { - "code": "def neighbors_iter(self, n, t=None):\n try:\n if t is None:\n return iter(self._adj[n])\n else:\n return iter([i for i in self._adj[n] if self.__presence_test(n, i, t)])\n except KeyError:\n raise nx.NetworkXError(\"The node %s is not in the graph.\" % (n,))", - "docstring": "Return 
an iterator over all neighbors of node n at time t.\n\n Parameters\n ----------\n n : node\n A node in the graph\n t : snapshot id (default=None)\n If None will be returned an iterator over the neighbors of the node on the flattened graph.\n\n Examples\n --------\n >>> G = dn.DynGraph()\n >>> G.add_path([0,1,2,3], t=0)\n >>> [n for n in G.neighbors_iter(0, t=0)]\n [1]" - }, - { - "code": "def AVP(avpId, **fields):\n val = None\n classType = AVP_Unknown\n if isinstance(avpId, str):\n try:\n for vnd in AvpDefDict:\n for code in AvpDefDict[vnd]:\n val = AvpDefDict[vnd][code]\n if val[0][:len(\n avpId)] == avpId:\n raise\n found = False\n except BaseException:\n found = True\n else:\n if isinstance(avpId, list):\n code = avpId[0]\n vnd = avpId[1]\n else:\n code = avpId\n vnd = 0\n try:\n val = AvpDefDict[vnd][code]\n found = True\n except BaseException:\n found = False\n if not found:\n warning('The AVP identifier %s has not been found.' % str(avpId))\n if isinstance(avpId, str):\n return None\n fields['avpCode'] = code\n if 'avpVnd' not in fields and vnd:\n fields['avpVnd'] = vnd\n if 'avpFlags' not in fields:\n if val:\n fields['avpFlags'] = val[2]\n else:\n fields['avpFlags'] = vnd and 128 or 0\n if val:\n classType = val[1]\n _ret = classType(**fields)\n if val:\n _ret.name = 'AVP ' + val[0]\n return _ret", - "docstring": "Craft an AVP based on its id and optional parameter fields" - }, - { - "code": "def first_name(anon, obj, field, val):\n return anon.faker.first_name(field=field)", - "docstring": "Returns a random first name" - }, - { - "code": "def receive_nack(self, msg):\n self.observe_proposal(msg.promised_proposal_id)\n if msg.proposal_id == self.proposal_id and self.nacks_received is not None:\n self.nacks_received.add(msg.from_uid)\n if len(self.nacks_received) == self.quorum_size:\n return self.prepare()", - "docstring": "Returns a new Prepare message if the number of Nacks received reaches\n a quorum." - }, - { - "code": "def deploy_api_gateway( self,\n api_id,\n stage_name,\n stage_description=\"\",\n description=\"\",\n cache_cluster_enabled=False,\n cache_cluster_size='0.5',\n variables=None,\n cloudwatch_log_level='OFF',\n cloudwatch_data_trace=False,\n cloudwatch_metrics_enabled=False,\n cache_cluster_ttl=300,\n cache_cluster_encrypted=False\n ):\n print(\"Deploying API Gateway..\")\n self.apigateway_client.create_deployment(\n restApiId=api_id,\n stageName=stage_name,\n stageDescription=stage_description,\n description=description,\n cacheClusterEnabled=cache_cluster_enabled,\n cacheClusterSize=cache_cluster_size,\n variables=variables or {}\n )\n if cloudwatch_log_level not in self.cloudwatch_log_levels:\n cloudwatch_log_level = 'OFF'\n self.apigateway_client.update_stage(\n restApiId=api_id,\n stageName=stage_name,\n patchOperations=[\n self.get_patch_op('logging/loglevel', cloudwatch_log_level),\n self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),\n self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),\n self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),\n self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted)\n ]\n )\n return \"https://{}.execute-api.{}.amazonaws.com/{}\".format(api_id, self.boto_session.region_name, stage_name)", - "docstring": "Deploy the API Gateway!\n\n Return the deployed API URL." 
- }, - { - "code": "def parse(*models, **kwargs):\n if isinstance(models, tuple) and isinstance(models[0], list):\n models = models[0]\n config = kwargs.pop('config', False)\n state = kwargs.pop('state', False)\n profiles = kwargs.pop('profiles', [])\n if not profiles and hasattr(napalm_device, 'profile'):\n profiles = napalm_device.profile\n if not profiles:\n profiles = [__grains__.get('os')]\n root = _get_root_object(models)\n parser_kwargs = {\n 'device': napalm_device.get('DRIVER'),\n 'profile': profiles\n }\n if config:\n root.parse_config(**parser_kwargs)\n if state:\n root.parse_state(**parser_kwargs)\n return root.to_dict(filter=True)", - "docstring": "Parse configuration from the device.\n\n models\n A list of models to be used when parsing.\n\n config: ``False``\n Parse config.\n\n state: ``False``\n Parse state.\n\n profiles: ``None``\n Use certain profiles to parse. If not specified, will use the device\n default profile(s).\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' napalm_yang.parse models.openconfig_interfaces\n\n Output Example:\n\n .. code-block:: python\n\n {\n \"interfaces\": {\n \"interface\": {\n \".local.\": {\n \"name\": \".local.\",\n \"state\": {\n \"admin-status\": \"UP\",\n \"counters\": {\n \"in-discards\": 0,\n \"in-errors\": 0,\n \"out-errors\": 0\n },\n \"enabled\": True,\n \"ifindex\": 0,\n \"last-change\": 0,\n \"oper-status\": \"UP\",\n \"type\": \"softwareLoopback\"\n },\n \"subinterfaces\": {\n \"subinterface\": {\n \".local..0\": {\n \"index\": \".local..0\",\n \"state\": {\n \"ifindex\": 0,\n \"name\": \".local..0\"\n }\n }\n }\n }\n },\n \"ae0\": {\n \"name\": \"ae0\",\n \"state\": {\n \"admin-status\": \"UP\",\n \"counters\": {\n \"in-discards\": 0,\n \"in-errors\": 0,\n \"out-errors\": 0\n },\n \"enabled\": True,\n \"ifindex\": 531,\n \"last-change\": 255203,\n \"mtu\": 1518,\n \"oper-status\": \"DOWN\"\n },\n \"subinterfaces\": {\n \"subinterface\": {\n \"ae0.0\": {\n \"index\": \"ae0.0\",\n \"state\": {\n \"description\": \"ASDASDASD\",\n \"ifindex\": 532,\n \"name\": \"ae0.0\"\n }\n }\n \"ae0.32767\": {\n \"index\": \"ae0.32767\",\n \"state\": {\n \"ifindex\": 535,\n \"name\": \"ae0.32767\"\n }\n }\n }\n }\n },\n \"dsc\": {\n \"name\": \"dsc\",\n \"state\": {\n \"admin-status\": \"UP\",\n \"counters\": {\n \"in-discards\": 0,\n \"in-errors\": 0,\n \"out-errors\": 0\n },\n \"enabled\": True,\n \"ifindex\": 5,\n \"last-change\": 0,\n \"oper-status\": \"UP\"\n }\n },\n \"ge-0/0/0\": {\n \"name\": \"ge-0/0/0\",\n \"state\": {\n \"admin-status\": \"UP\",\n \"counters\": {\n \"in-broadcast-pkts\": 0,\n \"in-discards\": 0,\n \"in-errors\": 0,\n \"in-multicast-pkts\": 0,\n \"in-unicast-pkts\": 16877,\n \"out-broadcast-pkts\": 0,\n \"out-errors\": 0,\n \"out-multicast-pkts\": 0,\n \"out-unicast-pkts\": 15742\n },\n \"description\": \"management interface\",\n \"enabled\": True,\n \"ifindex\": 507,\n \"last-change\": 258467,\n \"mtu\": 1400,\n \"oper-status\": \"UP\"\n },\n \"subinterfaces\": {\n \"subinterface\": {\n \"ge-0/0/0.0\": {\n \"index\": \"ge-0/0/0.0\",\n \"state\": {\n \"description\": \"ge-0/0/0.0\",\n \"ifindex\": 521,\n \"name\": \"ge-0/0/0.0\"\n }\n }\n }\n }\n }\n \"irb\": {\n \"name\": \"irb\",\n \"state\": {\n \"admin-status\": \"UP\",\n \"counters\": {\n \"in-discards\": 0,\n \"in-errors\": 0,\n \"out-errors\": 0\n },\n \"enabled\": True,\n \"ifindex\": 502,\n \"last-change\": 0,\n \"mtu\": 1514,\n \"oper-status\": \"UP\",\n \"type\": \"ethernetCsmacd\"\n }\n },\n \"lo0\": {\n \"name\": \"lo0\",\n \"state\": {\n 
\"admin-status\": \"UP\",\n \"counters\": {\n \"in-discards\": 0,\n \"in-errors\": 0,\n \"out-errors\": 0\n },\n \"description\": \"lo0\",\n \"enabled\": True,\n \"ifindex\": 6,\n \"last-change\": 0,\n \"oper-status\": \"UP\",\n \"type\": \"softwareLoopback\"\n },\n \"subinterfaces\": {\n \"subinterface\": {\n \"lo0.0\": {\n \"index\": \"lo0.0\",\n \"state\": {\n \"description\": \"lo0.0\",\n \"ifindex\": 16,\n \"name\": \"lo0.0\"\n }\n },\n \"lo0.16384\": {\n \"index\": \"lo0.16384\",\n \"state\": {\n \"ifindex\": 21,\n \"name\": \"lo0.16384\"\n }\n },\n \"lo0.16385\": {\n \"index\": \"lo0.16385\",\n \"state\": {\n \"ifindex\": 22,\n \"name\": \"lo0.16385\"\n }\n },\n \"lo0.32768\": {\n \"index\": \"lo0.32768\",\n \"state\": {\n \"ifindex\": 248,\n \"name\": \"lo0.32768\"\n }\n }\n }\n }\n }\n }\n }\n }" - }, - { - "code": "async def runItemCmdr(item, outp=None, **opts):\n cmdr = await getItemCmdr(item, outp=outp, **opts)\n await cmdr.runCmdLoop()", - "docstring": "Create a cmdr for the given item and run the cmd loop.\n\n Example:\n\n runItemCmdr(foo)" - }, - { - "code": "def high_low(data, channels=None, high=None, low=None, full_output=False):\n if channels is None:\n data_ch = data\n else:\n data_ch = data[:,channels]\n if data_ch.ndim == 1:\n data_ch = data_ch.reshape((-1,1))\n if high is None:\n if hasattr(data_ch, 'range'):\n high = [np.Inf if di is None else di[1] for di in data_ch.range()]\n high = np.array(high)\n else:\n high = np.Inf\n if low is None:\n if hasattr(data_ch, 'range'):\n low = [-np.Inf if di is None else di[0] for di in data_ch.range()]\n low = np.array(low)\n else:\n low = -np.Inf\n mask = np.all((data_ch < high) & (data_ch > low), axis = 1)\n gated_data = data[mask]\n if full_output:\n HighLowGateOutput = collections.namedtuple(\n 'HighLowGateOutput',\n ['gated_data', 'mask'])\n return HighLowGateOutput(gated_data=gated_data, mask=mask)\n else:\n return gated_data", - "docstring": "Gate out high and low values across all specified channels.\n\n Gate out events in `data` with values in the specified channels which\n are larger than or equal to `high` or less than or equal to `low`.\n\n Parameters\n ----------\n data : FCSData or numpy array\n NxD flow cytometry data where N is the number of events and D is\n the number of parameters (aka channels).\n channels : int, str, list of int, list of str, optional\n Channels on which to perform gating. If None, use all channels.\n high, low : int, float, optional\n High and low threshold values. If None, `high` and `low` will be\n taken from ``data.range`` if available, otherwise\n ``np.inf`` and ``-np.inf`` will be used.\n full_output : bool, optional\n Flag specifying to return additional outputs. If true, the outputs\n are given as a namedtuple.\n\n Returns\n -------\n gated_data : FCSData or numpy array\n Gated flow cytometry data of the same format as `data`.\n mask : numpy array of bool, only if ``full_output==True``\n Boolean gate mask used to gate data such that ``gated_data =\n data[mask]``." 
- }, - { - "code": "def split_input(cls, mapper_spec):\n params = _get_params(mapper_spec)\n entity_kind_name = params[cls.ENTITY_KIND_PARAM]\n batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))\n shard_count = mapper_spec.shard_count\n namespace = params.get(cls.NAMESPACE_PARAM)\n app = params.get(cls._APP_PARAM)\n filters = params.get(cls.FILTERS_PARAM)\n if namespace is None:\n namespace_query = datastore.Query(\"__namespace__\",\n keys_only=True,\n _app=app)\n namespace_keys = namespace_query.Get(\n limit=cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)\n if len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD:\n ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,\n contiguous=True,\n _app=app)\n return [cls(entity_kind_name,\n key_ranges=None,\n ns_range=ns_range,\n batch_size=batch_size,\n filters=filters)\n for ns_range in ns_ranges]\n elif not namespace_keys:\n return [cls(entity_kind_name,\n key_ranges=None,\n ns_range=namespace_range.NamespaceRange(_app=app),\n batch_size=shard_count,\n filters=filters)]\n else:\n namespaces = [namespace_key.name() or \"\"\n for namespace_key in namespace_keys]\n else:\n namespaces = [namespace]\n readers = cls._split_input_from_params(\n app, namespaces, entity_kind_name, params, shard_count)\n if filters:\n for reader in readers:\n reader._filters = filters\n return readers", - "docstring": "Splits query into shards without fetching query results.\n\n Tries as best as it can to split the whole query result set into equal\n shards. Due to difficulty of making the perfect split, resulting shards'\n sizes might differ significantly from each other.\n\n Args:\n mapper_spec: MapperSpec with params containing 'entity_kind'.\n May have 'namespace' in the params as a string containing a single\n namespace. If specified then the input reader will only yield values\n in the given namespace. If 'namespace' is not given then values from\n all namespaces will be yielded. May also have 'batch_size' in the params\n to specify the number of entities to process in each batch.\n\n Returns:\n A list of InputReader objects. If the query results are empty then the\n empty list will be returned. Otherwise, the list will always have a length\n equal to number_of_shards but may be padded with Nones if there are too\n few results for effective sharding." 
- }, - { - "code": "def nanany(values, axis=None, skipna=True, mask=None):\n values, mask, dtype, _, _ = _get_values(values, skipna, False, copy=skipna,\n mask=mask)\n return values.any(axis)", - "docstring": "Check if any elements along an axis evaluate to True.\n\n Parameters\n ----------\n values : ndarray\n axis : int, optional\n skipna : bool, default True\n mask : ndarray[bool], optional\n nan-mask if known\n\n Returns\n -------\n result : bool\n\n Examples\n --------\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([1, 2])\n >>> nanops.nanany(s)\n True\n\n >>> import pandas.core.nanops as nanops\n >>> s = pd.Series([np.nan])\n >>> nanops.nanany(s)\n False" - }, - { - "code": "def rpm_install(install_dir):\n log = logging.getLogger(mod_logger + '.rpm_install')\n if not isinstance(install_dir, basestring):\n msg = 'install_dir argument must be a string'\n log.error(msg)\n raise CommandError(msg)\n if not os.path.isdir(install_dir):\n msg = 'Directory not found: {f}'.format(f=install_dir)\n log.error(msg)\n raise CommandError(msg)\n command = ['rpm', '-iv', '--force', '{d}/*.rpm'.format(d=install_dir)]\n try:\n result = run_command(command)\n except CommandError:\n raise\n log.info('RPM completed and exit with code: {c}'.format(\n c=result['code']))\n return result['code']", - "docstring": "This method installs all RPM files in a specific dir\n\n :param install_dir: (str) Full path to the directory\n :return int exit code form the rpm command\n :raises CommandError" - }, - { - "code": "def table_formatter(self, dataframe, inc_header=1, inc_index=1):\n return TableFormatter(dataframe, inc_header=inc_header, inc_index=inc_index)", - "docstring": "Return a table formatter for the dataframe. Saves the user the need to import this class" - }, - { - "code": "def ensure_echo_on():\n if not sys.stdin.isatty():\n return\n try:\n import termios\n except ImportError:\n return\n attributes = termios.tcgetattr(sys.stdin)\n if not attributes[3] & termios.ECHO:\n attributes[3] |= termios.ECHO\n termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)", - "docstring": "Ensure that echo mode is enabled. Some tools such as PDB disable\n it which causes usability issues after reload." - }, - { - "code": "def inserir(self, type, description):\n script_type_map = dict()\n script_type_map['type'] = type\n script_type_map['description'] = description\n code, xml = self.submit(\n {'script_type': script_type_map}, 'POST', 'scripttype/')\n return self.response(code, xml)", - "docstring": "Inserts a new Script Type and returns its identifier.\n\n :param type: Script Type type. String with a minimum 3 and maximum of 40 characters\n :param description: Script Type description. String with a minimum 3 and maximum of 100 characters\n\n :return: Dictionary with the following structure:\n\n ::\n\n {'script_type': {'id': < id_script_type >}}\n\n :raise InvalidParameterError: Type or description is null and invalid.\n :raise NomeTipoRoteiroDuplicadoError: Type script already registered with informed.\n :raise DataBaseError: Networkapi failed to access the database.\n :raise XMLError: Networkapi failed to generate the XML response." 
- }, - { - "code": "def decay(ax, p0, pf, A, n, format=None, **kwds):\r\n r\r\n if format is None: format = 'k-'\r\n T = sqrt((p0[0]-pf[0])**2+(p0[1]-pf[1])**2)\r\n alpha = atan2(pf[1]-p0[1], pf[0]-p0[0])\r\n x = [i*T/400.0 for i in range(401)]\r\n y = [A*sin(xi * 2*pi*n/T) for xi in x]\r\n cur_list = [(x, y)]\r\n cur_list = rotate_and_traslate(cur_list, alpha, p0)\r\n for curi in cur_list:\r\n ax.plot(curi[0], curi[1], format, **kwds)", - "docstring": "r\"\"\"Draw a spontaneous decay as a wavy line." - }, - { - "code": "def flags(self, index):\r\n if not index.isValid():\r\n return Qt.ItemIsEnabled\r\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\r\n Qt.ItemIsEditable)", - "docstring": "Overriding method flags" - }, - { - "code": "def configure(screen_name=None, config_file=None, app=None, **kwargs):\n dirs = kwargs.pop('default_directories', None)\n bases = kwargs.pop('default_bases', None)\n file_config = {}\n if config_file is not False:\n config_file = find_file(config_file, dirs, bases)\n file_config = parse(config_file)\n config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')}\n user_conf = file_config.get('users', {}).get(screen_name, {})\n app = app or user_conf.get('app')\n app_conf = file_config.get('apps', {}).get(app, {})\n config.update(app_conf)\n config.update(user_conf)\n config.update({k: v for k, v in kwargs.items() if v is not None})\n return config", - "docstring": "Set up a config dictionary using a bots.yaml config file and optional keyword args.\n\n Args:\n screen_name (str): screen_name of user to search for in config file\n config_file (str): Path to read for the config file\n app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}.\n default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS.\n default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES." - }, - { - "code": "def apply_noise(data, noise):\n if noise >= 1:\n noise = noise/100.\n for i in range(data.nRows()):\n ones = data.rowNonZeros(i)[0]\n replace_indices = numpy.random.choice(ones,\n size = int(len(ones)*noise), replace = False)\n for index in replace_indices:\n data[i, index] = 0\n new_indices = numpy.random.choice(data.nCols(),\n size = int(len(ones)*noise), replace = False)\n for index in new_indices:\n while data[i, index] == 1:\n index = numpy.random.randint(0, data.nCols())\n data[i, index] = 1", - "docstring": "Applies noise to a sparse matrix. Noise can be an integer between 0 and\n 100, indicating the percentage of ones in the original input to move, or\n a float in [0, 1), indicating the same thing.\n The input matrix is modified in-place, and nothing is returned.\n This operation does not affect the sparsity of the matrix, or of any\n individual datapoint." 
- }, - { - "code": "def addFile(self,file):\n mylambda= lambda adict : { key.upper() : mylambda(adict[key]) if isinstance(adict[key],dict) else adict[key] for key in adict.keys() }\n if file.endswith('.json') :\n with open(file, 'r') as f:\n fileContent = mylambda(json.load(f))\n elif file.endswith('.ini') :\n parser = configparser.ConfigParser()\n parser.read(file)\n fileContent = { section : { conflist[0].upper() : conflist[1] for conflist in parser.items(section) } for section in parser.sections() }\n else :\n raise FileFormatException()\n self._config = {**self._config, **mylambda(fileContent)}", - "docstring": "Permet d'ajouter un fichier\n\n Args:\n file (string): path d'un fichier json\n\n Returns:\n type: None\n\n Raises:\n FileFormatException: Erreur du format de fichier" - }, - { - "code": "def verify_quote(self, quote_id, extra):\n container = self.generate_order_template(quote_id, extra)\n clean_container = {}\n for key in container.keys():\n if container.get(key) != '':\n clean_container[key] = container[key]\n return self.client.call('SoftLayer_Billing_Order_Quote', 'verifyOrder', clean_container, id=quote_id)", - "docstring": "Verifies that a quote order is valid.\n\n ::\n\n extras = {\n 'hardware': {'hostname': 'test', 'domain': 'testing.com'},\n 'quantity': 2\n }\n manager = ordering.OrderingManager(env.client)\n result = manager.verify_quote(12345, extras)\n\n\n :param int quote_id: ID for the target quote\n :param dictionary extra: Overrides for the defaults of SoftLayer_Container_Product_Order\n :param int quantity: Quantity to override default" - }, - { - "code": "def walk(self):\n catalog = self.catalog\n query = self.additionalQuery.copy()\n query['portal_type'] = self.src_portal_type\n query['meta_type'] = self.src_meta_type\n if HAS_LINGUA_PLONE and 'Language' in catalog.indexes():\n query['Language'] = 'all'\n brains = catalog(query)\n limit = getattr(self, 'limit', False)\n if limit:\n brains = brains[:limit]\n obj_num_total = len(brains)\n logger.info('{} {} objects will be migrated walking through {}'\n .format(obj_num_total, self.src_portal_type, catalog.id))\n counter = 0\n for brain in brains:\n if counter % 100 == 0:\n logger.info('Progress: {} objects have been migrated out of {}'\n .format(counter, obj_num_total))\n try:\n obj = brain.getObject()\n except AttributeError:\n LOG.error(\"Couldn't access %s\" % brain.getPath())\n continue\n if self.callBefore is not None and callable(self.callBefore):\n if not self.callBefore(obj, **self.kwargs):\n continue\n try:\n state = obj._p_changed\n except Exception:\n state = 0\n if obj is not None:\n yield obj\n if state is None:\n obj._p_deactivate()\n counter += 1\n if obj_num_total == counter:\n logger.info(\n 'Progress: {} objects have been migrated out of {}'\n .format(counter, obj_num_total))", - "docstring": "Walks around and returns all objects which needs migration\n It does exactly the same as the original method, but add some\n progress loggers.\n\n :return: objects (with acquisition wrapper) that needs migration\n :rtype: generator" - }, - { - "code": "def write_offsource(page, args, grbtag, onsource=False):\n th = ['Re-weighted SNR', 'Coherent SNR']\n if args.time_slides:\n if onsource:\n out_dir = 'ZEROLAG_ALL'\n else:\n out_dir = 'ZEROLAG_OFF'\n else:\n if onsource:\n out_dir = 'ALL_TIMES'\n else:\n out_dir = 'OFFSOURCE'\n plot = markup.page()\n p = \"%s/plots_clustered/GRB%s_bestnr_vs_time_noinj.png\" % (out_dir, grbtag)\n plot.a(href=p, title=\"Detection statistic versus time\")\n plot.img(src=p)\n 
plot.a.close()\n td = [ plot() ]\n plot = markup.page()\n p = \"%s/plots_clustered/GRB%s_triggers_vs_time_noinj.png\" % (out_dir, grbtag)\n plot.a(href=p, title=\"Coherent SNR versus time\")\n plot.img(src=p)\n plot.a.close()\n td.append(plot())\n ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)]\n for ifo in ifos:\n th.append('%s SNR' % ifo)\n plot = markup.page()\n p = \"%s/plots_clustered/GRB%s_%s_triggers_vs_time_noinj.png\"\\\n % (out_dir, grbtag, ifo)\n plot.a(href=p, title=\"%s SNR versus time\" % ifo)\n plot.img(src=p)\n plot.a.close()\n td.append(plot())\n page = write_table(page, th, td)\n return page", - "docstring": "Write offsource SNR versus time plots to markup.page object page" - }, - { - "code": "def get_rates(self, mmin, mmax=np.inf):\n nsrcs = self.number_sources()\n for iloc, source in enumerate(self.source_model):\n print(\"Source Number %s of %s, Name = %s, Typology = %s\" % (\n iloc + 1,\n nsrcs,\n source.name,\n source.__class__.__name__))\n if isinstance(source, CharacteristicFaultSource):\n self._get_fault_rates(source, mmin, mmax)\n elif isinstance(source, ComplexFaultSource):\n self._get_fault_rates(source, mmin, mmax)\n elif isinstance(source, SimpleFaultSource):\n self._get_fault_rates(source, mmin, mmax)\n elif isinstance(source, AreaSource):\n self._get_area_rates(source, mmin, mmax)\n elif isinstance(source, PointSource):\n self._get_point_rates(source, mmin, mmax)\n else:\n print(\"Source type %s not recognised - skipping!\" % source)\n continue", - "docstring": "Returns the cumulative rates greater than Mmin\n\n :param float mmin:\n Minimum magnitude" - }, - { - "code": "def get_epoch_namespace_prices( block_height, units ):\n assert units in ['BTC', TOKEN_TYPE_STACKS], 'Invalid unit {}'.format(units)\n epoch_config = get_epoch_config( block_height )\n if units == 'BTC':\n return epoch_config['namespace_prices']\n else:\n return epoch_config['namespace_prices_stacks']", - "docstring": "get the list of namespace prices by block height" - }, - { - "code": "def setOverlayTextureBounds(self, ulOverlayHandle):\n fn = self.function_table.setOverlayTextureBounds\n pOverlayTextureBounds = VRTextureBounds_t()\n result = fn(ulOverlayHandle, byref(pOverlayTextureBounds))\n return result, pOverlayTextureBounds", - "docstring": "Sets the part of the texture to use for the overlay. UV Min is the upper left corner and UV Max is the lower right corner." - }, - { - "code": "def dbg_print_irsb(self, irsb_addr, project=None):\n if project is None:\n project = self._project\n if project is None:\n raise Exception(\"Dict addr_to_run is empty. \" + \\\n \"Give me a project, and I'll recreate the IRSBs for you.\")\n else:\n vex_block = project.factory.block(irsb_addr).vex\n statements = vex_block.statements\n whitelist = self.get_whitelisted_statements(irsb_addr)\n for i in range(0, len(statements)):\n if whitelist is True or i in whitelist:\n line = \"+\"\n else:\n line = \"-\"\n line += \"[% 3d] \" % i\n print(line, end='')\n statements[i].pp()", - "docstring": "Pretty-print an IRSB with whitelist information" - }, - { - "code": "def _find_rule_no(self, mac):\n ipt_cmd = ['iptables', '-L', '--line-numbers']\n cmdo = dsl.execute(ipt_cmd, self._root_helper, log_output=False)\n for o in cmdo.split('\\n'):\n if mac in o.lower():\n rule_no = o.split()[0]\n LOG.info('Found rule %(rule)s for %(mac)s.',\n {'rule': rule_no, 'mac': mac})\n return rule_no", - "docstring": "Find rule number associated with a given mac." 
- }, - { - "code": "def put(self, file):\n input_ = {\n \"message\": file.logs,\n \"author\": file.author.dict(),\n \"content\": file.base64,\n \"branch\": file.branch\n }\n uri = \"{api}/repos/{origin}/contents/{path}\".format(\n api=self.github_api_url,\n origin=self.origin,\n path=file.path\n )\n data = self.request(\"PUT\", uri, data=input_)\n if data.status_code == 201:\n file.pushed = True\n return file\n else:\n decoded_data = json.loads(data.content.decode(\"utf-8\"))\n return self.ProxyError(\n data.status_code, (decoded_data, \"message\"),\n step=\"put\", context={\n \"uri\": uri,\n \"params\": input_\n }\n )", - "docstring": "Create a new file on github\n\n :param file: File to create\n :return: File or self.ProxyError" - }, - { - "code": "def asset_save(self):\n if not self.cur_asset:\n return\n desc = self.asset_desc_pte.toPlainText()\n self.cur_asset.description = desc\n self.cur_asset.save()", - "docstring": "Save the current asset\n\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def initialize_from_matrix(cls, matrix, column):\n vec = Vector(matrix.get_height())\n for row in xrange(matrix.get_height()):\n vec.set_value(0, row, matrix.get_value(column, row))\n return vec", - "docstring": "Create vector from matrix\n\n :param Matrix matrix: The Matrix, which should be used to create the vector.\n :param integer column: The column of the matrix, which should be used\n to create the new vector.\n :raise: Raises an :py:exc:`IndexError` if the matrix does not have the specified column." - }, - { - "code": "def _mprotate(ang, lny, pool, order):\n targ_args = list()\n slsize = np.int(np.floor(lny / ncores))\n for t in range(ncores):\n ymin = t * slsize\n ymax = (t + 1) * slsize\n if t == ncores - 1:\n ymax = lny\n targ_args.append((ymin, ymax, ang, order))\n pool.map(_rotate, targ_args)", - "docstring": "Uses multiprocessing to wrap around _rotate\n\n 4x speedup on an intel i7-3820 CPU @ 3.60GHz with 8 cores.\n\n The function calls _rotate which accesses the `mprotate_dict`.\n Data is rotated in-place.\n\n Parameters\n ----------\n ang: float\n rotation angle in degrees\n lny: int\n total number of rotations to perform\n pool: instance of multiprocessing.pool.Pool\n the pool object used for the computation\n order: int\n interpolation order" - }, - { - "code": "def set_permission(permission, value, app):\n script =\n app_url = 'app://' + app\n run_marionette_script(script % (permission, app_url, app_url, value), True)", - "docstring": "Set a permission for the specified app\n Value should be 'deny' or 'allow'" - }, - { - "code": "def get_last_doc(self):\n try:\n result = self.elastic.search(\n index=self.meta_index_name,\n body={\n \"query\": {\"match_all\": {}},\n \"sort\": [{\"_ts\": \"desc\"}],\n },\n size=1\n )[\"hits\"][\"hits\"]\n for r in result:\n r['_source']['_id'] = r['_id']\n return r['_source']\n except es_exceptions.RequestError:\n return None", - "docstring": "Get the most recently modified document from Elasticsearch.\n\n This method is used to help define a time window within which documents\n may be in conflict after a MongoDB rollback." 
- }, - { - "code": "def convert_like(item, like):\n if isinstance(like, np.ndarray):\n return np.asanyarray(item, dtype=like.dtype)\n if isinstance(item, like.__class__) or is_none(like):\n return item\n if (is_sequence(item) and\n len(item) == 1 and\n isinstance(item[0], like.__class__)):\n return item[0]\n item = like.__class__(item)\n return item", - "docstring": "Convert an item to have the dtype of another item\n\n Parameters\n ----------\n item: item to be converted\n like: object with target dtype. If None, item is returned unmodified\n\n Returns\n --------\n result: item, but in dtype of like" - }, - { - "code": "def serialize(self):\n serialized = {'type': self.__type__}\n for argname in self._attributes:\n if argname == 'required':\n continue\n argvalue = self._get_argname_value(argname)\n if argvalue is not None:\n if argvalue is Null:\n argvalue = None\n if self.__serialize_attr_aliases__ and argname in self.__serialize_attr_aliases__:\n argname = self.__serialize_attr_aliases__[argname]\n serialized[argname] = argvalue\n return serialized", - "docstring": "Return a serializable form of the config instance" - }, - { - "code": "def success(self, **kwargs):\n response = {'success': True}\n response.update(kwargs)\n response.update(self.kwargs)\n response['test_argument3'] = datetime.timedelta(days=1) + response['test_argument3']\n return response", - "docstring": "Returns all arguments received in init and this method call" - }, - { - "code": "def pday(dayfmt):\n\tyear, month, day = map(int, dayfmt.split('-'))\n\treturn '{day} the {number}'.format(\n\t\tday=calendar.day_name[calendar.weekday(year, month, day)],\n\t\tnumber=inflect.engine().ordinal(day),\n\t)", - "docstring": "P the day\n\n\t>>> print(pday('2012-08-24'))\n\tFriday the 24th" - }, - { - "code": "def action(context, request, action=None, resource=None, uid=None):\n if action is None:\n action = request.get_header(\"HTTP_X_HTTP_METHOD_OVERRIDE\", \"CREATE\").lower()\n func_name = \"{}_items\".format(action)\n action_func = getattr(api, func_name, None)\n if action_func is None:\n api.fail(500, \"API has no member named '{}'\".format(func_name))\n portal_type = api.resource_to_portal_type(resource)\n items = action_func(portal_type=portal_type, uid=uid)\n return {\n \"count\": len(items),\n \"items\": items,\n \"url\": api.url_for(\"senaite.jsonapi.v1.action\", action=action),\n }", - "docstring": "Various HTTP POST actions" - }, - { - "code": "def save_images(self):\n res_dict = self.treeview.get_selected()\n clobber = self.settings.get('clobber', False)\n self.treeview.clear_selection()\n if self.suffix:\n sfx = '_' + self.suffix\n else:\n sfx = ''\n if self.settings.get('include_chname', True):\n sfx += '_' + self.chname\n for infile in res_dict:\n f_pfx = os.path.splitext(infile)[0]\n f_ext = '.fits'\n oname = f_pfx + sfx + f_ext\n outfile = os.path.join(self.outdir, oname)\n self.w.status.set_text(\n 'Writing out {0} to {1} ...'.format(shorten_name(infile, 10),\n shorten_name(oname, 10)))\n self.logger.debug(\n 'Writing out {0} to {1} ...'.format(infile, oname))\n if os.path.exists(outfile) and not clobber:\n self.logger.error('{0} already exists'.format(outfile))\n continue\n bnch = res_dict[infile]\n if bnch.path is None or not os.path.isfile(bnch.path):\n self._write_mosaic(f_pfx, outfile)\n else:\n shutil.copyfile(bnch.path, outfile)\n self._write_mef(f_pfx, bnch.extlist, outfile)\n self.logger.info('{0} written'.format(outfile))\n self.w.status.set_text('Saving done, see log')", - "docstring": "Save selected 
images.\n\n This uses Astropy FITS package to save the outputs no matter\n what user chose to load the images." - }, - { - "code": "def set_current_filename(self, filename, focus=True):\r\n index = self.has_filename(filename)\r\n if index is not None:\r\n if focus:\r\n self.set_stack_index(index)\r\n editor = self.data[index].editor\r\n if focus:\r\n editor.setFocus()\r\n else:\r\n self.stack_history.remove_and_append(index)\r\n return editor", - "docstring": "Set current filename and return the associated editor instance." - }, - { - "code": "def get_normalized_variable_map(scope_or_module,\n collection=tf.GraphKeys.GLOBAL_VARIABLES,\n context=None,\n group_sliced_variables=True):\n scope_name = get_variable_scope_name(scope_or_module)\n if context is None:\n context = scope_or_module\n prefix = get_variable_scope_name(context)\n prefix_length = len(prefix) + 1 if prefix else 0\n if not _is_scope_prefix(scope_name, prefix):\n raise ValueError(\"Scope '{}' is not prefixed by '{}'.\".format(\n scope_name, prefix))\n variables = get_variables_in_scope(scope_name, collection)\n if not group_sliced_variables:\n single_vars = variables\n grouped_vars = dict()\n else:\n single_vars, grouped_vars = _get_sliced_variables(variables)\n var_map = {var.op.name[prefix_length:]: var for var in single_vars}\n for full_name, var_group in grouped_vars.items():\n name = full_name[prefix_length:]\n if name in var_map:\n raise ValueError(\"Mixing slices and non-slices with the same name: \" +\n str(name))\n var_map[name] = var_group\n return var_map", - "docstring": "Builds map of `tf.Variable`s in scope or module with normalized names.\n\n The names of the variables are normalized to remove the scope prefix.\n\n Args:\n scope_or_module: Scope or module to build map from.\n collection: Collection to restrict query to. By default this is\n `tf.Graphkeys.GLOBAL_VARIABLES`, which includes non-trainable variables\n such as moving averages.\n context: Scope or module, identical to or parent of `scope`. If given, this\n will be used as the stripped prefix. By default `None`, which means\n `context=scope`.\n group_sliced_variables: Boolean, if set to True, sliced variables are\n grouped together in the returned map; if set to False, each partition of\n a sliced variable is a separate (key, value) pair.\n\n Returns:\n Dictionary mapping normalized variable name to `tf.Variable`, or a list\n of `tf.Variables` if the variable is a sliced (partitioned) variable.\n\n Raises:\n ValueError: If `context` is given but is not a proper prefix of `scope`." 
- }, - { - "code": "def type_schema(\n type_name, inherits=None, rinherit=None,\n aliases=None, required=None, **props):\n if aliases:\n type_names = [type_name]\n type_names.extend(aliases)\n else:\n type_names = [type_name]\n if rinherit:\n s = copy.deepcopy(rinherit)\n s['properties']['type'] = {'enum': type_names}\n else:\n s = {\n 'type': 'object',\n 'properties': {\n 'type': {'enum': type_names}}}\n if not inherits:\n s['additionalProperties'] = False\n s['properties'].update(props)\n if not required:\n required = []\n if isinstance(required, list):\n required.append('type')\n s['required'] = required\n if inherits:\n extended = s\n s = {'allOf': [{'$ref': i} for i in inherits]}\n s['allOf'].append(extended)\n return s", - "docstring": "jsonschema generation helper\n\n params:\n - type_name: name of the type\n - inherits: list of document fragments that are required via anyOf[$ref]\n - rinherit: use another schema as a base for this, basically work around\n inherits issues with additionalProperties and type enums.\n - aliases: additional names this type maybe called\n - required: list of required properties, by default 'type' is required\n - props: additional key value properties" - }, - { - "code": "def fourier_series(x, *a):\n output = 0\n output += a[0]/2\n w = a[1]\n for n in range(2, len(a), 2):\n n_ = n/2\n val1 = a[n]\n val2 = a[n+1]\n output += val1*np.sin(n_*x*w)\n output += val2*np.cos(n_*x*w)\n return output", - "docstring": "Arbitrary dimensionality fourier series.\n\n The first parameter is a_0, and the second parameter is the interval/scale\n parameter.\n\n The parameters are altering sin and cos paramters.\n\n n = (len(a)-2)/2" - }, - { - "code": "def convolve_filter(signal, impulse_response):\n if impulse_response is not None:\n adjusted_signal = fftconvolve(signal, impulse_response)\n adjusted_signal = adjusted_signal[\n len(impulse_response) / 2:len(adjusted_signal) - len(impulse_response) / 2 + 1]\n return adjusted_signal\n else:\n return signal", - "docstring": "Convovle the two input signals, if impulse_response is None,\n returns the unaltered signal" - }, - { - "code": "def paginator(context, adjacent_pages=2):\r\n current_page = context.get('page')\r\n paginator = context.get('paginator')\r\n if not paginator:\r\n return\r\n pages = paginator.num_pages\r\n current_range = range(current_page - adjacent_pages, current_page + adjacent_pages + 1)\r\n page_numbers = [n for n in current_range if n > 0 and n <= pages]\r\n slugtype = ''\r\n if 'topic_slug' in context:\r\n page_url = context[\"topic\"].get_short_url()\r\n slugtype = 'topic'\r\n elif 'forum_slug' in context:\r\n page_url = '/forum/%s/' % context[\"forum_slug\"]\r\n slugtype = 'forum'\r\n else:\r\n page_url = context['request'].get_full_path()\r\n return {\r\n \"is_paginated\": context[\"is_paginated\"],\r\n \"page\": current_page,\r\n \"pages\": pages,\r\n \"page_obj\": context['page_obj'],\r\n \"page_numbers\": page_numbers,\r\n \"has_next\": context[\"page_obj\"].has_next(),\r\n \"has_previous\": context[\"page_obj\"].has_previous(),\r\n \"page_url\" : page_url,\r\n 'slugtype' : slugtype,\r\n }", - "docstring": "To be used in conjunction with the object_list generic view.\r\n Adds pagination context variables for use in displaying first, adjacent and\r\n last page links in addition to those created by the object_list generic view." 
- }, - { - "code": "def get_restricted_sites(self, request):\n try:\n return request.user.get_sites()\n except AttributeError:\n return Site.objects.none()", - "docstring": "The sites on which the user has permission on.\n\n To return the permissions, the method check for the ``get_sites``\n method on the user instance (e.g.: ``return request.user.get_sites()``)\n which must return the queryset of enabled sites.\n If the attribute does not exists, the user is considered enabled\n for all the websites.\n\n :param request: current request\n :return: boolean or a queryset of available sites" - }, - { - "code": "def apis(self):\n value = self.attributes['apis']\n if isinstance(value, six.string_types):\n value = shlex.split(value)\n return value", - "docstring": "List of API to test" - }, - { - "code": "def sparse_is_desireable(lhs, rhs):\n return False\n if len(lhs.shape) == 1:\n return False\n else:\n lhs_rows, lhs_cols = lhs.shape\n if len(rhs.shape) == 1:\n rhs_rows = 1\n rhs_cols = rhs.size\n else:\n rhs_rows, rhs_cols = rhs.shape\n result_size = lhs_rows * rhs_cols\n if sp.issparse(lhs) and sp.issparse(rhs):\n return True\n elif sp.issparse(lhs):\n lhs_zero_rows = lhs_rows - np.unique(lhs.nonzero()[0]).size\n rhs_zero_cols = np.all(rhs==0, axis=0).sum()\n elif sp.issparse(rhs):\n lhs_zero_rows = np.all(lhs==0, axis=1).sum()\n rhs_zero_cols = rhs_cols- np.unique(rhs.nonzero()[1]).size\n else:\n lhs_zero_rows = np.all(lhs==0, axis=1).sum()\n rhs_zero_cols = np.all(rhs==0, axis=0).sum()\n num_zeros = lhs_zero_rows * rhs_cols + rhs_zero_cols * lhs_rows - lhs_zero_rows * rhs_zero_cols\n return (float(num_zeros) / float(size)) >= 0.5", - "docstring": "Examines a pair of matrices and determines if the result of their multiplication should be sparse or not." 
- }, - { - "code": "def is_name_valid(fqn):\n if not isinstance(fqn, (str,unicode)):\n return False\n if fqn.count( \".\" ) != 1:\n return False\n name, namespace_id = fqn.split(\".\")\n if len(name) == 0 or len(namespace_id) == 0:\n return False \n if not is_b40( name ) or \"+\" in name or \".\" in name:\n return False \n if not is_namespace_valid( namespace_id ):\n return False\n if len(fqn) > LENGTHS['blockchain_id_name']:\n return False \n return True", - "docstring": "Is a fully-qualified name acceptable?\n Return True if so\n Return False if not\n\n >>> is_name_valid('abcd')\n False\n >>> is_name_valid('abcd.')\n False\n >>> is_name_valid('.abcd')\n False\n >>> is_name_valid('Abcd.abcd')\n False\n >>> is_name_valid('abcd.abc.d')\n False\n >>> is_name_valid('abcd.abc+d')\n False\n >>> is_name_valid('a.b.c')\n False\n >>> is_name_valid(True)\n False\n >>> is_name_valid(123)\n False\n >>> is_name_valid(None)\n False\n >>> is_name_valid('')\n False\n >>> is_name_valid('abcdabcdabcdabcdabcdabcdabcdabcda.bcd')\n True\n >>> is_name_valid('abcdabcdabcdabcdabcdabcdabcdabcdab.bcd')\n False\n >>> is_name_valid('abcdabcdabcdabcdabcdabcdabcdabcdabc.d')\n True\n >>> is_name_valid('a+b.c')\n False\n >>> is_name_valid('a_b.c')\n True" - }, - { - "code": "def as_json(context):\n info = {\n 'info': cgi.escape(pprint.pformat(context.context)),\n }\n return Response(content_type='application/json', body=json.dumps(info))", - "docstring": "Return an object's representation as JSON" - }, - { - "code": "def _go_install(self, target, gopath, build_flags):\n args = build_flags + [target.import_path]\n result, go_cmd = self.go_dist.execute_go_cmd(\n 'install', gopath=gopath, args=args,\n workunit_factory=self.context.new_workunit,\n workunit_name='install {}'.format(target.import_path),\n workunit_labels=[WorkUnitLabel.COMPILER])\n if result != 0:\n raise TaskError('{} failed with exit code {}'.format(go_cmd, result))", - "docstring": "Create and execute a `go install` command." 
- }, - { - "code": "def pos_by_percent(self, x_percent, y_percent):\n x = round(x_percent * self.width)\n y = round(y_percent * self.height)\n return int(x), int(y)", - "docstring": "Finds a point inside the box that is exactly at the given percentage place.\n\n :param x_percent: how much percentage from left edge\n :param y_percent: how much percentage from top edge\n :return: A point inside the box" - }, - { - "code": "def run_winpdb(self):\r\n if self.save():\r\n fname = self.get_current_filename()\r\n runconf = get_run_configuration(fname)\r\n if runconf is None:\r\n args = []\r\n wdir = None\r\n else:\r\n args = runconf.get_arguments().split()\r\n wdir = runconf.get_working_directory()\r\n programs.run_program(WINPDB_PATH, [fname] + args, cwd=wdir or None)", - "docstring": "Run winpdb to debug current file" - }, - { - "code": "def remove_listener(self, listener):\n self.logger.debug('discarding listener %r', listener)\n with self._lock:\n self._listeners.discard(listener)", - "docstring": "Unregister some listener; ignore if the listener was never\n registered.\n\n :type listener: :class:`SessionListener`" - }, - { - "code": "def _init_config(self):\n system = platform.system().lower()\n leveldb_fallback_dir = os.path.expanduser(\"~\")\n if system.startswith(\"darwin\"):\n leveldb_fallback_dir = os.path.join(\n leveldb_fallback_dir, \"Library\", \"Ethereum\"\n )\n elif system.startswith(\"windows\"):\n leveldb_fallback_dir = os.path.join(\n leveldb_fallback_dir, \"AppData\", \"Roaming\", \"Ethereum\"\n )\n else:\n leveldb_fallback_dir = os.path.join(leveldb_fallback_dir, \".ethereum\")\n leveldb_fallback_dir = os.path.join(leveldb_fallback_dir, \"geth\", \"chaindata\")\n if not os.path.exists(self.config_path):\n log.info(\"No config file found. Creating default: \" + self.config_path)\n open(self.config_path, \"a\").close()\n config = ConfigParser(allow_no_value=True)\n config.optionxform = str\n config.read(self.config_path, \"utf-8\")\n if \"defaults\" not in config.sections():\n self._add_default_options(config)\n if not config.has_option(\"defaults\", \"leveldb_dir\"):\n self._add_leveldb_option(config, leveldb_fallback_dir)\n if not config.has_option(\"defaults\", \"dynamic_loading\"):\n self._add_dynamic_loading_option(config)\n with codecs.open(self.config_path, \"w\", \"utf-8\") as fp:\n config.write(fp)\n leveldb_dir = config.get(\n \"defaults\", \"leveldb_dir\", fallback=leveldb_fallback_dir\n )\n return os.path.expanduser(leveldb_dir)", - "docstring": "If no config file exists, create it and add default options.\n\n Default LevelDB path is specified based on OS\n dynamic loading is set to infura by default in the file\n Returns: leveldb directory" - }, - { - "code": "def Sample(self, tasks_status):\n sample_time = time.time()\n sample = '{0:f}\\t{1:d}\\t{2:d}\\t{3:d}\\t{4:d}\\t{5:d}\\n'.format(\n sample_time, tasks_status.number_of_queued_tasks,\n tasks_status.number_of_tasks_processing,\n tasks_status.number_of_tasks_pending_merge,\n tasks_status.number_of_abandoned_tasks,\n tasks_status.total_number_of_tasks)\n self._WritesString(sample)", - "docstring": "Takes a sample of the status of queued tasks for profiling.\n\n Args:\n tasks_status (TasksStatus): status information about tasks." 
- }, - { - "code": "def get_key(self, section, key):\n LOGGER.debug(\"> Retrieving '{0}' in '{1}' section.\".format(key, section))\n self.__settings.beginGroup(section)\n value = self.__settings.value(key)\n LOGGER.debug(\"> Key value: '{0}'.\".format(value))\n self.__settings.endGroup()\n return value", - "docstring": "Gets key value from settings file.\n\n :param section: Current section to retrieve key from.\n :type section: unicode\n :param key: Current key to retrieve.\n :type key: unicode\n :return: Current key value.\n :rtype: object" - }, - { - "code": "def lessThan(self, leftIndex, rightIndex):\n leftData = self.sourceModel().data(leftIndex, RegistryTableModel.SORT_ROLE)\n rightData = self.sourceModel().data(rightIndex, RegistryTableModel.SORT_ROLE)\n return leftData < rightData", - "docstring": "Returns true if the value of the item referred to by the given index left is less than\n the value of the item referred to by the given index right, otherwise returns false." - }, - { - "code": "def get_all_context(self):\n self.plan_pricing = get_object_or_404(PlanPricing.objects.all().select_related('plan', 'pricing'),\n Q(pk=self.kwargs['pk']) & Q(plan__available=True) & (\n Q(plan__customized=self.request.user) | Q(\n plan__customized__isnull=True)))\n if not self.request.user.userplan.is_expired() and self.request.user.userplan.plan != self.plan_pricing.plan:\n raise Http404\n self.plan = self.plan_pricing.plan\n self.pricing = self.plan_pricing.pricing", - "docstring": "Retrieves Plan and Pricing for current order creation" - }, - { - "code": "def sh(cmd, grid=False, infile=None, outfile=None, errfile=None,\n append=False, background=False, threaded=None, log=True,\n grid_opts=None, silent=False, shell=\"/bin/bash\", check=False):\n if not cmd:\n return 1\n if silent:\n outfile = errfile = \"/dev/null\"\n if grid:\n from jcvi.apps.grid import GridProcess\n pr = GridProcess(cmd, infile=infile, outfile=outfile, errfile=errfile,\n threaded=threaded, grid_opts=grid_opts)\n pr.start()\n return pr.jobid\n else:\n if infile:\n cat = \"cat\"\n if infile.endswith(\".gz\"):\n cat = \"zcat\"\n cmd = \"{0} {1} |\".format(cat, infile) + cmd\n if outfile and outfile != \"stdout\":\n if outfile.endswith(\".gz\"):\n cmd += \" | gzip\"\n tag = \">\"\n if append:\n tag = \">>\"\n cmd += \" {0}{1}\".format(tag, outfile)\n if errfile:\n if errfile == outfile:\n errfile = \"&1\"\n cmd += \" 2>{0}\".format(errfile)\n if background:\n cmd += \" &\"\n if log:\n logging.debug(cmd)\n call_func = check_call if check else call\n return call_func(cmd, shell=True, executable=shell)", - "docstring": "simple wrapper for system calls" - }, - { - "code": "def _sync_params_from_devices(self):\n self._exec_group.get_params(self._arg_params, self._aux_params)\n if self._kvstore and self._update_on_kvstore:\n for param_name, param_val in sorted(self._arg_params.items()):\n if param_val.stype == 'row_sparse':\n row_ids = nd.arange(0, param_val.shape[0], dtype='int64')\n self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_ids)\n self._params_dirty = False", - "docstring": "Synchronizes parameters from devices to CPU. This function should be called after\n calling `update` that updates the parameters on the devices, before one can read the\n latest parameters from ``self._arg_params`` and ``self._aux_params``.\n\n For row_sparse parameters on devices, ther are pulled from KVStore with all row ids." 
- }, - { - "code": "def _process_line(self, line):\n m = self._rfc_1459_command_regexp.match(line)\n prefix = m.group('prefix')\n tags = self._process_tags(m.group('tags'))\n source = self._process_prefix(prefix)\n command = self._process_command(m.group('command'))\n arguments = self._process_arguments(m.group('argument'))\n if not self.real_server_name:\n self.real_server_name = prefix\n command = irc.events.numeric.get(command, command)\n if command not in [\"privmsg\", \"notice\"]:\n return super(ServerConnection3, self)._process_line(line)\n event = Event3(\"all_raw_messages\", self.get_server_name(),\n None, [line], tags=tags)\n self._handle_event(event)\n target, msg = arguments[0], arguments[1]\n messages = irc.ctcp.dequote(msg)\n command = self._resolve_command(command, target)\n for m in messages:\n self._handle_message(tags, source, command, target, m)", - "docstring": "Process the given line and handle the events\n\n :param line: the raw message\n :type line: :class:`str`\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def sigma_r2(self, r, kwargs_profile, kwargs_anisotropy, kwargs_light):\n if self._mass_profile == 'power_law':\n if self._anisotropy_type == 'r_ani':\n if self._light_profile == 'Hernquist':\n sigma_r = self.power_law_anisotropy(r, kwargs_profile, kwargs_anisotropy, kwargs_light)\n else:\n raise ValueError('light profile %s not supported for Jeans solver' % self._light_profile)\n else:\n raise ValueError('anisotropy type %s not implemented in Jeans equation modelling' % self._anisotropy_type)\n else:\n raise ValueError('mass profile type %s not implemented in Jeans solver' % self._mass_profile)\n return sigma_r", - "docstring": "solves radial Jeans equation" - }, - { - "code": "def start_container(self, conf, tty=True, detach=False, is_dependency=False, no_intervention=False):\n if not conf.harpoon.docker_api.base_url.startswith(\"http\"):\n self.find_bound_ports(conf.ports)\n container_id = conf.container_id\n container_name = conf.container_name\n conf.harpoon.network_manager.register(conf, container_name)\n log.info(\"Starting container %s (%s)\", container_name, container_id)\n try:\n if not detach and not is_dependency:\n self.start_tty(conf, interactive=tty, **conf.other_options.start)\n else:\n conf.harpoon.docker_api.start(container_id\n , **conf.other_options.start\n )\n except docker.errors.APIError as error:\n if str(error).startswith(\"404 Client Error: Not Found\"):\n log.error(\"Container died before we could even get to it...\")\n inspection = None\n if not detach and not is_dependency:\n inspection = self.get_exit_code(conf)\n if inspection and not no_intervention:\n if not inspection[\"State\"][\"Running\"] and inspection[\"State\"][\"ExitCode\"] != 0:\n self.stage_run_intervention(conf)\n raise BadImage(\"Failed to run container\", container_id=container_id, container_name=container_name, reason=\"nonzero exit code after launch\")\n if not is_dependency and conf.harpoon.intervene_afterwards and not no_intervention:\n self.stage_run_intervention(conf, just_do_it=True)", - "docstring": "Start up a single container" - }, - { - "code": "def element(self, name, attrs=None):\n self.open(name, attrs)\n yield\n self.close()", - "docstring": "This method is a context manager for writing and closing an element." 
- }, - { - "code": "def move(self, new_father, idx=None, prepend=None, name=None):\n self.parent.pop(self._own_index)\n new_father._insert(self, idx=idx, prepend=prepend, name=name)\n new_father._stable = False\n return self", - "docstring": "Moves this element from his father to the given one." - }, - { - "code": "def _authorize_new_tokens(self):\n logging.info('About to request new OAuth2 tokens from Coursera.')\n state_token = uuid.uuid4().hex\n authorization_url = self._build_authorizaton_url(state_token)\n sys.stdout.write(\n 'Please visit the following URL to authorize this app:\\n')\n sys.stdout.write('\\t%s\\n\\n' % authorization_url)\n if _platform == 'darwin':\n sys.stdout.write(\n 'Mac OS X detected; attempting to auto-open the url '\n 'in your default browser...\\n')\n try:\n subprocess.check_call(['open', authorization_url])\n except:\n logging.exception('Could not call `open %(url)s`.',\n url=authorization_url)\n if self.local_webserver_port is not None:\n server_address = ('', self.local_webserver_port)\n code_holder = CodeHolder()\n local_server = BaseHTTPServer.HTTPServer(\n server_address,\n _make_handler(state_token, code_holder))\n while not code_holder.has_code():\n local_server.handle_request()\n coursera_code = code_holder.code\n else:\n coursera_code = raw_input('Please enter the code from Coursera: ')\n form_data = {\n 'code': coursera_code,\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'redirect_uri': self._redirect_uri,\n 'grant_type': 'authorization_code',\n }\n return self._request_tokens_from_token_endpoint(form_data)", - "docstring": "Stands up a new localhost http server and retrieves new OAuth2 access\n tokens from the Coursera OAuth2 server." - }, - { - "code": "def install(self):\n with self.selenium.context(self.selenium.CONTEXT_CHROME):\n self.find_primary_button().click()", - "docstring": "Confirm add-on install." - }, - { - "code": "def predict(self, X):\n return self.__cost(self.__unroll(self.__thetas), 0, np.matrix(X))", - "docstring": "Returns predictions of input test cases." - }, - { - "code": "def handle(self, event):\n callback = getattr(self, 'on_{event}'.format(event=event.event), None)\n callback(event)", - "docstring": "Entry point to handle user events.\n\n :param event: Received event. See a full list `here `_." - }, - { - "code": "def globus_group(*args, **kwargs):\n def inner_decorator(f):\n f = click.group(*args, cls=GlobusCommandGroup, **kwargs)(f)\n f = common_options(f)\n return f\n return inner_decorator", - "docstring": "Wrapper over click.group which sets GlobusCommandGroup as the Class\n\n Caution!\n Don't get snake-bitten by this. `globus_group` is a decorator which MUST\n take arguments. It is not wrapped in our common detect-and-decorate pattern\n to allow it to be used bare -- that wouldn't work (unnamed groups? weird\n stuff)" - }, - { - "code": "def ready_message():\n title = m.Heading(tr('Ready'), **PROGRESS_UPDATE_STYLE)\n notes = m.Paragraph(\n tr('You can now proceed to run your analysis by clicking the '),\n m.EmphasizedText(tr('Run'), **KEYWORD_STYLE),\n tr('button.'))\n message = m.Message(LOGO_ELEMENT, title, notes)\n return message", - "docstring": "Helper to create a message indicating inasafe is ready.\n\n :returns Message: A localised message indicating we are ready to run." 
- }, - { - "code": "def toggle(self, index, n_cols=70):\n data = self.get(index)\n if data['type'] == 'Submission':\n pass\n elif data['type'] == 'Comment':\n cache = [data]\n count = 1\n for d in self.iterate(index + 1, 1, n_cols):\n if d['level'] <= data['level']:\n break\n count += d.get('count', 1)\n cache.append(d)\n comment = {\n 'type': 'HiddenComment',\n 'cache': cache,\n 'count': count,\n 'level': data['level'],\n 'body': 'Hidden',\n 'hidden': True}\n self._comment_data[index:index + len(cache)] = [comment]\n elif data['type'] == 'HiddenComment':\n self._comment_data[index:index + 1] = data['cache']\n elif data['type'] == 'MoreComments':\n with self._loader('Loading comments'):\n assert self._loader.depth == 1\n comments = data['object'].comments(update=True)\n if not self._loader.exception:\n comments = self.flatten_comments(comments, data['level'])\n comment_data = [self.strip_praw_comment(c) for c in comments]\n self._comment_data[index:index + 1] = comment_data\n else:\n raise ValueError('%s type not recognized' % data['type'])", - "docstring": "Toggle the state of the object at the given index.\n\n If it is a comment, pack it into a hidden comment.\n If it is a hidden comment, unpack it.\n If it is more comments, load the comments." - }, - { - "code": "def _size_fmt(num):\n try:\n num = int(num)\n if num < 1024:\n return '{0} bytes'.format(num)\n num /= 1024.0\n for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'):\n if num < 1024.0:\n return '{0:3.1f} {1}'.format(num, unit)\n num /= 1024.0\n except Exception:\n log.error('Unable to format file size for \\'%s\\'', num)\n return 'unknown'", - "docstring": "Format bytes as human-readable file sizes" - }, - { - "code": "def create_from_hash(self, stream, name, output_type, output_params,\n initial_status=None, start=None, end=None):\n return self._create(True, stream, name, output_type, output_params, initial_status, start, end)", - "docstring": "Create a new push subscription using a live stream.\n\n Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushcreate\n\n :param stream: The hash of a DataSift stream.\n :type stream: str\n :param name: The name to give the newly created subscription\n :type name: str\n :param output_type: One of the supported output types e.g. 
s3\n :type output_type: str\n :param output_params: The set of parameters required for the given output type\n :type output_params: dict\n :param initial_status: The initial status of the subscription, active, paused or waiting_for_start\n :type initial_status: str\n :param start: Optionally specifies when the subscription should start\n :type start: int\n :param end: Optionally specifies when the subscription should end\n :type end: int\n :returns: dict with extra response data\n :rtype: :class:`~datasift.request.DictResponse`\n :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`" - }, - { - "code": "def getinfo(self):\n try:\n old_getinfo = AuthServiceProxy(self.__service_url, 'getinfo', self.__timeout, self.__conn, True)\n res = old_getinfo()\n if 'error' not in res:\n return res\n except JSONRPCException:\n pass\n network_info = self.getnetworkinfo()\n blockchain_info = self.getblockchaininfo()\n try:\n wallet_info = self.getwalletinfo()\n except:\n wallet_info = {\n 'walletversion': None,\n 'balance': None,\n 'keypoololdest': None,\n 'keypoolsize': None,\n 'paytxfee': None,\n }\n res = {\n 'version': network_info['version'],\n 'protocolversion': network_info['protocolversion'],\n 'walletversion': wallet_info['walletversion'],\n 'balance': wallet_info['balance'],\n 'blocks': blockchain_info['blocks'],\n 'timeoffset': network_info['timeoffset'],\n 'connections': network_info['connections'],\n 'proxy': network_info['networks'],\n 'difficulty': blockchain_info['difficulty'],\n 'testnet': blockchain_info['chain'] == 'testnet',\n 'keypoololdest': wallet_info['keypoololdest'],\n 'keypoolsize': wallet_info['keypoolsize'],\n 'paytxfee': wallet_info['paytxfee'],\n 'errors': network_info['warnings'],\n }\n for k in ['unlocked_until', 'relayfee', 'paytxfee']:\n if wallet_info.has_key(k):\n res[k] = wallet_info[k]\n return res", - "docstring": "Backwards-compatibility for 0.14 and later" - }, - { - "code": "def _add_listing_links(self, response: ListingResponse):\n base_url = response.request.url_info.url\n if self._glob_pattern:\n level = self._item_session.url_record.level\n else:\n level = None\n for file_entry in response.files:\n if self._glob_pattern and \\\n not fnmatch.fnmatchcase(file_entry.name, self._glob_pattern):\n continue\n if file_entry.type == 'dir':\n linked_url = urljoin_safe(base_url, file_entry.name + '/')\n elif file_entry.type in ('file', 'symlink', None):\n if not self._processor.fetch_params.retr_symlinks and \\\n file_entry.type == 'symlink':\n self._make_symlink(file_entry.name, file_entry.dest)\n linked_url = None\n else:\n linked_url = urljoin_safe(base_url, file_entry.name)\n else:\n linked_url = None\n if linked_url:\n linked_url_info = parse_url_or_log(linked_url)\n if linked_url_info:\n verdict = self._fetch_rule.check_ftp_request(self._item_session)[0]\n if verdict:\n if linked_url_info.path.endswith('/'):\n self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.directory)\n else:\n self._item_session.add_child_url(linked_url_info.url, link_type=LinkType.file, level=level)", - "docstring": "Add links from file listing response." 
- }, - { - "code": "def pull_image(self, image, insecure=False, dockercfg_path=None):\n logger.info(\"pulling image '%s' from registry\", image)\n logger.debug(\"image = '%s', insecure = '%s'\", image, insecure)\n tag = image.tag\n if dockercfg_path:\n self.login(registry=image.registry, docker_secret_path=dockercfg_path)\n try:\n command_result = self.retry_generator(self.d.pull,\n image.to_str(tag=False),\n tag=tag, insecure_registry=insecure,\n decode=True, stream=True)\n except TypeError:\n command_result = self.retry_generator(self.d.pull,\n image.to_str(tag=False),\n tag=tag, decode=True, stream=True)\n self.last_logs = command_result.logs\n return image.to_str()", - "docstring": "pull provided image from registry\n\n :param image_name: ImageName, image to pull\n :param insecure: bool, allow connecting to registry over plain http\n :param dockercfg_path: str, path to dockercfg\n :return: str, image (reg.om/img:v1)" - }, - { - "code": "def flag_calls(func):\n if hasattr(func, 'called'):\n return func\n def wrapper(*args, **kw):\n wrapper.called = False\n out = func(*args, **kw)\n wrapper.called = True\n return out\n wrapper.called = False\n wrapper.__doc__ = func.__doc__\n return wrapper", - "docstring": "Wrap a function to detect and flag when it gets called.\n\n This is a decorator which takes a function and wraps it in a function with\n a 'called' attribute. wrapper.called is initialized to False.\n\n The wrapper.called attribute is set to False right before each call to the\n wrapped function, so if the call fails it remains False. After the call\n completes, wrapper.called is set to True and the output is returned.\n\n Testing for truth in wrapper.called allows you to determine if a call to\n func() was attempted and succeeded." - }, - { - "code": "def _publish_internal(self, push_messages):\n import requests\n response = requests.post(\n self.host + self.api_url + '/push/send',\n data=json.dumps([pm.get_payload() for pm in push_messages]),\n headers={\n 'accept': 'application/json',\n 'accept-encoding': 'gzip, deflate',\n 'content-type': 'application/json',\n }\n )\n try:\n response_data = response.json()\n except ValueError:\n response.raise_for_status()\n raise PushServerError('Invalid server response', response)\n if 'errors' in response_data:\n raise PushServerError(\n 'Request failed',\n response,\n response_data=response_data,\n errors=response_data['errors'])\n if 'data' not in response_data:\n raise PushServerError(\n 'Invalid server response',\n response,\n response_data=response_data)\n response.raise_for_status()\n if len(push_messages) != len(response_data['data']):\n raise PushServerError(\n ('Mismatched response length. 
Expected %d %s but only '\n 'received %d' % (\n len(push_messages),\n 'receipt' if len(push_messages) == 1 else 'receipts',\n len(response_data['data']))),\n response,\n response_data=response_data)\n receipts = []\n for i, receipt in enumerate(response_data['data']):\n receipts.append(PushResponse(\n push_message=push_messages[i],\n status=receipt.get('status', PushResponse.ERROR_STATUS),\n message=receipt.get('message', ''),\n details=receipt.get('details', None)))\n return receipts", - "docstring": "Send push notifications\n\n The server will validate any type of syntax errors and the client will\n raise the proper exceptions for the user to handle.\n\n Each notification is of the form:\n {\n 'to': 'ExponentPushToken[xxx]',\n 'body': 'This text gets display in the notification',\n 'badge': 1,\n 'data': {'any': 'json object'},\n }\n\n Args:\n push_messages: An array of PushMessage objects." - }, - { - "code": "def kaplan_meier_estimator(event, time_exit, time_enter=None, time_min=None):\n event, time_enter, time_exit = check_y_survival(event, time_enter, time_exit, allow_all_censored=True)\n check_consistent_length(event, time_enter, time_exit)\n if time_enter is None:\n uniq_times, n_events, n_at_risk = _compute_counts(event, time_exit)\n else:\n uniq_times, n_events, n_at_risk = _compute_counts_truncated(event, time_enter, time_exit)\n values = 1 - n_events / n_at_risk\n if time_min is not None:\n mask = uniq_times >= time_min\n uniq_times = numpy.compress(mask, uniq_times)\n values = numpy.compress(mask, values)\n y = numpy.cumprod(values)\n return uniq_times, y", - "docstring": "Kaplan-Meier estimator of survival function.\n\n Parameters\n ----------\n event : array-like, shape = (n_samples,)\n Contains binary event indicators.\n\n time_exit : array-like, shape = (n_samples,)\n Contains event/censoring times.\n\n time_enter : array-like, shape = (n_samples,), optional\n Contains time when each individual entered the study for\n left truncated survival data.\n\n time_min : float, optional\n Compute estimator conditional on survival at least up to\n the specified time.\n\n Returns\n -------\n time : array, shape = (n_times,)\n Unique times.\n\n prob_survival : array, shape = (n_times,)\n Survival probability at each unique time point.\n If `time_enter` is provided, estimates are conditional probabilities.\n\n Examples\n --------\n Creating a Kaplan-Meier curve:\n\n >>> x, y = kaplan_meier_estimator(event, time)\n >>> plt.step(x, y, where=\"post\")\n >>> plt.ylim(0, 1)\n >>> plt.show()\n\n References\n ----------\n .. [1] Kaplan, E. L. and Meier, P., \"Nonparametric estimation from incomplete observations\",\n Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958." 
- }, - { - "code": "def unscale_and_snap_to_nearest(x, tune_params, eps):\n x_u = [i for i in x]\n for i, v in enumerate(tune_params.values()):\n pad = 0.5*eps\n linspace = numpy.linspace(pad, (eps*len(v))-pad, len(v))\n idx = numpy.abs(linspace-x[i]).argmin()\n idx = min(max(idx, 0), len(v)-1)\n x_u[i] = v[idx]\n return x_u", - "docstring": "helper func that snaps a scaled variable to the nearest config" - }, - { - "code": "def compare_words_lexicographic( word_a, word_b ):\n if ( not all_tamil(word_a) ) or (not all_tamil(word_b)) :\n pass\n La = len(word_a)\n Lb = len(word_b)\n all_TA_letters = u\"\".join(tamil_letters)\n for itr in range(0,min(La,Lb)):\n pos1 = all_TA_letters.find( word_a[itr] )\n pos2 = all_TA_letters.find( word_b[itr] )\n if pos1 != pos2 :\n return cmp(pos1, pos2)\n return cmp(La,Lb)", - "docstring": "compare words in Tamil lexicographic order" - }, - { - "code": "def register_composer(self, type, composer, **meta):\n try:\n self.registered_formats[type]['composer'] = composer\n except KeyError:\n self.registered_formats[type] = {'composer': composer}\n if meta:\n self.register_meta(type, **meta)", - "docstring": "Registers a composer of a format.\n\n :param type: The unique name of the format\n :param composer: The method to compose data as the format" - }, - { - "code": "def createTargetOrder(self, quantity, parentId=0,\n target=0., orderType=None, transmit=True, group=None, tif=\"DAY\",\n rth=False, account=None):\n order = self.createOrder(quantity,\n price = target,\n transmit = transmit,\n orderType = dataTypes[\"ORDER_TYPE_LIMIT\"] if orderType == None else orderType,\n ocaGroup = group,\n parentId = parentId,\n rth = rth,\n tif = tif,\n account = account\n )\n return order", - "docstring": "Creates TARGET order" - }, - { - "code": "def get_bucket(self, bucket, marker=None, max_keys=None, prefix=None):\n args = []\n if marker is not None:\n args.append((\"marker\", marker))\n if max_keys is not None:\n args.append((\"max-keys\", \"%d\" % (max_keys,)))\n if prefix is not None:\n args.append((\"prefix\", prefix))\n if args:\n object_name = \"?\" + urlencode(args)\n else:\n object_name = None\n details = self._details(\n method=b\"GET\",\n url_context=self._url_context(bucket=bucket, object_name=object_name),\n )\n d = self._submit(self._query_factory(details))\n d.addCallback(self._parse_get_bucket)\n return d", - "docstring": "Get a list of all the objects in a bucket.\n\n @param bucket: The name of the bucket from which to retrieve objects.\n @type bucket: L{unicode}\n\n @param marker: If given, indicate a position in the overall\n results where the results of this call should begin. 
The\n first result is the first object that sorts greater than\n this marker.\n @type marker: L{bytes} or L{NoneType}\n\n @param max_keys: If given, the maximum number of objects to\n return.\n @type max_keys: L{int} or L{NoneType}\n\n @param prefix: If given, indicate that only objects with keys\n beginning with this value should be returned.\n @type prefix: L{bytes} or L{NoneType}\n\n @return: A L{Deferred} that fires with a L{BucketListing}\n describing the result.\n\n @see: U{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html}" - }, - { - "code": "def update_single_grading_period(self, id, course_id, grading_periods_end_date, grading_periods_start_date, grading_periods_weight=None):\r\n path = {}\r\n data = {}\r\n params = {}\r\n path[\"course_id\"] = course_id\r\n path[\"id\"] = id\r\n data[\"grading_periods[start_date]\"] = grading_periods_start_date\r\n data[\"grading_periods[end_date]\"] = grading_periods_end_date\r\n if grading_periods_weight is not None:\r\n data[\"grading_periods[weight]\"] = grading_periods_weight\r\n self.logger.debug(\"PUT /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}\".format(params=params, data=data, **path))\r\n return self.generic_request(\"PUT\", \"/api/v1/courses/{course_id}/grading_periods/{id}\".format(**path), data=data, params=params, no_data=True)", - "docstring": "Update a single grading period.\r\n\r\n Update an existing grading period." - }, - { - "code": "def IPID_count(lst, funcID=lambda x: x[1].id, funcpres=lambda x: x[1].summary()):\n idlst = [funcID(e) for e in lst]\n idlst.sort()\n classes = [idlst[0]]\n classes += [t[1] for t in zip(idlst[:-1], idlst[1:]) if abs(t[0] - t[1]) > 50]\n lst = [(funcID(x), funcpres(x)) for x in lst]\n lst.sort()\n print(\"Probably %i classes:\" % len(classes), classes)\n for id, pr in lst:\n print(\"%5i\" % id, pr)", - "docstring": "Identify IP id values classes in a list of packets\n\nlst: a list of packets\nfuncID: a function that returns IP id values\nfuncpres: a function used to summarize packets" - }, - { - "code": "def cleanup():\n try:\n cloud_config = CloudConfig()\n cloud_controller = CloudController(cloud_config)\n cloud_controller.cleanup()\n except CloudComposeException as ex:\n print(ex)", - "docstring": "deletes launch configs and auto scaling group" - }, - { - "code": "def putRequest(self, request, block=True, timeout=0):\n self._requests_queue.put(request, block, timeout)\n self.workRequests[request.requestID] = request", - "docstring": "Put work request into work queue and save its id for later." - }, - { - "code": "def mark_all_as_deleted(self, recipient=None):\n assert_soft_delete()\n qset = self.active()\n if recipient:\n qset = qset.filter(recipient=recipient)\n return qset.update(deleted=True)", - "docstring": "Mark current queryset as deleted.\n Optionally, filter by recipient first." 
- }, - { - "code": "def remove_option(self, block, name):\n if block:\n if not self._ast or not block in self._block_map:\n raise ValueError(u\"Block '{0}' does not exist\"\n .format(common.from_utf8(block)))\n block_idx = self._block_map[block]\n for i, opt in enumerate(self._ast[2][block_idx][1]):\n if opt[0] == name:\n item_idx = i\n break\n else:\n raise ValueError(u\"Option '{0}' does not exist\"\n .format(common.from_utf8(name)))\n options = self._ast[2][block_idx][1]\n options.pop(item_idx)\n else:\n if not self._ast:\n raise ValueError(u\"Option '{0}' does not exist\"\n .format(common.from_utf8(name)))\n for i, opt in enumerate(self._ast[1]):\n if opt[0] == name:\n item_idx = i\n break\n else:\n raise ValueError(u\"Option '{0}' does not exist\"\n .format(common.from_utf8(name)))\n self._ast[1].pop(item_idx)", - "docstring": "Removes first matching option that exists from the AST.\n\n `block`\n Block name. Set to ``None`` for non-block option.\n `name`\n Option name to remove.\n\n * Raises a ``ValueError`` exception if `name` and/or `block`\n haven't been added." - }, - { - "code": "def dispatch(self, request, *args, **kwargs):\n self.return_url = request.GET.get('return_url', None)\n referrer = request.META.get('HTTP_REFERER', None)\n if (request.method != \"GET\" or\n request.is_ajax() or\n self.return_url or\n referrer is None or\n self.return_url is None and 'return_url' in request.GET):\n return super().dispatch(request, *args, **kwargs)\n if not self.return_url:\n url = request.get_full_path()\n if url.find(\"?\") < 0:\n url = \"?return_url=\".join((url, referrer))\n else:\n url = \"&return_url=\".join((url, referrer))\n return HttpResponseRedirect(url)", - "docstring": "Does request processing for return_url query parameter and redirects with it's missing\n\n We can't do that in the get method, as it does not exist in the View base class\n and child mixins implementing get do not call super().get" - }, - { - "code": "def v_from_i(resistance_shunt, resistance_series, nNsVth, current,\n saturation_current, photocurrent, method='lambertw'):\n if method.lower() == 'lambertw':\n return _singlediode._lambertw_v_from_i(\n resistance_shunt, resistance_series, nNsVth, current,\n saturation_current, photocurrent\n )\n else:\n args = (current, photocurrent, saturation_current,\n resistance_series, resistance_shunt, nNsVth)\n V = _singlediode.bishop88_v_from_i(*args, method=method.lower())\n size, shape = _singlediode._get_size_and_shape(args)\n if size <= 1:\n if shape is not None:\n V = np.tile(V, shape)\n if np.isnan(V).any() and size <= 1:\n V = np.repeat(V, size)\n if shape is not None:\n V = V.reshape(shape)\n return V", - "docstring": "Device voltage at the given device current for the single diode model.\n\n Uses the single diode model (SDM) as described in, e.g.,\n Jain and Kapoor 2004 [1].\n The solution is per Eq 3 of [1] except when resistance_shunt=numpy.inf,\n in which case the explict solution for voltage is used.\n Ideal device parameters are specified by resistance_shunt=np.inf and\n resistance_series=0.\n Inputs to this function can include scalars and pandas.Series, but it is\n the caller's responsibility to ensure that the arguments are all float64\n and within the proper ranges.\n\n Parameters\n ----------\n resistance_shunt : numeric\n Shunt resistance in ohms under desired IV curve conditions.\n Often abbreviated ``Rsh``.\n 0 < resistance_shunt <= numpy.inf\n\n resistance_series : numeric\n Series resistance in ohms under desired IV curve conditions.\n Often 
abbreviated ``Rs``.\n 0 <= resistance_series < numpy.inf\n\n nNsVth : numeric\n The product of three components. 1) The usual diode ideal factor\n (n), 2) the number of cells in series (Ns), and 3) the cell\n thermal voltage under the desired IV curve conditions (Vth). The\n thermal voltage of the cell (in volts) may be calculated as\n ``k*temp_cell/q``, where k is Boltzmann's constant (J/K),\n temp_cell is the temperature of the p-n junction in Kelvin, and\n q is the charge of an electron (coulombs).\n 0 < nNsVth\n\n current : numeric\n The current in amperes under desired IV curve conditions.\n\n saturation_current : numeric\n Diode saturation current in amperes under desired IV curve\n conditions. Often abbreviated ``I_0``.\n 0 < saturation_current\n\n photocurrent : numeric\n Light-generated current (photocurrent) in amperes under desired\n IV curve conditions. Often abbreviated ``I_L``.\n 0 <= photocurrent\n\n method : str\n Method to use: ``'lambertw'``, ``'newton'``, or ``'brentq'``. *Note*:\n ``'brentq'`` is limited to 1st quadrant only.\n\n Returns\n -------\n current : np.ndarray or scalar\n\n References\n ----------\n [1] A. Jain, A. Kapoor, \"Exact analytical solutions of the\n parameters of real solar cells using Lambert W-function\", Solar\n Energy Materials and Solar Cells, 81 (2004) 269-277." - }, - { - "code": "def normalize(name):\n ret = name.replace(':', '')\n ret = ret.replace('%', '')\n ret = ret.replace(' ', '_')\n return ret", - "docstring": "Normalize name for the Statsd convention" - }, - { - "code": "def _find_bounds_1d(data, x):\n idx = np.searchsorted(data, x)\n if idx == 0:\n idx0 = 0\n elif idx == len(data):\n idx0 = idx - 2\n else:\n idx0 = idx - 1\n return idx0", - "docstring": "Find the index of the lower bound where ``x`` should be inserted\n into ``a`` to maintain order.\n\n The index of the upper bound is the index of the lower bound\n plus 2. Both bound indices must be within the array.\n\n Parameters\n ----------\n data : 1D `~numpy.ndarray`\n The 1D array to search.\n\n x : float\n The value to insert.\n\n Returns\n -------\n index : int\n The index of the lower bound." 
- }, - { - "code": "def start_new(cls, tuner, inputs):\n config = _Job._load_config(inputs, tuner.estimator)\n warm_start_config_req = None\n if tuner.warm_start_config:\n warm_start_config_req = tuner.warm_start_config.to_input_req()\n tuner_args = config.copy()\n tuner_args['job_name'] = tuner._current_job_name\n tuner_args['strategy'] = tuner.strategy\n tuner_args['objective_type'] = tuner.objective_type\n tuner_args['objective_metric_name'] = tuner.objective_metric_name\n tuner_args['max_jobs'] = tuner.max_jobs\n tuner_args['max_parallel_jobs'] = tuner.max_parallel_jobs\n tuner_args['parameter_ranges'] = tuner.hyperparameter_ranges()\n tuner_args['static_hyperparameters'] = tuner.static_hyperparameters\n tuner_args['input_mode'] = tuner.estimator.input_mode\n tuner_args['metric_definitions'] = tuner.metric_definitions\n tuner_args['tags'] = tuner.tags\n tuner_args['warm_start_config'] = warm_start_config_req\n tuner_args['early_stopping_type'] = tuner.early_stopping_type\n if isinstance(tuner.estimator, sagemaker.algorithm.AlgorithmEstimator):\n tuner_args['algorithm_arn'] = tuner.estimator.algorithm_arn\n else:\n tuner_args['image'] = tuner.estimator.train_image()\n tuner_args['enable_network_isolation'] = tuner.estimator.enable_network_isolation()\n tuner_args['encrypt_inter_container_traffic'] = \\\n tuner.estimator.encrypt_inter_container_traffic\n tuner.estimator.sagemaker_session.tune(**tuner_args)\n return cls(tuner.sagemaker_session, tuner._current_job_name)", - "docstring": "Create a new Amazon SageMaker hyperparameter tuning job from the HyperparameterTuner.\n\n Args:\n tuner (sagemaker.tuner.HyperparameterTuner): HyperparameterTuner object created by the user.\n inputs (str): Parameters used when called :meth:`~sagemaker.estimator.EstimatorBase.fit`.\n\n Returns:\n sagemaker.tuner._TuningJob: Constructed object that captures all information about the started job." - }, - { - "code": "def _get_from_format_dict(format_dict, key):\n if isinstance(format_dict[key], str):\n return format_dict[key]\n elif isinstance(format_dict[key], (list, tuple)):\n fn_list = list(format_dict[key])\n function = fn_list.pop(0)\n if len(fn_list):\n args = fn_list.pop(0)\n else:\n args = []\n if len(fn_list):\n kwargs = fn_list.pop(0)\n else:\n kwargs = {}\n return function(*args, **kwargs)", - "docstring": "Return a value from our format dict." - }, - { - "code": "def get_ticker(self):\n lastQuote = self.data['quote'][-1]\n lastTrade = self.data['trade'][-1]\n ticker = {\n \"last\": lastTrade['price'],\n \"buy\": lastQuote['bidPrice'],\n \"sell\": lastQuote['askPrice'],\n \"mid\": (float(lastQuote['bidPrice'] or 0) + float(lastQuote['askPrice'] or 0)) / 2\n }\n instrument = self.data['instrument'][0]\n return {k: round(float(v or 0), instrument['tickLog']) for k, v in list(ticker.items())}", - "docstring": "Return a ticker object. Generated from quote and trade." 
- }, - { - "code": "def Tokenizer(obj, metadata=None, separator=SEPARATOR):\n if obj is None:\n return []\n metadata = metadata or {}\n if isinstance(obj, (list, tuple)):\n return [\n Token(as_string(element).lower(), deepcopy(metadata)) for element in obj\n ]\n string = str(obj).strip().lower()\n length = len(string)\n tokens = []\n slice_start = 0\n for slice_end in range(length):\n char = string[slice_end]\n slice_length = slice_end - slice_start\n if separator.match(char) or slice_end == length - 1:\n if slice_length > 0:\n sl = slice(slice_start, slice_end if slice_end < length - 1 else None)\n token_metadata = {}\n token_metadata[\"position\"] = [\n slice_start,\n slice_length if slice_end < length - 1 else slice_length + 1,\n ]\n token_metadata[\"index\"] = len(tokens)\n token_metadata.update(metadata)\n tokens.append(Token(string[sl], token_metadata))\n slice_start = slice_end + 1\n return tokens", - "docstring": "Splits a string into tokens ready to be inserted into the search index.\n\n This tokenizer will convert its parameter to a string by calling `str` and\n then will split this string on characters matching `separator`.\n Lists will have their elements converted to strings and wrapped in a lunr\n `Token`.\n\n Optional metadata can be passed to the tokenizer, this metadata will be\n cloned and added as metadata to every token that is created from the object\n to be tokenized." - }, - { - "code": "def get(self, name: str, config: dict = None) -> NodePool:\n LOGGER.debug('NodePoolManager.node_pool >>>')\n rv = NodePool(name, self.protocol, config)\n LOGGER.debug('NodePoolManager.node_pool <<< %s', rv)\n return rv", - "docstring": "Return node pool in input name and optional configuration.\n\n :param name: name of configured pool\n :param config: pool configuration with optional 'timeout' int, 'extended_timeout' int,\n 'preordered_nodes' array of strings\n :return: node pool" - }, - { - "code": "def filter_regex(names, regex):\n return tuple(name for name in names\n if regex.search(name) is not None)", - "docstring": "Return a tuple of strings that match the regular expression pattern." 
- }, - { - "code": "async def losetup(self, image, read_only=True, offset=None, size=None,\n no_part_scan=None):\n try:\n device = self.udisks.find(image)\n except FileNotFoundError:\n pass\n else:\n self._log.info(_('not setting up {0}: already up', device))\n return device\n if not os.path.isfile(image):\n self._log.error(_('not setting up {0}: not a file', image))\n return None\n self._log.debug(_('setting up {0}', image))\n fd = os.open(image, os.O_RDONLY)\n device = await self.udisks.loop_setup(fd, {\n 'offset': offset,\n 'size': size,\n 'read-only': read_only,\n 'no-part-scan': no_part_scan,\n })\n self._log.info(_('set up {0} as {1}', image,\n device.device_presentation))\n return device", - "docstring": "Setup a loop device.\n\n :param str image: path of the image file\n :param bool read_only:\n :param int offset:\n :param int size:\n :param bool no_part_scan:\n :returns: the device object for the loop device" - }, - { - "code": "def block(self):\n st,output = _runcmd(\"/sbin/pfctl -aswitchyard -f -\", self._rules)\n log_debug(\"Installing rules: {}\".format(output))", - "docstring": "pfctl -a switchyard -f- < rules.txt\n pfctl -a switchyard -F rules\n pfctl -t switchyard -F r" - }, - { - "code": "def add_oxidation_state_by_element(self, oxidation_states):\n try:\n for site in self.sites:\n new_sp = {}\n for el, occu in site.species.items():\n sym = el.symbol\n new_sp[Specie(sym, oxidation_states[sym])] = occu\n site.species = new_sp\n except KeyError:\n raise ValueError(\"Oxidation state of all elements must be \"\n \"specified in the dictionary.\")", - "docstring": "Add oxidation states.\n\n Args:\n oxidation_states (dict): Dict of oxidation states.\n E.g., {\"Li\":1, \"Fe\":2, \"P\":5, \"O\":-2}" - }, - { - "code": "def do_before_loop(self):\n logger.info(\"I am the arbiter: %s\", self.link_to_myself.name)\n if not self.is_master:\n logger.debug(\"Waiting for my master death...\")\n return\n if not self.daemons_start(run_daemons=True):\n self.request_stop(message=\"Some Alignak daemons did not started correctly.\",\n exit_code=4)\n if not self.daemons_check():\n self.request_stop(message=\"Some Alignak daemons cannot be checked.\",\n exit_code=4)\n pause = max(1, max(self.conf.daemons_start_timeout, len(self.my_daemons) * 0.5))\n if pause:\n logger.info(\"Pausing %.2f seconds...\", pause)\n time.sleep(pause)\n self.configuration_dispatch()\n _t0 = time.time()\n self.get_initial_broks_from_satellites()\n statsmgr.timer('broks.get-initial', time.time() - _t0)\n self.external_commands_manager = ExternalCommandManager(\n self.conf, 'dispatcher', self, self.conf.accept_passive_unknown_check_results,\n self.conf.log_external_commands)", - "docstring": "Called before the main daemon loop.\n\n :return: None" - }, - { - "code": "def mark_offer_as_clear(self, offer_id):\n return self._create_put_request(\n resource=OFFERS,\n billomat_id=offer_id,\n command=CLEAR,\n )", - "docstring": "Mark offer as clear\n\n :param offer_id: the offer id\n :return Response" - }, - { - "code": "def end_subsegment(self, end_time=None):\n if not self.context.end_subsegment(end_time):\n return\n if self.current_segment().ready_to_send():\n self._send_segment()\n else:\n self.stream_subsegments()", - "docstring": "End the current active subsegment. If this is the last one open\n under its parent segment, the entire segment will be sent.\n\n :param float end_time: subsegment compeletion in unix epoch in seconds." 
- }, - { - "code": "def datetime_value_renderer(value, **options):\n datetime_format = options.get('datetime_format', 'SHORT_DATETIME_FORMAT')\n return formats.date_format(timezone.localtime(value), datetime_format)", - "docstring": "Render datetime value with django formats, default is SHORT_DATETIME_FORMAT" - }, - { - "code": "def get_host_health_data(self, data=None):\n if not data or data and \"GET_EMBEDDED_HEALTH_DATA\" not in data:\n data = self._execute_command(\n 'GET_EMBEDDED_HEALTH', 'SERVER_INFO', 'read')\n return data", - "docstring": "Request host health data of the server.\n\n :param: the data to retrieve from the server, defaults to None.\n :returns: the dictionary containing the embedded health data.\n :raises: IloConnectionError if failed connecting to the iLO.\n :raises: IloError, on an error from iLO." - }, - { - "code": "def count_year(year, **kwargs):\n url = gbif_baseurl + 'occurrence/counts/year'\n out = gbif_GET(url, {'year': year}, **kwargs)\n return out", - "docstring": "Lists occurrence counts by year\n\n :param year: [int] year range, e.g., ``1990,2000``. Does not support ranges like ``asterisk,2010``\n\n :return: dict\n\n Usage::\n\n from pygbif import occurrences\n occurrences.count_year(year = '1990,2000')" - }, - { - "code": "def _get_persistent_id(self, o):\n if type(o) in self.hash_dedup:\n oid = o.__class__.__name__ + \"-\" + str(hash(o))\n self._object_cache[oid] = o\n return oid\n if any(isinstance(o,c) for c in self.unsafe_key_baseclasses):\n return None\n try:\n return self._uuid_cache[o]\n except KeyError:\n pass\n except TypeError:\n return None\n if o.__class__.__module__.split('.')[0] in self.module_dedup or o.__class__ in self.uuid_dedup:\n oid = o.__class__.__name__.split(\".\")[-1] + '-' + str(uuid.uuid4())\n self._object_cache[oid] = o\n self._uuid_cache[o] = oid\n return oid\n return None", - "docstring": "Determines a persistent ID for an object.\n Does NOT do stores." - }, - { - "code": "def parse_out_ips(message):\n ips = []\n for entry in message.answer:\n for rdata in entry.items:\n ips.append(rdata.to_text())\n return ips", - "docstring": "Given a message, parse out the ips in the answer" - }, - { - "code": "def save(f, arr, vocab):\n itr = iter(vocab)\n word, idx = next(itr)\n _write_line(f, arr[idx], word)\n for word, idx in itr:\n f.write(b'\\n')\n _write_line(f, arr[idx], word)", - "docstring": "Save word embedding file.\n\n Args:\n f (File): File to write the vectors. File should be open for writing\n ascii.\n arr (numpy.array): Numpy array with ``float`` dtype.\n vocab (iterable): Each element is pair of a word (``bytes``) and ``arr``\n index (``int``). Word should be encoded to str apriori." - }, - { - "code": "def get_line_break_property(value, is_bytes=False):\n obj = unidata.ascii_line_break if is_bytes else unidata.unicode_line_break\n if value.startswith('^'):\n negated = value[1:]\n value = '^' + unidata.unicode_alias['linebreak'].get(negated, negated)\n else:\n value = unidata.unicode_alias['linebreak'].get(value, value)\n return obj[value]", - "docstring": "Get `LINE BREAK` property." 
- }, - { - "code": "def convert_datetimes_to_timestamps(data, datetime_attrs):\n if not data:\n return data\n new_data = {}\n for key, value in data.items():\n if key in datetime_attrs and isinstance(value, datetime):\n new_key = datetime_attrs[key]\n new_data[new_key] = timestamp_from_dt(value)\n else:\n new_data[key] = value\n return new_data", - "docstring": "Given a dictionary of data, and a dictionary of datetime attributes,\n return a new dictionary that converts any datetime attributes that may\n be present to their timestamped equivalent." - }, - { - "code": "def poke_32(library, session, address, data):\n return library.viPoke32(session, address, data)", - "docstring": "Write an 32-bit value from the specified address.\n\n Corresponds to viPoke32 function of the VISA library.\n\n :param library: the visa library wrapped by ctypes.\n :param session: Unique logical identifier to a session.\n :param address: Source address to read the value.\n :param data: value to be written to the bus.\n :return: return value of the library call.\n :rtype: :class:`pyvisa.constants.StatusCode`" - }, - { - "code": "def randomToggle(self, randomize):\n if randomize:\n self._stim.setReorderFunc(order_function('random'), 'random')\n else:\n self._stim.reorder = None", - "docstring": "Sets the reorder function on this StimulusModel to a randomizer\n or none, alternately" - }, - { - "code": "def commits(self, user, options):\n command = \"git log --all --author={0}\".format(user.login).split()\n command.append(\"--format=format:%h - %s\")\n command.append(\"--since='{0} 00:00:00'\".format(options.since))\n command.append(\"--until='{0} 00:00:00'\".format(options.until))\n if options.verbose:\n command.append(\"--name-only\")\n log.info(u\"Checking commits in {0}\".format(self.path))\n log.details(pretty(command))\n try:\n process = subprocess.Popen(\n command, cwd=self.path,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except OSError as error:\n log.debug(error)\n raise did.base.ReportError(\n \"Unable to access git repo '{0}'\".format(self.path))\n output, errors = process.communicate()\n log.debug(\"git log output:\")\n log.debug(output)\n if process.returncode == 0:\n if not output:\n return []\n else:\n if not options.verbose:\n return unicode(output, \"utf8\").split(\"\\n\")\n commits = []\n for commit in unicode(output, \"utf8\").split(\"\\n\\n\"):\n summary = commit.split(\"\\n\")[0]\n directory = re.sub(\"/[^/]+$\", \"\", commit.split(\"\\n\")[1])\n commits.append(\"{0}\\n{1}* {2}\".format(\n summary, 8 * \" \", directory))\n return commits\n else:\n log.debug(errors.strip())\n log.warn(\"Unable to check commits in '{0}'\".format(self.path))\n return []", - "docstring": "List commits for given user." - }, - { - "code": "def get_pages_list(self, project_id):\n try:\n result = self._request('/getpageslist/',\n {'projectid': project_id})\n return [TildaPage(**p) for p in result]\n except NetworkError:\n return []", - "docstring": "Get pages list" - }, - { - "code": "def get_keys_from_class(cc):\n return [prop.name for prop in cc.properties.values() \\\n if 'key' in prop.qualifiers]", - "docstring": "Return list of the key property names for a class" - }, - { - "code": "def run_stats_df(self):\n run_stats_df = []\n for x in self.run_stats:\n search_results = {**x[\"search_params\"]}\n search_results[\"score\"] = x[\"score\"]\n run_stats_df.append(search_results)\n return pd.DataFrame(run_stats_df)", - "docstring": "Returns self.run_stats over search params as pandas dataframe." 
- }, - { - "code": "def check(self):\n has_term = False\n if self.START_STATE not in self.states:\n raise SyntaxError('Undefined start rule')\n for state in self.states:\n for rule in self.states[state]:\n if rule is not None:\n if rule[2] == self.TERM_STATE:\n has_term = True\n elif rule[2] not in self.states:\n raise SyntaxError('Unexpected state: ' + rule[2])\n if not has_term:\n raise SyntaxError('Missed terminate state')", - "docstring": "Check semantic rules." - }, - { - "code": "def make_certifier():\n def decorator(func):\n @six.wraps(func)\n def wrapper(value=_undefined, **kwargs):\n def certify(val):\n if is_enabled():\n exec_func(func, val, **kwargs)\n return val\n if value is not _undefined:\n return certify(value)\n else:\n return certify\n return wrapper\n return decorator", - "docstring": "Decorator that can wrap raw functions to create a certifier function.\n\n Certifier functions support partial application. If a function wrapped by\n `make_certifier` is called with a value as its first argument it will be\n certified immediately. If no value is passed, then it will return a\n function that can be called at a later time.\n\n Assuming that `certify_something` has been decorated by `make_certifier`:\n >>> certify_something(value, foo=1, bar=2)\n\n Is equivalent to:\n >>> certifier = certify_something(foo=1, bar=2)\n >>> certifier(value)" - }, - { - "code": "def include(self, **attrs):\n for k, v in attrs.items():\n include = getattr(self, '_include_' + k, None)\n if include:\n include(v)\n else:\n self._include_misc(k, v)", - "docstring": "Add items to distribution that are named in keyword arguments\n\n For example, 'dist.include(py_modules=[\"x\"])' would add 'x' to\n the distribution's 'py_modules' attribute, if it was not already\n there.\n\n Currently, this method only supports inclusion for attributes that are\n lists or tuples. If you need to add support for adding to other\n attributes in this or a subclass, you can add an '_include_X' method,\n where 'X' is the name of the attribute. The method will be called with\n the value passed to 'include()'. So, 'dist.include(foo={\"bar\":\"baz\"})'\n will try to call 'dist._include_foo({\"bar\":\"baz\"})', which can then\n handle whatever special inclusion logic is needed." - }, - { - "code": "def store(self):\n size = self.board.SIZE\n cells = []\n for i in range(size):\n for j in range(size):\n cells.append(str(self.board.getCell(j, i)))\n score_str = \"%s\\n%d\" % (' '.join(cells), self.score)\n try:\n with open(self.store_file, 'w') as f:\n f.write(score_str)\n except:\n return False\n return True", - "docstring": "save the current game session's score and data for further use" - }, - { - "code": "def previous(self, day_of_week=None):\n if day_of_week is None:\n day_of_week = self.day_of_week\n if day_of_week < SUNDAY or day_of_week > SATURDAY:\n raise ValueError(\"Invalid day of week\")\n dt = self.subtract(days=1)\n while dt.day_of_week != day_of_week:\n dt = dt.subtract(days=1)\n return dt", - "docstring": "Modify to the previous occurrence of a given day of the week.\n If no day_of_week is provided, modify to the previous occurrence\n of the current day of the week. Use the supplied consts\n to indicate the desired day_of_week, ex. 
pendulum.MONDAY.\n\n :param day_of_week: The previous day of week to reset to.\n :type day_of_week: int or None\n\n :rtype: Date" - }, - { - "code": "def setup_temp_logger(log_level='error'):\n if is_temp_logging_configured():\n logging.getLogger(__name__).warning(\n 'Temporary logging is already configured'\n )\n return\n if log_level is None:\n log_level = 'warning'\n level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)\n handler = None\n for handler in logging.root.handlers:\n if handler in (LOGGING_NULL_HANDLER, LOGGING_STORE_HANDLER):\n continue\n if not hasattr(handler, 'stream'):\n continue\n if handler.stream is sys.stderr:\n break\n else:\n handler = LOGGING_TEMP_HANDLER\n handler.setLevel(level)\n formatter = logging.Formatter(\n '[%(levelname)-8s] %(message)s', datefmt='%H:%M:%S'\n )\n handler.setFormatter(formatter)\n logging.root.addHandler(handler)\n if LOGGING_NULL_HANDLER is not None:\n LOGGING_NULL_HANDLER.sync_with_handlers([handler])\n else:\n logging.getLogger(__name__).debug(\n 'LOGGING_NULL_HANDLER is already None, can\\'t sync messages '\n 'with it'\n )\n __remove_null_logging_handler()\n global __TEMP_LOGGING_CONFIGURED\n __TEMP_LOGGING_CONFIGURED = True", - "docstring": "Setup the temporary console logger" - }, - { - "code": "def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,\n request, request_token=None, access_token=None):\n raise self._subclass_must_implement(\"validate_timestamp_and_nonce\")", - "docstring": "Validates that the nonce has not been used before.\n\n :param client_key: The client/consumer key.\n :param timestamp: The ``oauth_timestamp`` parameter.\n :param nonce: The ``oauth_nonce`` parameter.\n :param request_token: Request token string, if any.\n :param access_token: Access token string, if any.\n :param request: OAuthlib request.\n :type request: oauthlib.common.Request\n :returns: True or False\n\n Per `Section 3.3`_ of the spec.\n\n \"A nonce is a random string, uniquely generated by the client to allow\n the server to verify that a request has never been made before and\n helps prevent replay attacks when requests are made over a non-secure\n channel. The nonce value MUST be unique across all requests with the\n same timestamp, client credentials, and token combinations.\"\n\n .. _`Section 3.3`: https://tools.ietf.org/html/rfc5849#section-3.3\n\n One of the first validation checks that will be made is for the validity\n of the nonce and timestamp, which are associated with a client key and\n possibly a token. If invalid then immediately fail the request\n by returning False. If the nonce/timestamp pair has been used before and\n you may just have detected a replay attack. 
Therefore it is an essential\n part of OAuth security that you not allow nonce/timestamp reuse.\n Note that this validation check is done before checking the validity of\n the client and token.::\n\n nonces_and_timestamps_database = [\n (u'foo', 1234567890, u'rannoMstrInghere', u'bar')\n ]\n\n def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,\n request_token=None, access_token=None):\n\n return ((client_key, timestamp, nonce, request_token or access_token)\n not in self.nonces_and_timestamps_database)\n\n This method is used by\n\n * AccessTokenEndpoint\n * RequestTokenEndpoint\n * ResourceEndpoint\n * SignatureOnlyEndpoint" - }, - { - "code": "def get_followers(self, query):\n api = self._connectToAPI()\n self._rate_limit_status(api=api, mode=\"get_followers\")\n try:\n friends_ids = api.followers_ids(query)\n except:\n return []\n return friends_ids", - "docstring": "Method to get the followers of a user.\n\n :param query: Query to be performed.\n\n :return: List of ids." - }, - { - "code": "def _listen_commands(self):\n self._last_update = None\n update_body = {'timeout': 2}\n while True:\n latest = self._last_update\n update_body.update({'offset': latest + 1} if latest else {})\n update_resp = self.client.get_updates(update_body)\n update_resp.add_done_callback(self._respond_commands)\n yield gen.sleep(5)", - "docstring": "Monitor new updates and send them further to\n self._respond_commands, where bot actions\n are decided." - }, - { - "code": "def set_sampling_interval(self, interval):\n task = asyncio.ensure_future(self.core.set_sampling_interval(interval))\n self.loop.run_until_complete(task)", - "docstring": "This method sets the sampling interval for the Firmata loop method\n\n :param interval: time in milliseconds\n\n :returns: No return value" - }, - { - "code": "def notify_observers(self, which=None, min_priority=None):\n if self._update_on:\n if which is None:\n which = self\n if min_priority is None:\n [callble(self, which=which) for _, _, callble in self.observers]\n else:\n for p, _, callble in self.observers:\n if p <= min_priority:\n break\n callble(self, which=which)", - "docstring": "Notifies all observers. Which is the element, which kicked off this\n notification loop. The first argument will be self, the second `which`.\n\n .. 
note::\n \n notifies only observers with priority p > min_priority!\n \n :param min_priority: only notify observers with priority > min_priority\n if min_priority is None, notify all observers in order" - }, - { - "code": "def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n if not (event_name in Events and closing_event_name in Events):\n raise ValueError(\"Logging and closing events should be only ignite.engine.Events\")\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n event_name=event_name,\n closing_event_name=closing_event_name)\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)", - "docstring": "Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list, optional): list of the metrics names to log as the bar progresses\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`." - }, - { - "code": "def _unpack_sdp_into_packet(packet, bytestring):\n packet.data = bytestring[10:]\n (flags, packet.tag, dest_cpu_port, src_cpu_port,\n packet.dest_y, packet.dest_x,\n packet.src_y, packet.src_x) = struct.unpack_from('<2x8B', bytestring)\n packet.reply_expected = flags == FLAG_REPLY\n packet.dest_cpu = dest_cpu_port & 0x1f\n packet.dest_port = (dest_cpu_port >> 5)\n packet.src_cpu = src_cpu_port & 0x1f\n packet.src_port = (src_cpu_port >> 5)", - "docstring": "Unpack the SDP header from a bytestring into a packet.\n\n Parameters\n ----------\n packet : :py:class:`.SDPPacket`\n Packet into which to store the unpacked header.\n bytestring : bytes\n Bytes from which to unpack the header data." - }, - { - "code": "def resolve_asset_dependency(self):\n for node in self.asset.findall(\"./*[@file]\"):\n file = node.get(\"file\")\n abs_path = os.path.abspath(self.folder)\n abs_path = os.path.join(abs_path, file)\n node.set(\"file\", abs_path)", - "docstring": "Converts every file dependency into absolute path so when we merge we don't break things." 
- }, - { - "code": "def get_msdn_ref(name):\n in_msdn = False\n if name in MSDN_VALUE_TYPES:\n name = MSDN_VALUE_TYPES[name]\n in_msdn = True\n if name.startswith('System.'):\n in_msdn = True\n if in_msdn:\n link = name.split('<')[0]\n if link in MSDN_LINK_MAP:\n link = MSDN_LINK_MAP[link]\n else:\n link = link.lower()\n url = 'https://msdn.microsoft.com/en-us/library/'+link+'.aspx'\n node = nodes.reference(name, shorten_type(name))\n node['refuri'] = url\n node['reftitle'] = name\n return node\n else:\n return None", - "docstring": "Try and create a reference to a type on MSDN" - }, - { - "code": "def rename(self, name_dict=None, inplace=None, **names):\n inplace = _check_inplace(inplace)\n name_dict = either_dict_or_kwargs(name_dict, names, 'rename')\n for k, v in name_dict.items():\n if k not in self and k not in self.dims:\n raise ValueError(\"cannot rename %r because it is not a \"\n \"variable or dimension in this dataset\" % k)\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dim_dict=name_dict)\n return self._replace(variables, coord_names, dims=dims,\n indexes=indexes, inplace=inplace)", - "docstring": "Returns a new object with renamed variables and dimensions.\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or dimension names and\n whose values are the desired names.\n inplace : bool, optional\n If True, rename variables and dimensions in-place. Otherwise,\n return a new dataset object.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables and dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n DataArray.rename" - }, - { - "code": "def _stream(self):\n timer = None\n try:\n proc = subprocess.Popen(\n self.cmd, cwd=self.cwd, env=self.env,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n timer = threading.Timer(\n self.timeout,\n kill_proc, [proc, self.cmd, time.time()]\n )\n timer.start()\n yield proc\n finally:\n if timer is not None:\n timer.cancel()", - "docstring": "execute subprocess with timeout\n\n Usage::\n\n >>> with cmd_proc.run_with_timeout() as cmd_proc:\n ... 
stdout, stderr = cmd_proc.communicate()\n ...\n >>> assert cmd_proc.proc.return_code == 0, \"proc exec failed\"" - }, - { - "code": "def ReadUser(self, user_link, options=None):\n if options is None:\n options = {}\n path = base.GetPathFromLink(user_link)\n user_id = base.GetResourceIdOrFullNameFromLink(user_link)\n return self.Read(path, 'users', user_id, None, options)", - "docstring": "Reads a user.\n\n :param str user_link:\n The link to the user entity.\n :param dict options:\n The request options for the request.\n\n :return:\n The read User.\n :rtype:\n dict" - }, - { - "code": "def parse_size_name(type_name):\n if ' ' in type_name:\n raise ArgumentError(\"There should not be a space in config variable type specifier\", specifier=type_name)\n variable = False\n count = 1\n base_type = type_name\n if type_name[-1] == ']':\n variable = True\n start_index = type_name.find('[')\n if start_index == -1:\n raise ArgumentError(\"Could not find matching [ for ] character\", specifier=type_name)\n count = int(type_name[start_index+1:-1], 0)\n base_type = type_name[:start_index]\n matched_type = TYPE_CODES.get(base_type)\n if matched_type is None:\n raise ArgumentError(\"Could not find base type name\", base_type=base_type, type_string=type_name)\n base_size = struct.calcsize(\"<%s\" % matched_type)\n total_size = base_size*count\n return total_size, base_size, matched_type, variable", - "docstring": "Calculate size and encoding from a type name.\n\n This method takes a C-style type string like uint8_t[10] and returns\n - the total size in bytes\n - the unit size of each member (if it's an array)\n - the scruct.{pack,unpack} format code for decoding the base type\n - whether it is an array." - }, - { - "code": "def spawn(spec, kwargs, pass_fds=()):\n r, w = os.pipe()\n for fd in [r] + list(pass_fds):\n set_inheritable(fd, True)\n preparation_data = get_preparation_data()\n r_handle = get_handle(r)\n args, env = get_command_line(pipe_handle=r_handle)\n process = subprocess.Popen(args, env=env, close_fds=False)\n to_child = os.fdopen(w, 'wb')\n to_child.write(pickle.dumps([preparation_data, spec, kwargs]))\n to_child.close()\n return process", - "docstring": "Invoke a python function in a subprocess." - }, - { - "code": "def associate_by_user_id(backend, details, response, user=None, *args, **kwargs):\n if user:\n return None\n user_id = response.get('id')\n if user_id:\n for provider in ('google-appengine-oauth', 'google-appengine-oauth2'):\n social = backend.strategy.storage.user.get_social_auth(provider, user_id)\n if social:\n user = social.user\n if user:\n return {'user': user}", - "docstring": "Associate current auth with a user with the same Google user_id in the DB." - }, - { - "code": "def latlon_to_grid(latlon):\n from MAVProxy.modules.lib.ANUGA import redfearn\n (zone, easting, northing) = redfearn.redfearn(latlon[0], latlon[1])\n if latlon[0] < 0:\n hemisphere = 'S'\n else:\n hemisphere = 'N'\n return UTMGrid(zone, easting, northing, hemisphere=hemisphere)", - "docstring": "convert to grid reference" - }, - { - "code": "def print_clusters(self, names=False):\n clusters = self.get_clusters()\n if names:\n clusters = [cluster['name'] for cluster in clusters]\n pprint.pprint(clusters)", - "docstring": "Print contexts." 
- }, - { - "code": "def _apply_krauss_single_qubit(krauss: Union[Tuple[Any], Sequence[Any]],\n args: 'ApplyChannelArgs') -> np.ndarray:\n zero_left = linalg.slice_for_qubits_equal_to(args.left_axes, 0)\n one_left = linalg.slice_for_qubits_equal_to(args.left_axes, 1)\n zero_right = linalg.slice_for_qubits_equal_to(args.right_axes, 0)\n one_right = linalg.slice_for_qubits_equal_to(args.right_axes, 1)\n for krauss_op in krauss:\n np.copyto(dst=args.target_tensor,\n src=args.auxiliary_buffer0)\n linalg.apply_matrix_to_slices(\n args.target_tensor,\n krauss_op,\n [zero_left, one_left],\n out=args.auxiliary_buffer1)\n linalg.apply_matrix_to_slices(\n args.auxiliary_buffer1,\n np.conjugate(krauss_op),\n [zero_right, one_right],\n out=args.target_tensor)\n args.out_buffer += args.target_tensor\n return args.out_buffer", - "docstring": "Use slicing to apply single qubit channel." - }, - { - "code": "def initiate(self, aspect=None, view=None, parent=None, officer=None):\n def got_view(view):\n if view is None:\n return None\n return init(view)\n def init(view):\n self.view = view\n d = self.call_mro(\"init\")\n d.addCallback(retrieve_reference)\n d.addCallback(update_reference)\n return d\n def retrieve_reference(_param):\n if callable(self._model_reference):\n context = self.make_context()\n return self._model_reference(self.source, context)\n return self._model_reference\n def update_reference(reference):\n self.reference = reference\n return self\n if officer is not None:\n self.officer = IOfficer(officer)\n self.aspect = IAspect(aspect) if aspect is not None else None\n if self._model_view is not None:\n if callable(self._model_view):\n context = self.make_context(view=view)\n d = self._model_view(None, context)\n return d.addCallback(got_view)\n return init(self._model_view)\n return init(view)", - "docstring": "Do not keep any reference to its parent,\n this way it can be garbage-collected." 
- }, - { - "code": "def _make_bz_instance(opt):\n if opt.bztype != 'auto':\n log.info(\"Explicit --bztype is no longer supported, ignoring\")\n cookiefile = None\n tokenfile = None\n use_creds = False\n if opt.cache_credentials:\n cookiefile = opt.cookiefile or -1\n tokenfile = opt.tokenfile or -1\n use_creds = True\n bz = bugzilla.Bugzilla(\n url=opt.bugzilla,\n cookiefile=cookiefile,\n tokenfile=tokenfile,\n sslverify=opt.sslverify,\n use_creds=use_creds,\n cert=opt.cert)\n return bz", - "docstring": "Build the Bugzilla instance we will use" - }, - { - "code": "def _read_opt_ip_dff(self, code, *, desc):\n _type = self._read_opt_type(code)\n _size = self._read_unpack(1)\n if _size != 2:\n raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')\n _verf = self._read_binary(1)\n _seqn = self._read_unpack(2)\n opt = dict(\n desc=desc,\n type=_type,\n length=_size + 2,\n version=_verf[:2],\n flags=dict(\n dup=True if int(_verf[2], base=2) else False,\n ret=True if int(_verf[3], base=2) else False,\n ),\n seq=_seqn,\n )\n return opt", - "docstring": "Read HOPOPT IP_DFF option.\n\n Structure of HOPOPT IP_DFF option [RFC 6971]:\n 1 2 3\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n | Next Header | Hdr Ext Len | OptTypeDFF | OptDataLenDFF |\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n |VER|D|R|0|0|0|0| Sequence Number | Pad1 |\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\n Octets Bits Name Description\n 0 0 hopopt.ip_dff.type Option Type\n 0 0 hopopt.ip_dff.type.value Option Number\n 0 0 hopopt.ip_dff.type.action Action (11)\n 0 2 hopopt.ip_dff.type.change Change Flag (1)\n 1 8 hopopt.ip_dff.length Length of Option Data\n 2 16 hopopt.ip_dff.version Version\n 2 18 hopopt.ip_dff.flags Flags\n 2 18 hopopt.ip_dff.flags.dup DUP Flag\n 2 19 hopopt.ip_dff.flags.ret RET Flag\n 2 20 - Reserved\n 3 24 hopopt.ip_dff.seq Sequence Number" - }, - { - "code": "def _put_attachment_data(self, id, filename, data, content_type, include_online=False):\n uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])\n params = {'IncludeOnline': 'true'} if include_online else {}\n headers = {'Content-Type': content_type, 'Content-Length': str(len(data))}\n return uri, params, 'put', data, headers, False", - "docstring": "Upload an attachment to the Xero object." - }, - { - "code": "def create_cluster(resource_root, name, version=None, fullVersion=None):\n if version is None and fullVersion is None:\n raise Exception(\"Either 'version' or 'fullVersion' must be specified\")\n if fullVersion is not None:\n api_version = 6\n version = None\n else:\n api_version = 1\n apicluster = ApiCluster(resource_root, name, version, fullVersion)\n return call(resource_root.post, CLUSTERS_PATH, ApiCluster, True,\n data=[apicluster], api_version=api_version)[0]", - "docstring": "Create a cluster\n @param resource_root: The root Resource object.\n @param name: Cluster name\n @param version: Cluster CDH major version (eg: \"CDH4\")\n - The CDH minor version will be assumed to be the\n latest released version for CDH4, or 5.0 for CDH5.\n @param fullVersion: Cluster's full CDH version. 
(eg: \"5.1.1\")\n - If specified, 'version' will be ignored.\n - Since: v6\n @return: An ApiCluster object" - }, - { - "code": "def as_object(obj):\n LOGGER.debug('as_object(%s)', obj)\n if isinstance(obj, datetime.date):\n return as_date(obj)\n elif hasattr(obj, '__dict__'):\n out = {k: obj.__dict__[k] for k in obj.__dict__ if not k.startswith('_')}\n for k, v in (\n (p, getattr(obj, p))\n for p, _ in inspect.getmembers(\n obj.__class__,\n lambda x: isinstance(x, property))\n ):\n out[k] = v\n return out", - "docstring": "Return a JSON serializable type for ``o``.\n\n Args:\n obj (:py:class:`object`): the object to be serialized.\n\n Raises:\n :py:class:`AttributeError`:\n when ``o`` is not a Python object.\n\n Returns:\n (dict): JSON serializable type for the given object." - }, - { - "code": "def new(self, name, *args, **kwargs):\n if name in self._instance_map:\n raise ValueError('Instance {0} is already initialized'\n .format(name))\n instance = self._class_map[name](*args, **kwargs)\n self._instance_map[name] = instance\n return instance", - "docstring": "Create an instance.\n\n Args:\n name (str): The name of the class\n args: The arguments to pass to the class.\n kwargs: The keyword arguments to pass to the class.\n\n Returns:\n instance" - }, - { - "code": "def target_heating_level(self):\n try:\n if self.side == 'left':\n level = self.device.device_data['leftTargetHeatingLevel']\n elif self.side == 'right':\n level = self.device.device_data['rightTargetHeatingLevel']\n return level\n except TypeError:\n return None", - "docstring": "Return target heating level." - }, - { - "code": "def do_format(self, format):\n if (format is not None):\n raise IIIFError(code=415, parameter=\"format\",\n text=\"Null manipulator does not support specification of output format.\")\n if (self.outfile is None):\n self.outfile = self.srcfile\n else:\n try:\n shutil.copyfile(self.srcfile, self.outfile)\n except IOError as e:\n raise IIIFError(code=500,\n text=\"Failed to copy file (%s).\" % (str(e)))\n self.mime_type = None", - "docstring": "Null implementation of format selection.\n\n This is the last step, this null implementation does not accept any\n specification of a format because we don't even know what the input\n format is." - }, - { - "code": "def list_tables(self):\n if not self._tables:\n for table_name in os.listdir(self.db_path):\n self._tables[table_name] = self._load_table(table_name)\n return self._tables.keys()", - "docstring": "Load existing tables and their descriptions.\n\n :return:" - }, - { - "code": "def config_args(self):\n self.arg_parser.add_argument('--version', action='version',\n version='%(prog)s ' + str(__version__))\n self.arg_parser.add_argument('--verbose',\n action='store_true', dest = 'verbosemode',\n help=_('set verbose terminal output'))\n self.arg_parser.add_argument('-s',\n action='store_true', dest = 'silentmode',\n help=_('silence terminal output'))\n self.arg_parser.add_argument('--list-parsers',\n action='store_true', dest='list_parsers',\n help=_('return a list of available parsers'))\n self.arg_parser.add_argument('-p',\n action='store', dest='parser', default='syslog',\n help=_('select a parser (default: syslog)'))\n self.arg_parser.add_argument('-z', '--unzip',\n action='store_true', dest='unzip',\n help=_('include files compressed with gzip'))\n self.arg_parser.add_argument('-t',\n action='store', dest='tzone',\n help=_('specify timezone offset to UTC (e.g. 
\\'+0500\\')'))\n self.arg_parser.add_argument('files',\n metavar='file', nargs='*',\n help=_('specify input files'))\n self.arg_parser.add_argument_group(self.filter_args)\n self.arg_parser.add_argument_group(self.output_args)\n self.args = self.arg_parser.parse_args()", - "docstring": "Set config options" - }, - { - "code": "def type(self):\n \"The type of elements stored in the mapping.\"\n if self._type is None and len(self):\n self._type = self.values()[0].__class__\n return self._type", - "docstring": "The type of elements stored in the mapping." - }, - { - "code": "def ancestor(self, value):\n if not isinstance(value, Key):\n raise TypeError(\"Ancestor must be a Key\")\n self._ancestor = value", - "docstring": "Set the ancestor for the query\n\n :type value: :class:`~google.cloud.datastore.key.Key`\n :param value: the new ancestor key" - }, - { - "code": "def deploy_chef(ask=\"yes\", version=\"11\"):\n env.host_string = lib.get_env_host_string()\n if ask == \"no\" or littlechef.noninteractive:\n print(\"Deploying Chef using omnibus installer version: ...\".format(version))\n else:\n message = ('\\nAre you sure you want to install Chef version:'\n '{0} on node {1}?'.format(version, env.host_string))\n if not confirm(message):\n abort('Aborted by user')\n lib.print_header(\"Configuring Chef Solo on {0}\".format(env.host_string))\n if not __testing__:\n solo.install(version)\n solo.configure()\n with settings(hide('stdout'), warn_only=True):\n output = sudo('ohai -l warn')\n if output.succeeded:\n try:\n ohai = json.loads(output)\n except ValueError:\n abort(\"Could not parse ohai's output\"\n \":\\n {0}\".format(output))\n node = {\"run_list\": []}\n for attribute in [\"ipaddress\", \"platform\", \"platform_family\",\n \"platform_version\"]:\n if ohai.get(attribute):\n node[attribute] = ohai[attribute]\n chef.save_config(node)", - "docstring": "Install chef-solo on a node" - }, - { - "code": "def get_holiday_label(self, day):\n day = cleaned_date(day)\n return {day: label for day, label in self.holidays(day.year)\n }.get(day)", - "docstring": "Return the label of the holiday, if the date is a holiday" - }, - { - "code": "def dist(self, x1, x2):\n dnorms = np.fromiter(\n ((x1i - x2i).norm() for x1i, x2i in zip(x1, x2)),\n dtype=np.float64, count=len(x1))\n if self.exponent == float('inf'):\n return self.const * np.linalg.norm(dnorms, ord=self.exponent)\n else:\n return (self.const ** (1 / self.exponent) *\n np.linalg.norm(dnorms, ord=self.exponent))", - "docstring": "Calculate the constant-weighted distance between two elements.\n\n Parameters\n ----------\n x1, x2 : `ProductSpaceElement`\n Elements whose mutual distance is calculated.\n\n Returns\n -------\n dist : float\n The distance between the elements." - }, - { - "code": "def makeaddress(self, label):\n addr = address.new(label)\n if not addr.repo:\n addr.repo = self.address.repo\n if not addr.path:\n addr.path = self.address.path\n return addr", - "docstring": "Turn a label into an Address with current context.\n\n Adds repo and path if given a label that only has a :target part." 
- }, - { - "code": "def complete(\n response: Response,\n project: typing.Union[Project, None],\n starting: ProjectStep = None,\n force: bool = False,\n limit: int = -1\n) -> list:\n if project is None:\n project = cauldron.project.get_internal_project()\n starting_index = 0\n if starting:\n starting_index = project.steps.index(starting)\n count = 0\n steps_run = []\n for ps in project.steps:\n if 0 < limit <= count:\n break\n if ps.index < starting_index:\n continue\n if not force and not ps.is_dirty():\n if limit < 1:\n environ.log(\n '[{}]: Nothing to update'.format(ps.definition.name)\n )\n continue\n count += 1\n steps_run.append(ps)\n success = source.run_step(response, project, ps, force=True)\n if not success or project.stop_condition.halt:\n return steps_run\n return steps_run", - "docstring": "Runs the entire project, writes the results files, and returns the URL to\n the report file\n\n :param response:\n :param project:\n :param starting:\n :param force:\n :param limit:\n :return:\n Local URL to the report path" - }, - { - "code": "def overlaps(self,junc,tolerance=0):\n if not self.left.overlaps(junc.left,padding=tolerance): return False\n if not self.right.overlaps(junc.right,padding=tolerance): return False\n return True", - "docstring": "see if junction overlaps with tolerance" - }, - { - "code": "def get_current_cmus():\n result = subprocess.run('cmus-remote -Q'.split(' '), check=True,\n stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n info = {}\n for line in result.stdout.decode().split('\\n'):\n line = line.split(' ')\n if line[0] != 'tag':\n continue\n key = line[1]\n if key in ['album', 'title', 'artist', 'albumartist'] and\\\n key not in info:\n info[key] = ' '.join(line[2:])\n if 'albumartist' in info:\n info['artist'] = info['albumartist']\n del info['albumartist']\n return Song(**info)", - "docstring": "Get the current song from cmus." - }, - { - "code": "def visit_Call(self, node, **kwargs):\n lineNum = node.lineno - 1\n match = AstWalker.__implementsRE.match(self.lines[lineNum])\n if match:\n self.lines[lineNum] = '{0}\n match.group(1), match.group(2), linesep,\n self.lines[lineNum].rstrip())\n if self.options.debug:\n stderr.write(\"\n linesep))\n self.generic_visit(node, containingNodes=kwargs['containingNodes'])", - "docstring": "Handles function calls within code.\n\n Function calls in Python are used to represent interface implementations\n in addition to their normal use. If a call appears to mark an\n implementation, it gets labeled as such for Doxygen." - }, - { - "code": "def get_current_repo():\n remote_url = subprocess.check_output(['git', 'config', '--get',\n 'remote.origin.url']).decode('utf-8')\n _, org, git_repo = remote_url.rsplit('.git', 1)[0].rsplit('/', 2)\n return (org + '/' + git_repo)", - "docstring": "Get the GitHub repo name for the current directory.\n\n Assumes that the repo is in the ``origin`` remote." 
- }, - { - "code": "def interpolate_to_isosurface(level_var, interp_var, level, **kwargs):\n r\n bottom_up_search = kwargs.pop('bottom_up_search', True)\n above, below, good = metpy.calc.find_bounding_indices(level_var, [level], axis=0,\n from_below=bottom_up_search)\n interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above]))\n * (interp_var[below] - interp_var[above])) + interp_var[above]\n interp_level[~good] = np.nan\n minvar = (np.min(level_var, axis=0) >= level)\n maxvar = (np.max(level_var, axis=0) <= level)\n interp_level[0][minvar] = interp_var[-1][minvar]\n interp_level[0][maxvar] = interp_var[0][maxvar]\n return interp_level.squeeze()", - "docstring": "r\"\"\"Linear interpolation of a variable to a given vertical level from given values.\n\n This function assumes that highest vertical level (lowest pressure) is zeroth index.\n A classic use of this function would be to compute the potential temperature on the\n dynamic tropopause (2 PVU surface).\n\n Parameters\n ----------\n level_var: array_like (P, M, N)\n Level values in 3D grid on common vertical coordinate (e.g., PV values on\n isobaric levels). Assumes height dimension is highest to lowest in atmosphere.\n interp_var: array_like (P, M, N)\n Variable on 3D grid with same vertical coordinate as level_var to interpolate to\n given level (e.g., potential temperature on isobaric levels)\n level: int or float\n Desired interpolated level (e.g., 2 PVU surface)\n\n Other Parameters\n ----------------\n bottom_up_search : bool, optional\n Controls whether to search for levels bottom-up, or top-down. Defaults to\n True, which is bottom-up search.\n\n Returns\n -------\n interp_level: (M, N) ndarray\n The interpolated variable (e.g., potential temperature) on the desired level (e.g.,\n 2 PVU surface)\n\n Notes\n -----\n This function implements a linear interpolation to estimate values on a given surface.\n The prototypical example is interpolation of potential temperature to the dynamic\n tropopause (e.g., 2 PVU surface)" - }, - { - "code": "def change(self, symbol, typ, **kwargs):\n self.print_change(symbol, typ, **kwargs)\n if not self.dry_run:\n try:\n yield\n except:\n raise\n else:\n self.amazon.changes = True", - "docstring": "Print out a change and then do the change if not doing a dry run" - }, - { - "code": "def measure(self, geometry):\n message = 'Size with NaN value : geometry valid={valid}, WKT={wkt}'\n feature_size = 0\n if geometry.isMultipart():\n for single in geometry.asGeometryCollection():\n if self.geometry_type == QgsWkbTypes.LineGeometry:\n geometry_size = self.calculator.measureLength(single)\n else:\n geometry_size = self.calculator.measureArea(single)\n if not isnan(geometry_size):\n feature_size += geometry_size\n else:\n LOGGER.debug(message.format(\n valid=single.isGeosValid(),\n wkt=single.asWkt()))\n else:\n if self.geometry_type == QgsWkbTypes.LineGeometry:\n geometry_size = self.calculator.measureLength(geometry)\n else:\n geometry_size = self.calculator.measureArea(geometry)\n if not isnan(geometry_size):\n feature_size = geometry_size\n else:\n LOGGER.debug(message.format(\n valid=geometry.isGeosValid(),\n wkt=geometry.asWkt()))\n feature_size = round(feature_size)\n if self.output_unit:\n if self.output_unit != self.default_unit:\n feature_size = convert_unit(\n feature_size, self.default_unit, self.output_unit)\n return feature_size", - "docstring": "Measure the length or the area of a geometry.\n\n :param geometry: The geometry.\n :type geometry: QgsGeometry\n\n 
:return: The geometric size in the expected exposure unit.\n :rtype: float" - }, - { - "code": "def subscribe(self, feedUrl):\n response = self.httpPost(\n ReaderUrl.SUBSCRIPTION_EDIT_URL,\n {'ac':'subscribe', 's': feedUrl})\n if response and 'OK' in response:\n return True\n else:\n return False", - "docstring": "Adds a feed to the top-level subscription list\n\n Ubscribing seems idempotent, you can subscribe multiple times\n without error\n\n returns True or throws HTTPError" - }, - { - "code": "def true_false_returns(func):\n @functools.wraps(func)\n def _execute(*args, **kwargs):\n try:\n func(*args, **kwargs)\n return True\n except:\n return False\n return _execute", - "docstring": "Executes function, if error returns False, else True\n\n :param func: function to call\n :return: True iff ok, else False" - }, - { - "code": "def to_madeline(self):\n self.logger.debug(\"Returning madeline info\")\n if self.sex == 1:\n madeline_gender = 'M'\n elif self.sex == 2:\n madeline_gender = 'F'\n else:\n madeline_gender = '.'\n if self.father == '0':\n madeline_father = '.'\n else:\n madeline_father = self.father\n if self.mother == '0':\n madeline_mother = '.'\n else:\n madeline_mother = self.mother\n if self.phenotype == 1:\n madeline_phenotype = 'U'\n elif self.phenotype == 2:\n madeline_phenotype = 'A'\n else:\n madeline_phenotype = '.'\n return \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\".format(\n self.family, self.individual_id, madeline_gender, \n madeline_father, madeline_mother, madeline_phenotype,\n self.proband, self.consultand, self.alive\n )", - "docstring": "Return the individual info in a madeline formated string" - }, - { - "code": "def expanduser(path):\n if hdfs_fs.default_is_local():\n return os.path.expanduser(path)\n m = re.match(r'^~([^/]*)', path)\n if m is None:\n return path\n user = m.groups()[0] or common.DEFAULT_USER\n return '/user/%s%s' % (user, path[m.end(1):])", - "docstring": "Replace initial ``~`` or ``~user`` with the user's home directory.\n\n **NOTE:** if the default file system is HDFS, the ``~user`` form is\n expanded regardless of the user's existence." - }, - { - "code": "def ncores_reserved(self):\n return sum(task.manager.num_cores for task in self if task.status == task.S_SUB)", - "docstring": "Returns the number of cores reserved in this moment.\n A core is reserved if it's still not running but\n we have submitted the task to the queue manager." 
- }, - { - "code": "def _master(self):\n logger.info(\n 'Master at rank %d starts to allocate tasks',\n MPI.COMM_WORLD.Get_rank()\n )\n results = []\n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels \\\n else self.num_voxels\n current_task = (0, sending_voxels)\n status = MPI.Status()\n using_size = size\n for i in range(0, size):\n if i == self.master_rank:\n continue\n if current_task[1] == 0:\n using_size = i\n break\n logger.debug(\n 'master starts to send a task to worker %d' %\n i\n )\n comm.send(current_task,\n dest=i,\n tag=self._WORKTAG)\n next_start = current_task[0] + current_task[1]\n sending_voxels = self.voxel_unit \\\n if self.voxel_unit < self.num_voxels - next_start \\\n else self.num_voxels - next_start\n current_task = (next_start, sending_voxels)\n while using_size == size:\n if current_task[1] == 0:\n break\n result = comm.recv(source=MPI.ANY_SOURCE,\n tag=MPI.ANY_TAG,\n status=status)\n results += result\n comm.send(current_task,\n dest=status.Get_source(),\n tag=self._WORKTAG)\n next_start = current_task[0] + current_task[1]\n sending_voxels = self.voxel_unit \\\n if self.voxel_unit < self.num_voxels - next_start \\\n else self.num_voxels - next_start\n current_task = (next_start, sending_voxels)\n for i in range(0, using_size):\n if i == self.master_rank:\n continue\n result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)\n results += result\n for i in range(0, size):\n if i == self.master_rank:\n continue\n comm.send(None,\n dest=i,\n tag=self._TERMINATETAG)\n return results", - "docstring": "Master node's operation.\n\n Assigning tasks to workers and collecting results from them\n\n Parameters\n ----------\n None\n\n Returns\n -------\n results: list of tuple (voxel_id, accuracy)\n the accuracy numbers of all voxels, in accuracy descending order\n the length of array equals the number of voxels" - }, - { - "code": "def create_subfield_layer(aspect, ip):\n layer = []\n if 'PANTS' in aspect:\n layer = pgnreader.parse_pagan_file(FILE_SUBFIELD, ip, invert=False, sym=True)\n else:\n layer = pgnreader.parse_pagan_file(FILE_MIN_SUBFIELD, ip, invert=False, sym=True)\n return layer", - "docstring": "Reads the SUBFIELD.pgn file and creates\n the subfield layer." - }, - { - "code": "def set_scale(self):\n f_min = float(self.lower_freq)\n f_max = float(self.higher_freq)\n y_min = f_min\n y_max = f_max\n for y in range(self.image_height):\n freq = y_min + y / (self.image_height - 1.0) * (y_max - y_min)\n fft_bin = freq / f_max * (self.fft_size / 2 + 1)\n if fft_bin < self.fft_size / 2:\n alpha = fft_bin - int(fft_bin)\n self.y_to_bin.append((int(fft_bin), alpha * 255))", - "docstring": "generate the lookup which translates y-coordinate to fft-bin" - }, - { - "code": "def get_example_features(example):\n return (example.features.feature if isinstance(example, tf.train.Example)\n else example.context.feature)", - "docstring": "Returns the non-sequence features from the provided example." 
- }, - { - "code": "def get_fullpath(self, withext=True):\n p = self.get_path(self._obj)\n n = self.get_name(self._obj, withext)\n fp = os.path.join(p,n)\n return os.path.normpath(fp)", - "docstring": "Return the filepath with the filename\n\n :param withext: If True, return with the fileextension.\n :type withext: bool\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def set_maxdays(name, maxdays):\n pre_info = info(name)\n if maxdays == pre_info['max']:\n return True\n cmd = 'passwd -x {0} {1}'.format(maxdays, name)\n __salt__['cmd.run'](cmd, python_shell=False)\n post_info = info(name)\n if post_info['max'] != pre_info['max']:\n return post_info['max'] == maxdays", - "docstring": "Set the maximum number of days during which a password is valid. See man\n passwd.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' shadow.set_maxdays username 90" - }, - { - "code": "def handle(self, *args, **options):\n try:\n admin = User.objects.get(username='admin')\n except User.DoesNotExist:\n admin = User(\n username='admin',\n first_name='admin',\n last_name='admin',\n email='admin@localhost.localdomain',\n is_staff=True,\n is_active=True,\n is_superuser=True,\n )\n admin.set_password('admin')\n admin.save()", - "docstring": "Load a default admin user" - }, - { - "code": "def get_film_id(self, title, three_dimensional=False):\n films = self.film_search(title)\n for film in films:\n if (film['title'].find('3D') is - 1) is not three_dimensional:\n return film['edi']\n return -1", - "docstring": "get the film id using the title in conjunction with the searching function" - }, - { - "code": "def resample_multichan(xs, ann, fs, fs_target, resamp_ann_chan=0):\n assert resamp_ann_chan < xs.shape[1]\n lx = []\n lt = None\n for chan in range(xs.shape[1]):\n resampled_x, resampled_t = resample_sig(xs[:, chan], fs, fs_target)\n lx.append(resampled_x)\n if chan == resamp_ann_chan:\n lt = resampled_t\n new_sample = resample_ann(lt, ann.sample)\n assert ann.sample.shape == new_sample.shape\n resampled_ann = Annotation(record_name=ann.record_name,\n extension=ann.extension,\n sample=new_sample,\n symbol=ann.symbol,\n subtype=ann.subtype,\n chan=ann.chan,\n num=ann.num,\n aux_note=ann.aux_note,\n fs=fs_target)\n return np.column_stack(lx), resampled_ann", - "docstring": "Resample multiple channels with their annotations\n\n Parameters\n ----------\n xs: numpy array\n The signal array\n ann : wfdb Annotation\n The wfdb annotation object\n fs : int, or float\n The original frequency\n fs_target : int, or float\n The target frequency\n resample_ann_channel : int, optional\n The signal channel used to compute new annotation indices\n\n Returns\n -------\n resampled_xs : numpy array\n Array of the resampled signal values\n resampled_ann : wfdb Annotation\n Annotation containing resampled annotation locations" - }, - { - "code": "def tag(**tags):\n transaction = execution_context.get_transaction()\n if not transaction:\n error_logger.warning(\"Ignored tags %s. No transaction currently active.\", \", \".join(tags.keys()))\n else:\n transaction.tag(**tags)", - "docstring": "Tags current transaction. Both key and value of the tag should be strings." 
- }, - { - "code": "def apply(self, f, axes=None, filter=None, do_integrity_check=False,\n consolidate=True, **kwargs):\n result_blocks = []\n if filter is not None:\n filter_locs = set(self.items.get_indexer_for(filter))\n if len(filter_locs) == len(self.items):\n filter = None\n else:\n kwargs['filter'] = filter_locs\n if consolidate:\n self._consolidate_inplace()\n if f == 'where':\n align_copy = True\n if kwargs.get('align', True):\n align_keys = ['other', 'cond']\n else:\n align_keys = ['cond']\n elif f == 'putmask':\n align_copy = False\n if kwargs.get('align', True):\n align_keys = ['new', 'mask']\n else:\n align_keys = ['mask']\n elif f == 'fillna':\n align_copy = False\n align_keys = ['value']\n else:\n align_keys = []\n aligned_args = {k: kwargs[k]\n for k in align_keys\n if hasattr(kwargs[k], 'values') and\n not isinstance(kwargs[k], ABCExtensionArray)}\n for b in self.blocks:\n if filter is not None:\n if not b.mgr_locs.isin(filter_locs).any():\n result_blocks.append(b)\n continue\n if aligned_args:\n b_items = self.items[b.mgr_locs.indexer]\n for k, obj in aligned_args.items():\n axis = getattr(obj, '_info_axis_number', 0)\n kwargs[k] = obj.reindex(b_items, axis=axis,\n copy=align_copy)\n applied = getattr(b, f)(**kwargs)\n result_blocks = _extend_blocks(applied, result_blocks)\n if len(result_blocks) == 0:\n return self.make_empty(axes or self.axes)\n bm = self.__class__(result_blocks, axes or self.axes,\n do_integrity_check=do_integrity_check)\n bm._consolidate_inplace()\n return bm", - "docstring": "iterate over the blocks, collect and create a new block manager\n\n Parameters\n ----------\n f : the callable or function name to operate on at the block level\n axes : optional (if not supplied, use self.axes)\n filter : list, if supplied, only call the block if the filter is in\n the block\n do_integrity_check : boolean, default False. Do the block manager\n integrity check\n consolidate: boolean, default True. Join together blocks having same\n dtype\n\n Returns\n -------\n Block Manager (new object)" - }, - { - "code": "def Add(self, file_desc_proto):\n proto_name = file_desc_proto.name\n if proto_name not in self._file_desc_protos_by_file:\n self._file_desc_protos_by_file[proto_name] = file_desc_proto\n elif self._file_desc_protos_by_file[proto_name] != file_desc_proto:\n raise DescriptorDatabaseConflictingDefinitionError(\n '%s already added, but with different descriptor.' % proto_name)\n package = file_desc_proto.package\n for message in file_desc_proto.message_type:\n self._file_desc_protos_by_symbol.update(\n (name, file_desc_proto) for name in _ExtractSymbols(message, package))\n for enum in file_desc_proto.enum_type:\n self._file_desc_protos_by_symbol[\n '.'.join((package, enum.name))] = file_desc_proto\n for extension in file_desc_proto.extension:\n self._file_desc_protos_by_symbol[\n '.'.join((package, extension.name))] = file_desc_proto\n for service in file_desc_proto.service:\n self._file_desc_protos_by_symbol[\n '.'.join((package, service.name))] = file_desc_proto", - "docstring": "Adds the FileDescriptorProto and its types to this database.\n\n Args:\n file_desc_proto: The FileDescriptorProto to add.\n Raises:\n DescriptorDatabaseConflictingDefinitionError: if an attempt is made to\n add a proto with the same name but different definition than an\n exisiting proto in the database." 
- }, - { - "code": "def fetch(self):\n params = values.of({})\n payload = self._version.fetch(\n 'GET',\n self._uri,\n params=params,\n )\n return AssignedAddOnInstance(\n self._version,\n payload,\n account_sid=self._solution['account_sid'],\n resource_sid=self._solution['resource_sid'],\n sid=self._solution['sid'],\n )", - "docstring": "Fetch a AssignedAddOnInstance\n\n :returns: Fetched AssignedAddOnInstance\n :rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.AssignedAddOnInstance" - }, - { - "code": "def convert_sample_to_video_time(sample, orig_s_freq, sampleStamp,\n sampleTime):\n if sample < sampleStamp[0]:\n s_freq = orig_s_freq\n id0 = 0\n elif sample > sampleStamp[-1]:\n s_freq = orig_s_freq\n id0 = len(sampleStamp) - 1\n else:\n id0 = where(asarray(sampleStamp) <= sample)[0][-1]\n id1 = where(asarray(sampleStamp) >= sample)[0][0]\n if id0 == id1:\n return sampleTime[id0]\n s_freq = ((sampleStamp[id1] - sampleStamp[id0]) /\n (sampleTime[id1] - sampleTime[id0]).total_seconds())\n time_diff = timedelta(seconds=(sample - sampleStamp[id0]) / s_freq)\n return sampleTime[id0] + time_diff", - "docstring": "Convert sample number to video time, using snc information.\n\n Parameters\n ----------\n sample : int\n sample that you want to convert in time\n orig_s_freq : int\n sampling frequency (used as backup)\n sampleStamp : list of int\n Sample number from start of study\n sampleTime : list of datetime.datetime\n File time representation of sampleStamp\n\n Returns\n -------\n instance of datetime\n absolute time of the sample.\n\n Notes\n -----\n Note that there is a discrepancy of 4 or 5 hours between the time in\n snc and the time in the header. I'm pretty sure that the time in the\n header is accurate, so we use that. I think that the time in snc does\n not take into account the time zone (that'd explain the 4 or 5\n depending on summertime). This time is only used to get the right video\n so we call this \"video time\"." - }, - { - "code": "def _ModifyInterface(\n self, interface_config, config_key, config_value, replace=False):\n config_entry = '%s=%s' % (config_key, config_value)\n if not open(interface_config).read().count(config_key):\n with open(interface_config, 'a') as config:\n config.write('%s\\n' % config_entry)\n elif replace:\n for line in fileinput.input(interface_config, inplace=True):\n print(re.sub(r'%s=.*' % config_key, config_entry, line.rstrip()))", - "docstring": "Write a value to a config file if not already present.\n\n Args:\n interface_config: string, the path to a config file.\n config_key: string, the configuration key to set.\n config_value: string, the value to set for the configuration key.\n replace: bool, replace the configuration option if already present." - }, - { - "code": "def piece_wise_linear(scale, points):\n assert len(points) >= 2\n assert points[0][0] == 0\n assert points[-1][0] == 1\n assert all(i < j for i, j in zip(points[:-1], points[1:]))\n out = numpy.zeros((scale, 3))\n p1, c1 = points[0]\n p2, c2 = points[1]\n next_pt = 2\n for i in range(1, scale):\n v = i / scale\n if v > p2:\n p1, c1 = p2, c2\n p2, c2 = points[next_pt]\n next_pt += 1\n frac = (v - p1) / (p2 - p1)\n out[i, :] = c1 * (1 - frac) + c2 * frac\n return out", - "docstring": "Create a palette that is piece-wise linear given some colors at points." 
- }, - { - "code": "def train(self, nerdocs, mode_filename):\n trainer = pycrfsuite.Trainer(algorithm=self.algorithm,\n params={'c2': self.c2},\n verbose=self.verbose)\n for doc in nerdocs:\n for snt in doc.sentences:\n xseq = [t.feature_list() for t in snt]\n yseq = [t.label for t in snt]\n trainer.append(xseq, yseq)\n trainer.train(mode_filename)", - "docstring": "Train a CRF model using given documents.\n\n Parameters\n ----------\n nerdocs: list of estnltk.estner.ner.Document.\n The documents for model training.\n mode_filename: str\n The fielname where to save the model." - }, - { - "code": "def __parse(self) -> object:\n char = self.data[self.idx: self.idx + 1]\n if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:\n str_len = int(self.__read_to(b':'))\n return self.__read(str_len)\n elif char == b'i':\n self.idx += 1\n return int(self.__read_to(b'e'))\n elif char == b'd':\n return self.__parse_dict()\n elif char == b'l':\n return self.__parse_list()\n elif char == b'':\n raise bencodepy.DecodingError('Unexpected End of File at index position of {0}.'.format(str(self.idx)))\n else:\n raise bencodepy.DecodingError(\n 'Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))", - "docstring": "Selects the appropriate method to decode next bencode element and returns the result." - }, - { - "code": "def grab_project_data(prj):\n if not prj:\n return {}\n data = {}\n for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:\n try:\n data[section] = getattr(prj, section)\n except AttributeError:\n _LOGGER.debug(\"Project lacks section '%s', skipping\", section)\n return data", - "docstring": "From the given Project, grab Sample-independent data.\n\n There are some aspects of a Project of which it's beneficial for a Sample\n to be aware, particularly for post-hoc analysis. Since Sample objects\n within a Project are mutually independent, though, each doesn't need to\n know about any of the others. A Project manages its, Sample instances,\n so for each Sample knowledge of Project data is limited. This method\n facilitates adoption of that conceptual model.\n\n :param Project prj: Project from which to grab data\n :return Mapping: Sample-independent data sections from given Project" - }, - { - "code": "def set_ports(self, port0 = 0x00, port1 = 0x00):\n 'Writes specified value to the pins defined as output by method. Writing to input pins has no effect.'\n self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port0)\n self.bus.write_byte_data(self.address, self.CONTROL_PORT0, port1)\n return", - "docstring": "Writes specified value to the pins defined as output by method. Writing to input pins has no effect." - }, - { - "code": "def get_encoder_from_vocab(vocab_filepath):\n if not tf.gfile.Exists(vocab_filepath):\n raise ValueError(\"Vocab file does not exist: {}.\".format(vocab_filepath))\n tf.logging.info(\"Found vocab file: %s\", vocab_filepath)\n encoder = text_encoder.SubwordTextEncoder(vocab_filepath)\n return encoder", - "docstring": "Get encoder from vocab file.\n\n If vocab is not found in output dir, it will be copied there by\n copy_vocab_to_output_dir to clarify the vocab used to generate the data.\n\n Args:\n vocab_filepath: path to vocab, either local or cns\n\n Returns:\n A SubwordTextEncoder vocabulary object. None if the output_parallel_text\n is set." 
- }, - { - "code": "def put(self):\n try:\n self.cloudwatch.put_metric_data(\n Namespace=self.namespace,\n MetricData=[{\n 'MetricName': self.name,\n 'Value': self.value,\n 'Timestamp': self.timestamp\n }]\n )\n except Exception:\n logging.exception(\"Error pushing {0} to CloudWatch.\".format(str(self)))", - "docstring": "Push the info represented by this ``Metric`` to CloudWatch." - }, - { - "code": "def enable_disable(self):\n if self.enabled:\n self.data['enabled'] = False\n else:\n self.data['enabled'] = True\n self.update()", - "docstring": "Enable or disable this endpoint. If enabled, it will be disabled\n and vice versa.\n\n :return: None" - }, - { - "code": "def _and_join(self, terms):\n if len(terms) > 1:\n return ' AND '.join([self._or_join(t) for t in terms])\n else:\n return self._or_join(terms[0])", - "docstring": "Joins terms using AND operator.\n\n Args:\n terms (list): terms to join\n\n Examples:\n self._and_join(['term1']) -> 'term1'\n self._and_join(['term1', 'term2']) -> 'term1 AND term2'\n self._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3'\n\n Returns:\n str" - }, - { - "code": "def get_processing_block_ids(self):\n _processing_block_ids = []\n pattern = '*:processing_block:*'\n block_ids = self._db.get_ids(pattern)\n for block_id in block_ids:\n id_split = block_id.split(':')[-1]\n _processing_block_ids.append(id_split)\n return sorted(_processing_block_ids)", - "docstring": "Get list of processing block ids using the processing block id" - }, - { - "code": "def intersection(self, other, ignore_conflicts=False):\n result = self.copy()\n result.intersection_update(other, ignore_conflicts)\n return result", - "docstring": "Return a new definition from the intersection of the definitions." - }, - { - "code": "def lst_avg(lst):\n salt.utils.versions.warn_until(\n 'Neon',\n 'This results of this function are currently being rounded.'\n 'Beginning in the Salt Neon release, results will no longer be '\n 'rounded and this warning will be removed.',\n stacklevel=3\n )\n if not isinstance(lst, collections.Hashable):\n return float(sum(lst)/len(lst))\n return float(lst)", - "docstring": "Returns the average value of a list.\n\n .. code-block:: jinja\n\n {% my_list = [1,2,3,4] -%}\n {{ set my_list | avg }}\n\n will be rendered as:\n\n .. code-block:: yaml\n\n 2.5" - }, - { - "code": "def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:\n kwargs.update({\"user\": user})\n return self.api_call(\"users.getPresence\", http_verb=\"GET\", params=kwargs)", - "docstring": "Gets user presence information.\n\n Args:\n user (str): User to get presence info on. Defaults to the authed user.\n e.g. 'W1234567890'" - }, - { - "code": "def continuous_eval_on_train_data(self):\n for ckpt_path in next_checkpoint(self._hparams.model_dir,\n self._hparams.eval_timeout_mins):\n train_step = decoding.get_step_from_ckpt_path(ckpt_path)\n if train_step == 0:\n tf.logging.info(\"Skipping evaluation at step 0\")\n continue\n self.evaluate_on_train_data()", - "docstring": "Evaluate on train data until checkpoints stop being produced." 
- }, - { - "code": "def set_default_layouts(self, ignored_layouts=None):\n for key in self.__default_layouts_settings.allKeys():\n if ignored_layouts:\n if tuple((layout for layout in ignored_layouts if layout in key)):\n continue\n self.__settings.setValue(key, self.__default_layouts_settings.value(key))\n return True", - "docstring": "Sets the default layouts in the preferences file.\n\n :param ignored_layouts: Ignored layouts.\n :type ignored_layouts: tuple or list\n :return: Method success.\n :rtype: bool" - }, - { - "code": "def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,\n random_state=None):\n rng = check_random_state(random_state)\n n_dim = len(mean)\n rand = rng.randn(n_dim, n_samples)\n if n_samples == 1:\n rand.shape = (n_dim,)\n if covariance_type == 'spherical':\n rand *= np.sqrt(covar)\n elif covariance_type == 'diag':\n rand = np.dot(np.diag(np.sqrt(covar)), rand)\n else:\n s, U = linalg.eigh(covar)\n s.clip(0, out=s)\n np.sqrt(s, out=s)\n U *= s\n rand = np.dot(U, rand)\n return (rand.T + mean).T", - "docstring": "Generate random samples from a Gaussian distribution.\n\n Parameters\n ----------\n mean : array_like, shape (n_features,)\n Mean of the distribution.\n\n covar : array_like, optional\n Covariance of the distribution. The shape depends on `covariance_type`:\n scalar if 'spherical',\n (n_features) if 'diag',\n (n_features, n_features) if 'tied', or 'full'\n\n covariance_type : string, optional\n Type of the covariance parameters. Must be one of\n 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.\n\n n_samples : int, optional\n Number of samples to generate. Defaults to 1.\n\n Returns\n -------\n X : array, shape (n_features, n_samples)\n Randomly generated sample" - }, - { - "code": "def to_bigquery_field(self, name_case=DdlParseBase.NAME_CASE.original):\n col_name = self.get_name(name_case)\n mode = self.bigquery_mode\n if self.array_dimensional <= 1:\n type = self.bigquery_legacy_data_type\n else:\n type = \"RECORD\"\n fields = OrderedDict()\n fields_cur = fields\n for i in range(1, self.array_dimensional):\n is_last = True if i == self.array_dimensional - 1 else False\n fields_cur['fields'] = [OrderedDict()]\n fields_cur = fields_cur['fields'][0]\n fields_cur['name'] = \"dimension_{}\".format(i)\n fields_cur['type'] = self.bigquery_legacy_data_type if is_last else \"RECORD\"\n fields_cur['mode'] = self.bigquery_mode if is_last else \"REPEATED\"\n col = OrderedDict()\n col['name'] = col_name\n col['type'] = type\n col['mode'] = mode\n if self.array_dimensional > 1:\n col['fields'] = fields['fields']\n return json.dumps(col)", - "docstring": "Generate BigQuery JSON field define" - }, - { - "code": "def _bulk_size_generator(num_records, bulk_size, active):\n while active and num_records > 0:\n req_size = min(num_records, bulk_size)\n num_records -= req_size\n yield req_size", - "docstring": "Generate bulk_size until num_records is reached or active becomes false\n\n >>> gen = _bulk_size_generator(155, 50, [True])\n >>> list(gen)\n [50, 50, 50, 5]" - }, - { - "code": "def validate_image(image, number_tiles):\n TILE_LIMIT = 99 * 99\n try:\n number_tiles = int(number_tiles)\n except:\n raise ValueError('number_tiles could not be cast to integer.')\n if number_tiles > TILE_LIMIT or number_tiles < 2:\n raise ValueError('Number of tiles must be between 2 and {} (you \\\n asked for {}).'.format(TILE_LIMIT, number_tiles))", - "docstring": "Basic sanity checks prior to performing a split." 
- }, - { - "code": "def cross(triangles):\n vectors = np.diff(triangles, axis=1)\n crosses = np.cross(vectors[:, 0], vectors[:, 1])\n return crosses", - "docstring": "Returns the cross product of two edges from input triangles\n\n Parameters\n --------------\n triangles: (n, 3, 3) float\n Vertices of triangles\n\n Returns\n --------------\n crosses : (n, 3) float\n Cross product of two edge vectors" - }, - { - "code": "def encrypt(self, data, pad=True):\n encrypted_data = b\"\"\n for i in range(0, len(data), 8):\n block = data[i:i + 8]\n block_length = len(block)\n if block_length != 8 and pad:\n block += b\"\\x00\" * (8 - block_length)\n elif block_length != 8:\n raise ValueError(\"DES encryption must be a multiple of 8 \"\n \"bytes\")\n encrypted_data += self._encode_block(block)\n return encrypted_data", - "docstring": "DES encrypts the data based on the key it was initialised with.\n\n :param data: The bytes string to encrypt\n :param pad: Whether to right pad data with \\x00 to a multiple of 8\n :return: The encrypted bytes string" - }, - { - "code": "def handle(self, *args, **options):\n if options[\"sub_command\"] == \"add\":\n self.handle_add(options)\n elif options[\"sub_command\"] == \"update\":\n self.handle_update(options)\n elif options[\"sub_command\"] == \"details\":\n self.handle_details(options[\"username\"])\n elif options[\"sub_command\"] == \"list\":\n self.handle_list(options[\"all\"], options[\"csv\"])", - "docstring": "Forward to the right sub-handler" - }, - { - "code": "def _restore_case(s, memory):\n cased_s = []\n for i, c in enumerate(s):\n if i + 1 > len(memory):\n break\n cased_s.append(c if memory[i] else c.upper())\n return ''.join(cased_s)", - "docstring": "Restore a lowercase string's characters to their original case." 
- }, - { - "code": "def FilterMessages(\n self,\n Channel,\n FromID,\n ToID,\n Mode):\n try:\n res = self.__m_dllBasic.CAN_FilterMessages(Channel,FromID,ToID,Mode)\n return TPCANStatus(res)\n except:\n logger.error(\"Exception on PCANBasic.FilterMessages\")\n raise", - "docstring": "Configures the reception filter\n\n Remarks:\n The message filter will be expanded with every call to this function.\n If it is desired to reset the filter, please use the 'SetValue' function.\n\n Parameters:\n Channel : A TPCANHandle representing a PCAN Channel\n FromID : A c_uint value with the lowest CAN ID to be received\n ToID : A c_uint value with the highest CAN ID to be received\n Mode : A TPCANMode representing the message type (Standard, 11-bit\n identifier, or Extended, 29-bit identifier)\n\n Returns:\n A TPCANStatus error code" - }, - { - "code": "def get_resource_admin_session_for_bin(self, bin_id, proxy):\n if not self.supports_resource_admin():\n raise errors.Unimplemented()\n return sessions.ResourceAdminSession(bin_id, proxy, self._runtime)", - "docstring": "Gets a resource administration session for the given bin.\n\n arg: bin_id (osid.id.Id): the ``Id`` of the bin\n arg: proxy (osid.proxy.Proxy): a proxy\n return: (osid.resource.ResourceAdminSession) - ``a\n ResourceAdminSession``\n raise: NotFound - ``bin_id`` not found\n raise: NullArgument - ``bin_id`` or ``proxy`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: Unimplemented - ``supports_resource_admin()`` or\n ``supports_visible_federation()`` is ``false``\n *compliance: optional -- This method must be implemented if\n ``supports_resource_admin()`` and\n ``supports_visible_federation()`` are ``true``.*" - }, - { - "code": "def _get_brew_versions():\n from bcbio import install\n tooldir = install.get_defaults().get(\"tooldir\")\n brew_cmd = os.path.join(tooldir, \"bin\", \"brew\") if tooldir else \"brew\"\n try:\n vout = subprocess.check_output([brew_cmd, \"list\", \"--versions\"])\n except OSError:\n vout = \"\"\n out = {}\n for vstr in vout.split(\"\\n\"):\n if vstr.strip():\n parts = vstr.rstrip().split()\n name = parts[0]\n v = parts[-1]\n out[name] = v\n return out", - "docstring": "Retrieve versions of tools installed via brew." - }, - { - "code": "def serialize_encrypted_data_key(encrypted_data_key):\n encrypted_data_key_format = (\n \">\"\n \"H\"\n \"{provider_id_len}s\"\n \"H\"\n \"{provider_info_len}s\"\n \"H\"\n \"{enc_data_key_len}s\"\n )\n return struct.pack(\n encrypted_data_key_format.format(\n provider_id_len=len(encrypted_data_key.key_provider.provider_id),\n provider_info_len=len(encrypted_data_key.key_provider.key_info),\n enc_data_key_len=len(encrypted_data_key.encrypted_data_key),\n ),\n len(encrypted_data_key.key_provider.provider_id),\n to_bytes(encrypted_data_key.key_provider.provider_id),\n len(encrypted_data_key.key_provider.key_info),\n to_bytes(encrypted_data_key.key_provider.key_info),\n len(encrypted_data_key.encrypted_data_key),\n encrypted_data_key.encrypted_data_key,\n )", - "docstring": "Serializes an encrypted data key.\n\n .. 
versionadded:: 1.3.0\n\n :param encrypted_data_key: Encrypted data key to serialize\n :type encrypted_data_key: aws_encryption_sdk.structures.EncryptedDataKey\n :returns: Serialized encrypted data key\n :rtype: bytes" - }, - { - "code": "def list_groups_available_in_context_accounts(self, account_id, include=None, only_own_groups=None):\r\n path = {}\r\n data = {}\r\n params = {}\r\n path[\"account_id\"] = account_id\r\n if only_own_groups is not None:\r\n params[\"only_own_groups\"] = only_own_groups\r\n if include is not None:\r\n self._validate_enum(include, [\"tabs\"])\r\n params[\"include\"] = include\r\n self.logger.debug(\"GET /api/v1/accounts/{account_id}/groups with query params: {params} and form data: {data}\".format(params=params, data=data, **path))\r\n return self.generic_request(\"GET\", \"/api/v1/accounts/{account_id}/groups\".format(**path), data=data, params=params, all_pages=True)", - "docstring": "List the groups available in a context.\r\n\r\n Returns the list of active groups in the given context that are visible to user." - }, - { - "code": "def stylize(text, styles, reset=True):\n terminator = attr(\"reset\") if reset else \"\"\n return \"{}{}{}\".format(\"\".join(styles), text, terminator)", - "docstring": "conveniently styles your text as and resets ANSI codes at its end." - }, - { - "code": "def _exit_with_output(content, exit_code=0):\n (sys.stdout if exit_code == 0 else sys.stderr).write(content + os.linesep)\n sys.exit(exit_code)", - "docstring": "Exit the program with printing out messages.\n\n :param content: content to print out\n :param exit_code: Exit code" - }, - { - "code": "def addUnderlineAnnot(self, rect):\n CheckParent(self)\n val = _fitz.Page_addUnderlineAnnot(self, rect)\n if not val: return\n val.thisown = True\n val.parent = weakref.proxy(self)\n self._annot_refs[id(val)] = val\n return val", - "docstring": "Underline content in a rectangle or quadrilateral." 
- }, - { - "code": "def findFilesWithPattern(\n self, fileName, searchPattern, fillFindData, dokanFileInfo\n ):\n try:\n ret = self.operations('findFilesWithPattern', fileName, searchPattern)\n if ret is None:\n return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR\n for r in ret:\n create_ft = self.python_timestamp_to_win32_filetime(r['ctime'])\n last_access_ft = self.python_timestamp_to_win32_filetime(r['atime'])\n last_write_ft = self.python_timestamp_to_win32_filetime(r['wtime'])\n cft = ctypes.wintypes.FILETIME(create_ft[0], create_ft[1])\n laft = ctypes.wintypes.FILETIME(last_access_ft[0], last_access_ft[1])\n lwft = ctypes.wintypes.FILETIME(last_write_ft[0], last_write_ft[1])\n size = self.pyint_to_double_dwords(r['size'])\n File = ctypes.wintypes.WIN32_FIND_DATAW(\n ctypes.c_ulong(r['attr']),\n cft,\n laft,\n lwft,\n size[1],\n size[0],\n ctypes.c_ulong(0),\n ctypes.c_ulong(0),\n r['name'],\n '',\n )\n pFile = ctypes.wintypes.PWIN32_FIND_DATAW(File)\n fillFindData(pFile, dokanFileInfo)\n return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS\n except Exception as e:\n logging.error('%s', e)\n return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR", - "docstring": "Find files in a certain path that match the search pattern.\n\n :param fileName: path to search\n :type fileName: ctypes.c_wchar_p\n :param searchPattern: pattern to search for\n :type searchPattern: ctypes.c_wchar_p\n :param fillFindData: function pointer for populating search results\n :type fillFindData: PFillFindData\n :param dokanFileInfo: used by Dokan\n :type dokanFileInfo: PDOKAN_FILE_INFO\n :return: error code\n :rtype: ctypes.c_int" - }, - { - "code": "def check_credentials(self, username, password):\n return password is not None and self.credentials.get(username, None) == password", - "docstring": "Override credential checking to use configured credentials." - }, - { - "code": "def change_password():\n basic_auth = '%s:%s' % (DEFAULT_USERNAME, DEFAULT_PASSWORD)\n try:\n auth = base64.encodestring(basic_auth)\n except TypeError:\n auth = base64.encodestring(bytes(basic_auth, 'utf-8')).decode()\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Basic %s\" % auth.strip()\n }\n response = None\n retry = 0\n while not response:\n sleep(1)\n con = http.HTTPConnection('localhost:7474', timeout=10)\n try:\n con.request('GET', 'http://localhost:7474/user/neo4j', headers=headers)\n response = json.loads(con.getresponse().read().decode('utf-8'))\n except ValueError:\n con.close()\n retry += 1\n if retry > 10:\n print(\"Could not change password for user neo4j\")\n break\n if response and response.get('password_change_required', None):\n payload = json.dumps({'password': 'testing'})\n con.request('POST', 'http://localhost:7474/user/neo4j/password', payload, headers)\n print(\"Password changed for user neo4j\")\n con.close()", - "docstring": "Changes the standard password from neo4j to testing to be able to run the test suite." - }, - { - "code": "def is_set(self):\n if self.__event.is_set():\n if self.exception is not None:\n raise self.exception\n return True\n return False", - "docstring": "Returns True if the request has finished or False if it is still pending.\n\n Raises [LinkException](AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if the request failed due to a\n network related problem." 
- }, - { - "code": "def createPushParser(SAX, chunk, size, URI):\n ret = libxml2mod.xmlCreatePushParser(SAX, chunk, size, URI)\n if ret is None:raise parserError('xmlCreatePushParser() failed')\n return parserCtxt(_obj=ret)", - "docstring": "Create a progressive XML parser context to build either an\n event flow if the SAX object is not None, or a DOM tree\n otherwise." - }, - { - "code": "def _merge_model_defined_relation_wheres_to_has_query(self, has_query, relation):\n relation_query = relation.get_base_query()\n has_query.merge_wheres(relation_query.wheres, relation_query.get_bindings())\n self._query.add_binding(has_query.get_query().get_bindings(), \"where\")", - "docstring": "Merge the \"wheres\" from a relation query to a has query.\n\n :param has_query: The has query\n :type has_query: Builder\n\n :param relation: The relation to count\n :type relation: orator.orm.relations.Relation" - }, - { - "code": "def load_character(tile_map, gamescreen):\n tile_obj = thc.TileHeroCharacter(tile_map, gamescreen)\n tile_obj.load_texture(\"..//Textures//character.png\")\n tile_obj.origin = r.Vector2(0, 0)\n tile_obj.hazard_touched_method = hazard_touched_method\n tile_obj.special_touched_method = special_touched_method\n return tile_obj", - "docstring": "Create an instance of the main character and return it." - }, - { - "code": "def nt2codon_rep(ntseq):\n nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}\n codon_rep ='\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf'\n return ''.join([codon_rep[nt2num[ntseq[i]] + 4*nt2num[ntseq[i+1]] + 16*nt2num[ntseq[i+2]]] for i in range(0, len(ntseq), 3) if i+2 < len(ntseq)])", - "docstring": "Represent nucleotide sequence by sequence of codon symbols.\n\n 'Translates' the nucleotide sequence into a symbolic representation of\n 'amino acids' where each codon gets its own unique character symbol. These \n characters should be reserved only for representing the 64 individual \n codons --- note that this means it is important that this function matches \n the corresponding function in the preprocess script and that any custom \n alphabet does not use these symbols. Defining symbols for each individual\n codon allows for Pgen computation of inframe nucleotide sequences. \n\n Parameters\n ----------\n\n ntseq : str\n A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be \n 'translated' into the codon - symbol representation. Can be either\n uppercase or lowercase, but only composed of A, C, G, or T.\n\n Returns\n -------\n codon_rep : str\n The codon - symbolic representation of ntseq. 
Note that if \n len(ntseq) == 3L --> len(codon_rep) == L\n \n Example\n --------\n >>> nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')\n '\\xbb\\x96\\xab\\xb8\\x8e\\xb6\\xa5\\x92\\xa8\\xba\\x9a\\x93\\x94\\x9f'" - }, - { - "code": "def _folder_to_dict(self, path):\n res = {}\n for key in os.listdir(path):\n if key.startswith('.'):\n continue\n key_path = os.path.join(path, key)\n if os.path.isfile(key_path):\n val = open(key_path).read()\n key = key.split('.')[0]\n res[key] = val\n else:\n res[key] = self._folder_to_dict(key_path)\n return res", - "docstring": "Recursively reads the files from the directory given by `path` and\n writes their contents to a nested dictionary, which is then returned." - }, - { - "code": "def clear_from(self, timestamp):\n block_size = self.config.block_size\n offset, remainder = timestamp // block_size, timestamp % block_size\n if remainder:\n raise ValueError('Timestamp must be on a block boundary')\n self.driver.clear_from(offset, timestamp)", - "docstring": "Clear all data from `timestamp` onwards. Note that the timestamp\n is rounded down to the nearest block boundary" - }, - { - "code": "def columns_are_indexed(self, columns):\n for index in self._indexes.values():\n if index.spans_columns(columns):\n return True\n return False", - "docstring": "Checks if an index begins in the order of the given columns.\n\n :type columns: list\n\n :rtype: bool" - }, - { - "code": "def FromCurrentSystem(cls):\n uname = platform.uname()\n fqdn = socket.getfqdn()\n system = uname[0]\n architecture, _ = platform.architecture()\n if system == \"Windows\":\n service_pack = platform.win32_ver()[2]\n kernel = uname[3]\n release = uname[2]\n version = uname[3] + service_pack\n elif system == \"Darwin\":\n kernel = uname[2]\n release = \"OSX\"\n version = platform.mac_ver()[0]\n elif system == \"Linux\":\n kernel = uname[2]\n release = platform.linux_distribution()[0]\n version = platform.linux_distribution()[1]\n if pep425tags:\n pep425tag = \"%s%s-%s-%s\" % (\n pep425tags.get_abbr_impl(), pep425tags.get_impl_ver(),\n str(pep425tags.get_abi_tag()).lower(), pep425tags.get_platform())\n else:\n pep425tag = \"%s_%s_%s\" % (system, release, architecture)\n return cls(\n system=system,\n architecture=architecture,\n release=release,\n version=version,\n machine=uname[4],\n kernel=kernel,\n fqdn=fqdn,\n pep425tag=pep425tag,\n )", - "docstring": "Fill a Uname from the currently running platform." - }, - { - "code": "def get_enum_labels(enum_cls):\n if not issubclass(enum_cls, enum.Enum):\n raise EnumTypeError(\"Input class '%s' must be derived from enum.Enum\"\n % enum_cls)\n try:\n enum.unique(enum_cls)\n except ValueError as exc:\n raise EnumTypeError(\"Input class '%s' must be unique - %s\"\n % (enum_cls, exc))\n values = [member.value for member in enum_cls]\n if not values:\n raise EnumTypeError(\"Input class '%s' has no members!\" % enum_cls)\n expected_value = 0\n for value in values:\n if value != expected_value:\n raise EnumTypeError(\"Enum values for '%s' must start at 0 and \"\n \"increment by 1. Values: %s\"\n % (enum_cls, values))\n expected_value += 1\n return [member.name for member in enum_cls]", - "docstring": "Return list of enumeration labels from Enum class.\n\n The list is useful when creating an attribute, for the\n `enum_labels` parameter. 
The enumeration values are checked\n to ensure they are unique, start at zero, and increment by one.\n\n :param enum_cls: the Enum class to be inspected\n :type enum_cls: :py:obj:`enum.Enum`\n\n :return: List of label strings\n :rtype: :py:obj:`list`\n\n :raises EnumTypeError: in case the given class is invalid" - }, - { - "code": "def populate(self, obj=None, section=None, parse_types=True):\n section = self.default_section if section is None else section\n obj = Settings() if obj is None else obj\n is_dict = isinstance(obj, dict)\n for k, v in self.get_options(section).items():\n if parse_types:\n if v == 'None':\n v = None\n elif self.FLOAT_REGEXP.match(v):\n v = float(v)\n elif self.INT_REGEXP.match(v):\n v = int(v)\n elif self.BOOL_REGEXP.match(v):\n v = v == 'True'\n else:\n m = self.EVAL_REGEXP.match(v)\n if m:\n evalstr = m.group(1)\n v = eval(evalstr)\n logger.debug('setting {} => {} on {}'.format(k, v, obj))\n if is_dict:\n obj[k] = v\n else:\n setattr(obj, k, v)\n return obj", - "docstring": "Set attributes in ``obj`` with ``setattr`` from the all values in\n ``section``." - }, - { - "code": "def load(self):\n import gspread\n from oauth2client.service_account import ServiceAccountCredentials\n self._validate_table_name()\n self._validate_title()\n scope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/drive\"]\n credentials = ServiceAccountCredentials.from_json_keyfile_name(self.source, scope)\n gc = gspread.authorize(credentials)\n try:\n for worksheet in gc.open(self.title).worksheets():\n self._worksheet = worksheet\n self.__all_values = [row for row in worksheet.get_all_values()]\n if self._is_empty_sheet():\n continue\n try:\n self.__strip_empty_col()\n except ValueError:\n continue\n value_matrix = self.__all_values[self._get_start_row_idx() :]\n try:\n headers = value_matrix[0]\n rows = value_matrix[1:]\n except IndexError:\n continue\n self.inc_table_count()\n yield TableData(\n self.make_table_name(),\n headers,\n rows,\n dp_extractor=self.dp_extractor,\n type_hints=self._extract_type_hints(headers),\n )\n except gspread.exceptions.SpreadsheetNotFound:\n raise OpenError(\"spreadsheet '{}' not found\".format(self.title))\n except gspread.exceptions.APIError as e:\n raise APIError(e)", - "docstring": "Load table data from a Google Spreadsheet.\n\n This method consider :py:attr:`.source` as a path to the\n credential JSON file to access Google Sheets API.\n\n The method automatically search the header row start from\n :py:attr:`.start_row`. The condition of the header row is that\n all of the columns have value (except empty columns).\n\n :return:\n Loaded table data. Return one |TableData| for each sheet in\n the workbook. The table name for data will be determined by\n :py:meth:`~.GoogleSheetsTableLoader.make_table_name`.\n :rtype: iterator of |TableData|\n :raises pytablereader.DataError:\n If the header row is not found.\n :raises pytablereader.OpenError:\n If the spread sheet not found." - }, - { - "code": "def parse_numeric(self):\n word = ''\n frac = False\n if self.char == '-':\n word += self.char\n self.update_chars()\n while self.char.isdigit() or (self.char == '.' and not frac):\n if self.char == '.':\n frac = True\n word += self.char\n self.update_chars()\n if self.char in 'eEdD':\n word += self.char\n self.update_chars()\n if self.char in '+-':\n word += self.char\n self.update_chars()\n while self.char.isdigit():\n word += self.char\n self.update_chars()\n return word", - "docstring": "Tokenize a Fortran numerical value." 
- }, - { - "code": "def flush_and_refresh(self, index):\n self.client.indices.flush(wait_if_ongoing=True, index=index)\n self.client.indices.refresh(index=index)\n self.client.cluster.health(\n wait_for_status='yellow', request_timeout=30)\n return True", - "docstring": "Flush and refresh one or more indices.\n\n .. warning::\n\n Do not call this method unless you know what you are doing. This\n method is only intended to be called during tests." - }, - { - "code": "def jinja_filter_param_value_str(value, str_quote_style=\"\", bool_is_str=False):\n if (type(value) == bool) and not bool_is_str:\n if (value) == True:\n return '1'\n else:\n return '0'\n elif type(value) == str or ((type(value) == bool) and bool_is_str):\n return str_quote_style + str(value) + str_quote_style\n else:\n return str(value)", - "docstring": "Convert a parameter value to string suitable to be passed to an EDA tool\n\n Rules:\n - Booleans are represented as 0/1 or \"true\"/\"false\" depending on the\n bool_is_str argument\n - Strings are either passed through or enclosed in the characters specified\n in str_quote_style (e.g. '\"' or '\\\\\"')\n - Everything else (including int, float, etc.) are converted using the str()\n function." - }, - { - "code": "def send(\n self,\n to=None,\n subject=None,\n contents=None,\n attachments=None,\n cc=None,\n bcc=None,\n preview_only=False,\n headers=None,\n newline_to_break=True,\n ):\n self.login()\n recipients, msg_string = self.prepare_send(\n to, subject, contents, attachments, cc, bcc, headers, newline_to_break\n )\n if preview_only:\n return (recipients, msg_string)\n return self._attempt_send(recipients, msg_string)", - "docstring": "Use this to send an email with gmail" - }, - { - "code": "def set_scene_active(self, scene_id):\n if self.state.activeSceneId != scene_id:\n self._deactivate_scene()\n sequence_number = self.zmq_publisher.publish_active_scene(scene_id)\n self.state.activeSceneId = scene_id\n if self.state.mainswitch is True:\n self._activate_scene()\n logging.debug(\"Set scene {sceneNum} as active scene\".format(sceneNum=scene_id))\n return (True, sequence_number, \"OK\")\n else:\n logging.debug(\"Scene {sceneNum} already is active scene\".format(sceneNum=scene_id))\n return (False, 0, \"This already is the activated scene.\")", - "docstring": "sets the active scene by scene ID" - }, - { - "code": "async def start_client(self,\n sock: anyio.abc.SocketStream,\n addr,\n path: str,\n headers: Optional[List] = None,\n subprotocols: Optional[List[str]] = None):\n self._sock = sock\n self._connection = WSConnection(ConnectionType.CLIENT)\n if headers is None:\n headers = []\n if subprotocols is None:\n subprotocols = []\n data = self._connection.send(\n Request(\n host=addr[0],\n target=path,\n extra_headers=headers,\n subprotocols=subprotocols))\n await self._sock.send_all(data)\n assert self._scope is None\n self._scope = True\n try:\n event = await self._next_event()\n if not isinstance(event, AcceptConnection):\n raise ConnectionError(\"Failed to establish a connection\",\n event)\n return event\n finally:\n self._scope = None", - "docstring": "Start a client WS connection on this socket.\n\n Returns: the AcceptConnection message." - }, - { - "code": "def _update_extended(self, system):\n if self.system.pflow.solved is False:\n logger.warning(\n 'Cannot update extended summary. 
Power flow not solved.')\n return\n Sloss = sum(system.Line.S1 + system.Line.S2)\n self.extended.update({\n 'Ptot':\n sum(system.PV.pmax) + sum(system.SW.pmax),\n 'Pon':\n sum(mul(system.PV.u, system.PV.pmax)),\n 'Pg':\n sum(system.Bus.Pg),\n 'Qtot_min':\n sum(system.PV.qmin) + sum(system.SW.qmin),\n 'Qtot_max':\n sum(system.PV.qmax) + sum(system.SW.qmax),\n 'Qon_min':\n sum(mul(system.PV.u, system.PV.qmin)),\n 'Qon_max':\n sum(mul(system.PV.u, system.PV.qmax)),\n 'Qg':\n round(sum(system.Bus.Qg), 5),\n 'Pl':\n round(sum(system.PQ.p), 5),\n 'Ql':\n round(sum(system.PQ.q), 5),\n 'Psh':\n 0.0,\n 'Qsh':\n round(sum(system.PQ.q) - sum(system.Bus.Ql), 5),\n 'Ploss':\n round(Sloss.real, 5),\n 'Qloss':\n round(Sloss.imag, 5),\n 'Pch':\n round(sum(system.Line.Pchg1 + system.Line.Pchg2), 5),\n 'Qch':\n round(sum(system.Line.Qchg1 + system.Line.Qchg2), 5),\n })", - "docstring": "Update the extended data" - }, - { - "code": "def conform(self, rhs):\n if not is_list_like(rhs):\n rhs = [rhs]\n if isinstance(rhs, np.ndarray):\n rhs = rhs.ravel()\n return rhs", - "docstring": "inplace conform rhs" - }, - { - "code": "def max_consecutive_days(self) -> Optional[Tuple[int, Interval]]:\n if len(self.intervals) == 0:\n return None\n startdate = self.start_date()\n enddate = self.end_date()\n seq = ''\n ndays = (enddate - startdate).days + 1\n for i in range(ndays):\n date = startdate + datetime.timedelta(days=i)\n wholeday = Interval.wholeday(date)\n if any([x.overlaps(wholeday) for x in self.intervals]):\n seq += '+'\n else:\n seq += ' '\n longest = max(seq.split(), key=len)\n longest_len = len(longest)\n longest_idx = seq.index(longest)\n longest_interval = Interval.dayspan(\n startdate + datetime.timedelta(days=longest_idx),\n startdate + datetime.timedelta(days=longest_idx + longest_len)\n )\n return longest_len, longest_interval", - "docstring": "The length of the longest sequence of days in which all days include\n an interval.\n\n Returns:\n tuple:\n ``(longest_length, longest_interval)`` where\n ``longest_interval`` is a :class:`Interval` containing the\n start and end date of the longest span -- or ``None`` if we\n contain no intervals." - }, - { - "code": "def format(sql, args=None):\n resolved_vars = {}\n code = []\n SqlStatement._find_recursive_dependencies(sql, args, code=code,\n resolved_vars=resolved_vars)\n parts = []\n for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):\n if escape:\n parts.append('$')\n elif placeholder:\n variable = placeholder[1:]\n try:\n value = resolved_vars[variable]\n except KeyError as e:\n raise Exception('Invalid sql. Unable to substitute $%s.' 
% e.args[0])\n if isinstance(value, types.ModuleType):\n value = _utils.get_default_query_from_module(value)\n if isinstance(value, SqlStatement):\n sql = value.format(value._sql, resolved_vars)\n value = '(%s)' % sql\n elif '_repr_sql_' in dir(value):\n value = value._repr_sql_()\n elif isinstance(value, basestring):\n value = SqlStatement._escape_string(value)\n elif isinstance(value, list) or isinstance(value, tuple):\n if isinstance(value, tuple):\n value = list(value)\n expansion = '('\n for v in value:\n if len(expansion) > 1:\n expansion += ', '\n if isinstance(v, basestring):\n expansion += SqlStatement._escape_string(v)\n else:\n expansion += str(v)\n expansion += ')'\n value = expansion\n else:\n value = str(value)\n parts.append(value)\n elif literal:\n parts.append(literal)\n expanded = ''.join(parts)\n return expanded", - "docstring": "Resolve variable references in a query within an environment.\n\n This computes and resolves the transitive dependencies in the query and raises an\n exception if that fails due to either undefined or circular references.\n\n Args:\n sql: query to format.\n args: a dictionary of values to use in variable expansion.\n\n Returns:\n The resolved SQL text with variables expanded.\n\n Raises:\n Exception on failure." - }, - { - "code": "def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,\n options=None, ciphers=None):\n context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)\n if options is None:\n options = 0\n options |= OP_NO_SSLv2\n options |= OP_NO_SSLv3\n options |= OP_NO_COMPRESSION\n context.options |= options\n if getattr(context, 'supports_set_ciphers', True):\n context.set_ciphers(ciphers or _DEFAULT_CIPHERS)\n context.verify_mode = cert_reqs\n if getattr(context, 'check_hostname', None) is not None:\n context.check_hostname = False\n return context", - "docstring": "All arguments have the same meaning as ``ssl_wrap_socket``.\n\n By default, this function does a lot of the same work that\n ``ssl.create_default_context`` does on Python 3.4+. It:\n\n - Disables SSLv2, SSLv3, and compression\n - Sets a restricted set of server ciphers\n\n If you wish to enable SSLv3, you can do::\n\n from urllib3.util import ssl_\n context = ssl_.create_urllib3_context()\n context.options &= ~ssl_.OP_NO_SSLv3\n\n You can do the same to enable compression (substituting ``COMPRESSION``\n for ``SSLv3`` in the last line above).\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n :param cert_reqs:\n Whether to require the certificate verification. This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.\n :param ciphers:\n Which cipher suites to allow the server to select.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext" - }, - { - "code": "def add_soup(response, soup_config):\n if (\"text/html\" in response.headers.get(\"Content-Type\", \"\") or\n Browser.__looks_like_html(response)):\n response.soup = bs4.BeautifulSoup(response.content, **soup_config)\n else:\n response.soup = None", - "docstring": "Attaches a soup object to a requests response." 
- }, - { - "code": "def print_pack(document_loader,\n processobj,\n uri,\n metadata\n ):\n packed = pack(document_loader, processobj, uri, metadata)\n if len(packed[\"$graph\"]) > 1:\n return json_dumps(packed, indent=4)\n return json_dumps(packed[\"$graph\"][0], indent=4)", - "docstring": "Return a CWL serialization of the CWL document in JSON." - }, - { - "code": "def predict(self, L, P, R):\n phi = self.extract_one(L, P, R)\n return self.classifier.predict(phi)", - "docstring": "Given an left context `L`, punctuation mark `P`, and right context\n `R`, return True iff this observation is hypothesized to be a\n sentence boundary." - }, - { - "code": "def count(self):\n try:\n return self.object_list.count()\n except (AttributeError, TypeError):\n return len(self.object_list)", - "docstring": "Returns the total number of objects, across all pages." - }, - { - "code": "def serialize_output(output):\n if not ('value' in output and 'script_hex' in output):\n raise Exception('Invalid output')\n return ''.join([\n hexlify(struct.pack(' typing.Tuple[str, str]:\n if isinstance(json_type, list):\n for j_type in json_type:\n if j_type != 'null':\n json_type = j_type\n break\n return (json_type, JSON_TYPES_TO_NATIVE[json_type])", - "docstring": "Returns the json and native python type based on the json_type input.\n\n If json_type is a list of types it will return the first non 'null' value.\n\n :param json_type: A json type or a list of json types.\n :returns: A tuple containing the json type and native python type." - }, - { - "code": "def _parse_aot(self, first, name_first):\n payload = [first]\n self._aot_stack.append(name_first)\n while not self.end():\n is_aot_next, name_next = self._peek_table()\n if is_aot_next and name_next == name_first:\n _, table = self._parse_table(name_first)\n payload.append(table)\n else:\n break\n self._aot_stack.pop()\n return AoT(payload, parsed=True)", - "docstring": "Parses all siblings of the provided table first and bundles them into\n an AoT." - }, - { - "code": "def listBlocksParents(self):\n try :\n body = request.body.read()\n data = cjson.decode(body)\n data = validateJSONInputNoCopy(\"block\", data, read=True)\n max_array_size = 1000\n if ( 'block_names' in data.keys() and isinstance(data['block_names'], list) and len(data['block_names'])>max_array_size):\n dbsExceptionHandler(\"dbsException-invalid-input\",\n \"The Max list length supported in listBlocksParents is %s.\" %max_array_size, self.logger.exception)\n return self.dbsBlock.listBlockParents(data[\"block_name\"])\n except dbsException as de:\n dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)\n except cjson.DecodeError as de:\n sError = \"DBSReaderModel/listBlockParents. %s\\n. Exception trace: \\n %s\" \\\n % (de, traceback.format_exc())\n msg = \"DBSReaderModel/listBlockParents. %s\" % de\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, sError)\n except HTTPError as he:\n raise he\n except Exception as ex:\n sError = \"DBSReaderModel/listBlockParents. %s\\n. Exception trace: \\n %s\" \\\n % (ex, traceback.format_exc())\n dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)", - "docstring": "API to list block parents of multiple blocks. To be called by blockparents url with post call.\n\n :param block_names: list of block names [block_name1, block_name2, ...] (Required). 
Max length 1000.\n :type block_names: list" - }, - { - "code": "def save_species_fitness(self, delimiter=' ', null_value='NA', filename='species_fitness.csv'):\n with open(filename, 'w') as f:\n w = csv.writer(f, delimiter=delimiter)\n for s in self.get_species_fitness(null_value):\n w.writerow(s)", - "docstring": "Log species' average fitness throughout evolution." - }, - { - "code": "def priority(self,priority):\n with self.__lock:\n old_priorities = {}\n try:\n for thread in self.__threads:\n old_priorities[thread] = thread.priority\n thread.priority = priority\n except Exception:\n for (thread,old_priority) in old_priorities.iteritems():\n try:\n thread.priority = old_priority\n except Exception:\n pass\n raise\n else:\n self.__priority = priority", - "docstring": "Set the priority for all threads in this group.\n\n If setting priority fails on any thread, the priority of all threads\n is restored to its previous value." - }, - { - "code": "def infer_dtype_from(val, pandas_dtype=False):\n if is_scalar(val):\n return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)\n return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)", - "docstring": "interpret the dtype from a scalar or array. This is a convenience\n routine to infer dtype from a scalar or an array\n\n Parameters\n ----------\n pandas_dtype : bool, default False\n whether to infer dtype including pandas extension types.\n If False, scalar/array belongs to pandas extension types is inferred as\n object" - }, - { - "code": "def _fix_alert_poster(self, state, shard):\n recp = recipient.Broadcast(AlertPoster.protocol_id, shard)\n state.alerter.update_recipients(recp)", - "docstring": "Called after agent has switched a shard. Alert poster needs an update\n in this case, because otherwise it's posting to lobby instead of the\n shard exchange."
- }, - { - "code": "def list_groups(self, **kwargs):\n kwargs = self._verify_sort_options(kwargs)\n api = self._get_api(iam.DeveloperApi)\n return PaginatedResponse(api.get_all_groups, lwrap_type=Group, **kwargs)", - "docstring": "List all groups in organisation.\n\n :param int limit: The number of groups to retrieve\n :param str order: The ordering direction, ascending (asc) or descending (desc)\n :param str after: Get groups after/starting at given group ID\n :returns: a list of :py:class:`Group` objects.\n :rtype: PaginatedResponse" - }, - { - "code": "def load_ragged_time_series(filename, dtype=float, delimiter=r'\\s+',\n header=False):\n r\n times = []\n values = []\n splitter = re.compile(delimiter)\n if header:\n start_row = 1\n else:\n start_row = 0\n with _open(filename, mode='r') as input_file:\n for row, line in enumerate(input_file, start_row):\n data = splitter.split(line.strip())\n try:\n converted_time = float(data[0])\n except (TypeError, ValueError) as exe:\n six.raise_from(ValueError(\"Couldn't convert value {} using {} \"\n \"found at {}:{:d}:\\n\\t{}\".format(\n data[0], float.__name__,\n filename, row, line)), exe)\n times.append(converted_time)\n try:\n converted_value = np.array(data[1:], dtype=dtype)\n except (TypeError, ValueError) as exe:\n six.raise_from(ValueError(\"Couldn't convert value {} using {} \"\n \"found at {}:{:d}:\\n\\t{}\".format(\n data[1:], dtype.__name__,\n filename, row, line)), exe)\n values.append(converted_value)\n return np.array(times), values", - "docstring": "r\"\"\"Utility function for loading in data from a delimited time series\n annotation file with a variable number of columns.\n Assumes that column 0 contains time stamps and columns 1 through n contain\n values. n may be variable from time stamp to time stamp.\n\n Examples\n --------\n >>> # Load a ragged list of tab-delimited multi-f0 midi notes\n >>> times, vals = load_ragged_time_series('multif0.txt', dtype=int,\n delimiter='\\t')\n >>> # Load a raggled list of space delimited multi-f0 values with a header\n >>> times, vals = load_ragged_time_series('labeled_events.csv',\n header=True)\n\n Parameters\n ----------\n filename : str\n Path to the annotation file\n dtype : function\n Data type to apply to values columns.\n delimiter : str\n Separator regular expression.\n By default, lines will be split by any amount of whitespace.\n header : bool\n Indicates whether a header row is present or not.\n By default, assumes no header is present.\n\n Returns\n -------\n times : np.ndarray\n array of timestamps (float)\n values : list of np.ndarray\n list of arrays of corresponding values" - }, - { - "code": "def _read_depth_images(self, num_images):\n depth_images = self._ros_read_images(self._depth_image_buffer, num_images, self.staleness_limit)\n for i in range(0, num_images):\n depth_images[i] = depth_images[i] * MM_TO_METERS\n if self._flip_images:\n depth_images[i] = np.flipud(depth_images[i])\n depth_images[i] = np.fliplr(depth_images[i])\n depth_images[i] = DepthImage(depth_images[i], frame=self._frame) \n return depth_images", - "docstring": "Reads depth images from the device" - }, - { - "code": "def marginalize(self, variables, inplace=True):\n if isinstance(variables, six.string_types):\n raise TypeError('Expected list or array-like type got type str')\n factor_set = self if inplace else self.copy()\n factors_to_be_marginalized = set(filter(lambda x: set(x.scope()).intersection(variables),\n factor_set.factors))\n for factor in factors_to_be_marginalized:\n 
variables_to_be_marginalized = list(set(factor.scope()).intersection(variables))\n if inplace:\n factor.marginalize(variables_to_be_marginalized, inplace=True)\n else:\n factor_set.remove_factors(factor)\n factor_set.add_factors(factor.marginalize(variables_to_be_marginalized, inplace=False))\n if not inplace:\n return factor_set", - "docstring": "Marginalizes the factors present in the factor sets with respect to the given variables.\n\n Parameters\n ----------\n variables: list, array-like\n List of the variables to be marginalized.\n\n inplace: boolean (Default value True)\n If inplace=True it will modify the factor set itself, would create a new factor set\n\n Returns\n -------\n If inplace = False, will return a new marginalized FactorSet object.\n\n Examples\n --------\n >>> from pgmpy.factors import FactorSet\n >>> from pgmpy.factors.discrete import DiscreteFactor\n >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))\n >>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))\n >>> factor_set1 = FactorSet(phi1, phi2)\n >>> factor_set1.marginalize('x1')\n >>> print(factor_set1)\n set([,\n ])" - }, - { - "code": "def EmitProto(cls):\n result = \"message %s {\\n\" % cls.__name__\n for _, desc in sorted(iteritems(cls.type_infos_by_field_number)):\n result += desc.Definition()\n result += \"}\\n\"\n return result", - "docstring": "Emits .proto file definitions." - }, - { - "code": "def filter_entries(self, request_type=None, content_type=None,\n status_code=None, http_version=None, regex=True):\n results = []\n for entry in self.entries:\n valid_entry = True\n p = self.parser\n if request_type is not None and not p.match_request_type(\n entry, request_type, regex=regex):\n valid_entry = False\n if content_type is not None:\n if not self.parser.match_content_type(entry, content_type,\n regex=regex):\n valid_entry = False\n if status_code is not None and not p.match_status_code(\n entry, status_code, regex=regex):\n valid_entry = False\n if http_version is not None and not p.match_http_version(\n entry, http_version, regex=regex):\n valid_entry = False\n if valid_entry:\n results.append(entry)\n return results", - "docstring": "Returns a ``list`` of entry objects based on the filter criteria.\n\n :param request_type: ``str`` of request type (i.e. - GET or POST)\n :param content_type: ``str`` of regex to use for finding content type\n :param status_code: ``int`` of the desired status code\n :param http_version: ``str`` of HTTP version of request\n :param regex: ``bool`` indicating whether to use regex or exact match." - }, - { - "code": "def combine_futures(*futures):\n expected = len(futures)\n results = []\n completed = AtomicInteger()\n combined = Future()\n def done(f):\n if not combined.done():\n if f.is_success():\n results.append(f.result())\n if completed.get_and_increment() + 1 == expected:\n combined.set_result(results)\n else:\n combined.set_exception(f.exception(), f.traceback())\n for future in futures:\n future.add_done_callback(done)\n return combined", - "docstring": "Combines set of Futures.\n\n :param futures: (Futures), Futures to be combined.\n :return: Result of the combination." - }, - { - "code": "def _get_ref_lengths(self):\n sam_reader = pysam.Samfile(self.bam, \"rb\")\n return dict(zip(sam_reader.references, sam_reader.lengths))", - "docstring": "Gets the length of each reference sequence from the header of the bam. 
Returns dict name => length" - }, - { - "code": "def PrintTags(self, file):\n print >>file, '/* Tag definition for %s */' % self._name\n print >>file, 'enum %s_ {' % self._name.lower()\n for entry in self._entries:\n print >>file, ' %s=%d,' % (self.EntryTagName(entry),\n entry.Tag())\n print >>file, ' %s_MAX_TAGS' % (self._name.upper())\n print >>file, '};\\n'", - "docstring": "Prints the tag definitions for a structure." - }, - { - "code": "def from_bytes(value):\n result = (value.decode('utf-8')\n if isinstance(value, six.binary_type) else value)\n if isinstance(result, six.text_type):\n return result\n else:\n raise ValueError(\n '{0!r} could not be converted to unicode'.format(value))", - "docstring": "Converts bytes to a string value, if necessary.\n\n Args:\n value (Union[str, bytes]): The value to be converted.\n\n Returns:\n str: The original value converted to unicode (if bytes) or as passed in\n if it started out as unicode.\n\n Raises:\n ValueError: If the value could not be converted to unicode." - }, - { - "code": "def alpha_blend(self, other):\n fa = self.__a + other.__a - (self.__a * other.__a)\n if fa==0: sa = 0\n else: sa = min(1.0, self.__a/other.__a)\n da = 1.0 - sa\n sr, sg, sb = [v * sa for v in self.__rgb]\n dr, dg, db = [v * da for v in other.__rgb]\n return Color((sr+dr, sg+dg, sb+db), 'rgb', fa, self.__wref)", - "docstring": "Alpha-blend this color on the other one.\n\n Args:\n :other:\n The grapefruit.Color to alpha-blend with this one.\n\n Returns:\n A grapefruit.Color instance which is the result of alpha-blending\n this color on the other one.\n\n >>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)\n >>> c2 = Color.from_rgb(1, 1, 1, 0.8)\n >>> c3 = c1.alpha_blend(c2)\n >>> c3\n Color(1.0, 0.875, 0.75, 0.84)" - }, - { - "code": "def _event_duration(vevent):\n if hasattr(vevent, 'dtend'):\n return vevent.dtend.value - vevent.dtstart.value\n elif hasattr(vevent, 'duration') and vevent.duration.value:\n return vevent.duration.value\n return timedelta(0)", - "docstring": "unify dtend and duration to the duration of the given vevent" - }, - { - "code": "def getSoname(filename):\n cmd = [\"objdump\", \"-p\", \"-j\", \".dynamic\", filename]\n m = re.search(r'\\s+SONAME\\s+([^\\s]+)', compat.exec_command(*cmd))\n if m:\n return m.group(1)", - "docstring": "Return the soname of a library." - }, - { - "code": "def refine_hbonds_ldon(self, all_hbonds, salt_lneg, salt_pneg):\n i_set = {}\n for hbond in all_hbonds:\n i_set[hbond] = False\n for salt in salt_pneg:\n protidx, ligidx = [at.idx for at in salt.negative.atoms], [at.idx for at in salt.positive.atoms]\n if hbond.d.idx in ligidx and hbond.a.idx in protidx:\n i_set[hbond] = True\n for salt in salt_lneg:\n protidx, ligidx = [at.idx for at in salt.positive.atoms], [at.idx for at in salt.negative.atoms]\n if hbond.d.idx in ligidx and hbond.a.idx in protidx:\n i_set[hbond] = True\n second_set = {}\n hbls = [k for k in i_set.keys() if not i_set[k]]\n for hbl in hbls:\n if hbl.d.idx not in second_set:\n second_set[hbl.d.idx] = (hbl.angle, hbl)\n else:\n if second_set[hbl.d.idx][0] < hbl.angle:\n second_set[hbl.d.idx] = (hbl.angle, hbl)\n return [hb[1] for hb in second_set.values()]", - "docstring": "Refine selection of hydrogen bonds. Do not allow groups which already form salt bridges to form H-Bonds." 
- }, - { - "code": "def method_descriptor(descriptor: str) -> MethodDescriptor:\n end_para = descriptor.find(')')\n returns = descriptor[end_para + 1:]\n args = descriptor[1:end_para]\n return MethodDescriptor(\n parse_descriptor(returns)[0],\n parse_descriptor(args),\n returns,\n args,\n descriptor\n )", - "docstring": "Parses a Method descriptor as described in section 4.3.3 of the JVM\n specification." - }, - { - "code": "def multi_mask_sequences(records, slices):\n for record in records:\n record_indices = list(range(len(record)))\n keep_indices = reduce(lambda i, s: i - frozenset(record_indices[s]),\n slices, frozenset(record_indices))\n seq = ''.join(b if i in keep_indices else '-'\n for i, b in enumerate(str(record.seq)))\n record.seq = Seq(seq)\n yield record", - "docstring": "Replace characters sliced by slices with gap characters." - }, - { - "code": "def reshape(self, *shape):\n if prod(self.shape) != prod(shape):\n raise ValueError(\"Reshaping must leave the number of elements unchanged\")\n if self.shape[-1] != shape[-1]:\n raise ValueError(\"Reshaping cannot change the size of the constituent series (last dimension)\")\n if self.labels is not None:\n newlabels = self.labels.reshape(*shape[:-1])\n else:\n newlabels = None\n return self._constructor(self.values.reshape(shape), labels=newlabels).__finalize__(self, noprop=('labels',))", - "docstring": "Reshape the Series object\n\n Cannot change the last dimension.\n\n Parameters\n ----------\n shape: one or more ints\n New shape" - }, - { - "code": "def _reduce_age(self, now):\n if self.max_age:\n keys = [\n key for key, value in iteritems(self.data)\n if now - value['date'] > self.max_age\n ]\n for key in keys:\n del self.data[key]", - "docstring": "Reduce size of cache by date.\n\n :param datetime.datetime now: Current time" - }, - { - "code": "def _set_padding(self, attr, value):\n if not value:\n setattr(self, attr, \"\")\n else:\n value = str(value)\n if not value.isspace():\n raise ValueError(\"padding must be entirely whitespace\")\n setattr(self, attr, value)", - "docstring": "Setter for the value of a padding attribute." - }, - { - "code": "def insertAt(self, text: str, line: int, col: int):\n undoObj = UndoInsertAt(self, text, line, col)\n self.qteUndoStack.push(undoObj)", - "docstring": "Undo safe wrapper for the native ``insertAt`` method.\n\n |Args|\n\n * ``text`` (**str**): text to insert at the specified position.\n * ``line`` (**int**): line number.\n * ``col`` (**int**): column number.\n\n |Returns|\n\n **None**\n\n |Raises|\n\n * **QtmacsArgumentError** if at least one argument has an invalid type." - }, - { - "code": "def _query_account_rg(cli_ctx, account_name):\n scf = get_mgmt_service_client(cli_ctx, CUSTOM_MGMT_STORAGE)\n acc = next((x for x in scf.storage_accounts.list() if x.name == account_name), None)\n if acc:\n from msrestazure.tools import parse_resource_id\n return parse_resource_id(acc.id)['resource_group'], scf\n raise ValueError(\"Storage account '{}' not found.\".format(account_name))", - "docstring": "Query the storage account's resource group, which the mgmt sdk requires." 
- }, - { - "code": "def refresh(self):\n self._screen.force_update()\n self._screen.refresh()\n self._update(1)", - "docstring": "Refresh the list and the screen" - }, - { - "code": "def home_page(self, tld_type: Optional[TLDType] = None) -> str:\n resource = self.random.choice(USERNAMES)\n domain = self.top_level_domain(\n tld_type=tld_type,\n )\n return 'http://www.{}{}'.format(\n resource, domain)", - "docstring": "Generate a random home page.\n\n :param tld_type: TLD type.\n :return: Random home page.\n\n :Example:\n http://www.fontir.info" - }, - { - "code": "def read(self, frame, data):\n \"Returns a list of values, eats all of data.\"\n seq = []\n while data:\n elem, data = self.spec.read(frame, data)\n seq.append(elem)\n return seq, data", - "docstring": "Returns a list of values, eats all of data." - }, - { - "code": "def InitializeContext(self, args):\n if args is None:\n args = rdf_flow_runner.FlowRunnerArgs()\n output_plugins_states = []\n for plugin_descriptor in args.output_plugins:\n if not args.client_id:\n self.Log(\n \"Not initializing output plugin %s as flow does not run on \"\n \"the client.\", plugin_descriptor.plugin_name)\n continue\n plugin_class = plugin_descriptor.GetPluginClass()\n try:\n plugin, plugin_state = plugin_class.CreatePluginAndDefaultState(\n source_urn=self.flow_obj.output_urn,\n args=plugin_descriptor.plugin_args,\n token=self.token)\n plugin_state[\"logs\"] = []\n plugin_state[\"errors\"] = []\n output_plugins_states.append(\n rdf_flow_runner.OutputPluginState(\n plugin_state=plugin_state, plugin_descriptor=plugin_descriptor))\n except Exception as e:\n logging.exception(\"Plugin %s failed to initialize (%s), ignoring it.\",\n plugin, e)\n parent_creator = None\n if self.parent_runner:\n parent_creator = self.parent_runner.context.creator\n context = rdf_flow_runner.FlowContext(\n create_time=rdfvalue.RDFDatetime.Now(),\n creator=parent_creator or self.token.username,\n current_state=\"Start\",\n output_plugins_states=output_plugins_states,\n state=rdf_flow_runner.FlowContext.State.RUNNING,\n )\n return context", - "docstring": "Initializes the context of this flow." - }, - { - "code": "def call_api(self, method_type, method_name,\n valid_status_codes, resource, data,\n uid, **kwargs):\n url = resource.get_resource_url(\n resource, base_url=self.Meta.base_url\n )\n if method_type in SINGLE_RESOURCE_METHODS:\n if not uid and not kwargs:\n raise MissingUidException\n url = resource.get_url(\n url=url, uid=uid, **kwargs)\n params = {\n 'headers': self.get_http_headers(\n self.Meta.name, method_name, **kwargs),\n 'url': url\n }\n if method_type in ['POST', 'PUT', 'PATCH'] and isinstance(data, dict):\n params.update(json=data)\n prepared_request = self.prepare_http_request(\n method_type, params, **kwargs)\n response = self.session.send(prepared_request)\n return self._handle_response(response, valid_status_codes, resource)", - "docstring": "Make HTTP calls.\n\n Args:\n method_type: The HTTP method\n method_name: The name of the python method making the HTTP call\n valid_status_codes: A tuple of integer status codes\n deemed acceptable as response statuses\n resource: The resource class that will be generated\n data: The post data being sent.\n uid: The unique identifier of the resource.\n Returns:\n\n kwargs is a list of keyword arguments. 
Additional custom keyword\n arguments can be sent into this method and will be passed into\n subclass methods:\n\n - get_url\n - prepare_http_request\n - get_http_headers" - }, - { - "code": "def inter_data_operation(self, axis, func, other):\n if axis:\n partitions = self.row_partitions\n other_partitions = other.row_partitions\n else:\n partitions = self.column_partitions\n other_partitions = other.column_partitions\n func = self.preprocess_func(func)\n result = np.array(\n [\n partitions[i].apply(\n func,\n num_splits=self._compute_num_partitions(),\n other_axis_partition=other_partitions[i],\n )\n for i in range(len(partitions))\n ]\n )\n return self.__constructor__(result) if axis else self.__constructor__(result.T)", - "docstring": "Apply a function that requires two BaseFrameManager objects.\n\n Args:\n axis: The axis to apply the function over (0 - rows, 1 - columns)\n func: The function to apply\n other: The other BaseFrameManager object to apply func to.\n\n Returns:\n A new BaseFrameManager object, the type of object that called this." - }, - { - "code": "def csr_for_names(names, key):\n if len(names) == 0:\n raise ValueError('Must have at least one name')\n if len(names[0]) > 64:\n common_name = u'san.too.long.invalid'\n else:\n common_name = names[0]\n return (\n x509.CertificateSigningRequestBuilder()\n .subject_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, common_name)]))\n .add_extension(\n x509.SubjectAlternativeName(list(map(x509.DNSName, names))),\n critical=False)\n .sign(key, hashes.SHA256(), default_backend()))", - "docstring": "Generate a certificate signing request for the given names and private key.\n\n .. seealso:: `acme.client.Client.request_issuance`\n\n .. seealso:: `generate_private_key`\n\n :param ``List[str]``: One or more names (subjectAltName) for which to\n request a certificate.\n :param key: A Cryptography private key object.\n\n :rtype: `cryptography.x509.CertificateSigningRequest`\n :return: The certificate request message." 
- }, - { - "code": "def from_cap(cls, theta, lwin, clat=None, clon=None, nwin=None,\n theta_degrees=True, coord_degrees=True, dj_matrix=None,\n weights=None):\n if theta_degrees:\n tapers, eigenvalues, taper_order = _shtools.SHReturnTapers(\n _np.radians(theta), lwin)\n else:\n tapers, eigenvalues, taper_order = _shtools.SHReturnTapers(\n theta, lwin)\n return SHWindowCap(theta, tapers, eigenvalues, taper_order,\n clat, clon, nwin, theta_degrees, coord_degrees,\n dj_matrix, weights, copy=False)", - "docstring": "Construct spherical cap localization windows.\n\n Usage\n -----\n x = SHWindow.from_cap(theta, lwin, [clat, clon, nwin, theta_degrees,\n coord_degrees, dj_matrix, weights])\n\n Returns\n -------\n x : SHWindow class instance\n\n Parameters\n ----------\n theta : float\n Angular radius of the spherical cap localization domain (default\n in degrees).\n lwin : int\n Spherical harmonic bandwidth of the localization windows.\n clat, clon : float, optional, default = None\n Latitude and longitude of the center of the rotated spherical cap\n localization windows (default in degrees).\n nwin : int, optional, default (lwin+1)**2\n Number of localization windows.\n theta_degrees : bool, optional, default = True\n True if theta is in degrees.\n coord_degrees : bool, optional, default = True\n True if clat and clon are in degrees.\n dj_matrix : ndarray, optional, default = None\n The djpi2 rotation matrix computed by a call to djpi2.\n weights : ndarray, optional, default = None\n Taper weights used with the multitaper spectral analyses." - }, - { - "code": "def as_dictionary(self):\n values = {\n 'id': self._id,\n 'type': self._type\n }\n if self._owner:\n values['owner'] = self._owner\n return values", - "docstring": "Return the key as a python dictionary." - }, - { - "code": "def dict():\n default = defaultdict(list)\n for key, value in entries():\n default[key].append(value)\n return default", - "docstring": "Compatibility with NLTK.\n Returns the cmudict lexicon as a dictionary, whose keys are\n lowercase words and whose values are lists of pronunciations." - }, - { - "code": "def get_file(self, filename):\n try:\n return self.zip.read(filename)\n except KeyError:\n raise FileNotPresent(filename)", - "docstring": "Return the raw data of the specified filename\n inside the APK\n\n :rtype: bytes" - }, - { - "code": "def delay(self):\n invocation = time.monotonic()\n interval = invocation - self._last_invocation\n self._last_invocation = invocation\n if interval > self._reset_time:\n self._exp = 0\n self._exp = min(self._exp + 1, self._max)\n return self._randfunc(0, self._base * 2 ** self._exp)", - "docstring": "Compute the next delay\n\n Returns the next delay to wait according to the exponential\n backoff algorithm. This is a value between 0 and base * 2^exp\n where exponent starts off at 1 and is incremented at every\n invocation of this method up to a maximum of 10.\n\n If a period of more than base * 2^11 has passed since the last\n retry, the exponent is reset to 1." 
- }, - { - "code": "def codes_get_long_array(handle, key, size):\n values = ffi.new('long[]', size)\n size_p = ffi.new('size_t *', size)\n _codes_get_long_array(handle, key.encode(ENC), values, size_p)\n return list(values)", - "docstring": "Get long array values from a key.\n\n :param bytes key: the keyword whose value(s) are to be extracted\n\n :rtype: List(int)" - }, - { - "code": "def create_dataset(\n self,\n parent,\n dataset,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if \"create_dataset\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"create_dataset\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.create_dataset,\n default_retry=self._method_configs[\"CreateDataset\"].retry,\n default_timeout=self._method_configs[\"CreateDataset\"].timeout,\n client_info=self._client_info,\n )\n request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset)\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"parent\", parent)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n return self._inner_api_calls[\"create_dataset\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", - "docstring": "Creates a dataset.\n\n Example:\n >>> from google.cloud import automl_v1beta1\n >>>\n >>> client = automl_v1beta1.AutoMlClient()\n >>>\n >>> parent = client.location_path('[PROJECT]', '[LOCATION]')\n >>>\n >>> # TODO: Initialize `dataset`:\n >>> dataset = {}\n >>>\n >>> response = client.create_dataset(parent, dataset)\n\n Args:\n parent (str): The resource name of the project to create the dataset for.\n dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create.\n\n If a dict is provided, it must be of the same form as the protobuf\n message :class:`~google.cloud.automl_v1beta1.types.Dataset`\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will not\n be retried.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid." 
- }, - { - "code": "def list_all(prefix=None, app=None, owner=None, description_contains=None,\n name_not_contains=None, profile=\"splunk\"):\n client = _get_splunk(profile)\n name = \"splunk_search.list_all get defaults\"\n try:\n client.saved_searches.delete(name)\n except Exception:\n pass\n search = client.saved_searches.create(name, search=\"nothing\")\n defaults = dict(search.content)\n client.saved_searches.delete(name)\n readonly_keys = (\"triggered_alert_count\",\n \"action.email\",\n \"action.populate_lookup\",\n \"action.rss\",\n \"action.script\",\n \"action.summary_index\",\n \"qualifiedSearch\",\n \"next_scheduled_time\")\n results = OrderedDict()\n searches = sorted([(s.name, s) for s in client.saved_searches])\n for name, search in searches:\n if app and search.access.app != app:\n continue\n if owner and search.access.owner != owner:\n continue\n if name_not_contains and name_not_contains in name:\n continue\n if prefix:\n if name.startswith(prefix):\n continue\n name = prefix + name\n d = [{\"name\": name}]\n description = ''\n for (k, v) in sorted(search.content.items()):\n if k in readonly_keys:\n continue\n if k.startswith(\"display.\"):\n continue\n if not v:\n continue\n if k in defaults and defaults[k] == v:\n continue\n d.append({k: v})\n if k == 'description':\n description = v\n if description_contains and description_contains not in description:\n continue\n results[\"manage splunk search \" + name] = {\"splunk_search.present\": d}\n return salt.utils.yaml.safe_dump(results, default_flow_style=False, width=120)", - "docstring": "Get all splunk search details. Produces results that can be used to create\n an sls file.\n\n if app or owner are specified, results will be limited to matching saved\n searches.\n\n if description_contains is specified, results will be limited to those\n where \"description_contains in description\" is true if name_not_contains is\n specified, results will be limited to those where \"name_not_contains not in\n name\" is true.\n\n If prefix parameter is given, alarm names in the output will be prepended\n with the prefix; alarms that have the prefix will be skipped. This can be\n used to convert existing alarms to be managed by salt, as follows:\n\n CLI example:\n\n 1. Make a \"backup\" of all existing searches\n $ salt-call splunk_search.list_all --out=txt | sed \"s/local: //\" > legacy_searches.sls\n\n 2. Get all searches with new prefixed names\n $ salt-call splunk_search.list_all \"prefix=**MANAGED BY SALT** \" --out=txt | sed \"s/local: //\" > managed_searches.sls\n\n 3. Insert the managed searches into splunk\n $ salt-call state.sls managed_searches.sls\n\n 4. Manually verify that the new searches look right\n\n 5. Delete the original searches\n $ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls\n $ salt-call state.sls remove_legacy_searches.sls\n\n 6. Get all searches again, verify no changes\n $ salt-call splunk_search.list_all --out=txt | sed \"s/local: //\" > final_searches.sls\n $ diff final_searches.sls managed_searches.sls" - }, - { - "code": "def add(context, filenames):\n logging.info(_('Current Mode: Add Linux data'))\n context.obj['database'].add_data(filenames)\n sys.exit(0)", - "docstring": "Add data on Linux system calls.\n\n Arguments shall be *.tbl files from Linux x86 source code,\n or output from grep.\n\n Delete the old database before adding if necessary." 
- }, - { - "code": "def process_locals(self, node, node_visited, *skipped):\n local_vars = self.scope[node].difference(skipped)\n local_vars = local_vars.difference(self.openmp_deps)\n if not local_vars:\n return node_visited\n locals_visited = []\n for varname in local_vars:\n vartype = self.typeof(varname)\n decl = Statement(\"{} {}\".format(vartype, varname))\n locals_visited.append(decl)\n self.ldecls.difference_update(local_vars)\n return Block(locals_visited + [node_visited])", - "docstring": "Declare variable local to node and insert declaration before.\n\n Not possible for function yielding values." - }, - { - "code": "def visualize_embeddings(summary_writer, config):\n logdir = summary_writer.get_logdir()\n if logdir is None:\n raise ValueError('Summary writer must have a logdir')\n config_pbtxt = _text_format.MessageToString(config)\n path = os.path.join(logdir, _projector_plugin.PROJECTOR_FILENAME)\n with tf.io.gfile.GFile(path, 'w') as f:\n f.write(config_pbtxt)", - "docstring": "Stores a config file used by the embedding projector.\n\n Args:\n summary_writer: The summary writer used for writing events.\n config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`\n proto that holds the configuration for the projector such as paths to\n checkpoint files and metadata files for the embeddings. If\n `config.model_checkpoint_path` is none, it defaults to the\n `logdir` used by the summary_writer.\n\n Raises:\n ValueError: If the summary writer does not have a `logdir`." - }, - { - "code": "def call_cur(f):\n \"decorator for opening a connection and passing a cursor to the function\"\n @functools.wraps(f)\n def f2(self, *args, **kwargs):\n with self.withcur() as cur:\n return f(self, cur, *args, **kwargs)\n return f2", - "docstring": "decorator for opening a connection and passing a cursor to the function" - }, - { - "code": "def _name_exists(self, name):\n for i in range(self.count()):\n if self.tabText(i) == name:\n return True\n return False", - "docstring": "Checks if we already have an opened tab with the same name." - }, - { - "code": "def update(self, points, pointvol=0., rstate=None, bootstrap=0,\n pool=None, kdtree=None, mc_integrate=False):\n if rstate is None:\n rstate = np.random\n if pool is None:\n M = map\n else:\n M = pool.map\n if bootstrap == 0.:\n hsides = _friends_leaveoneout_radius(points, 'cubes')\n else:\n ps = [points for it in range(bootstrap)]\n ftypes = ['cubes' for it in range(bootstrap)]\n args = zip(ps, ftypes)\n hsides = list(M(_friends_bootstrap_radius, args))\n hsmax = max(hsides)\n self.hside = hsmax\n self.vol_cube = (2. * self.hside)**self.n\n self.expand = 1.\n if pointvol > 0.:\n v = pointvol\n if self.vol_cube < v:\n self.scale_to_vol(v)\n if mc_integrate:\n self.vol, self.funit = self.monte_carlo_vol(points, kdtree=kdtree,\n return_overlap=True)", - "docstring": "Update the half-side-lengths of our cubes.\n\n Parameters\n ----------\n points : `~numpy.ndarray` with shape (npoints, ndim)\n The set of points to bound.\n\n pointvol : float, optional\n The minimum volume associated with each point. Default is `0.`.\n\n rstate : `~numpy.random.RandomState`, optional\n `~numpy.random.RandomState` instance.\n\n bootstrap : int, optional\n The number of bootstrapped realizations of the ellipsoids. 
The\n maximum distance to the set of points \"left out\" during each\n iteration is used to enlarge the resulting volumes.\n Default is `0`.\n\n pool : user-provided pool, optional\n Use this pool of workers to execute operations in parallel.\n\n kdtree : `~scipy.spatial.KDTree`, optional\n K-D Tree used to perform nearest neighbor searches.\n\n mc_integrate : bool, optional\n Whether to use Monte Carlo methods to compute the effective\n volume and fractional overlap of the final union of balls\n with the unit cube. Default is `False`." - }, - { - "code": "def reset_placeholder_dropdown(cls, input_el):\n text = cls.get_placeholder_text(input_el)\n cls.set_placeholder_text(\n input_el=input_el,\n text=text.replace(cls._dropdown_text, \"\")\n )", - "docstring": "Reset the element back to default." - }, - { - "code": "def validate_signature(request, secret_key):\n data = request.GET.copy()\n if request.method != 'GET':\n message_body = getattr(request, request.method, {})\n data.update(message_body)\n if data.get('sig', False):\n sig = data['sig']\n del data['sig']\n else:\n return False\n if data.get('t', False):\n timestamp = int(data.get('t', False))\n del data['t']\n else:\n return False\n local_time = datetime.utcnow()\n remote_time = datetime.utcfromtimestamp(timestamp)\n if local_time > remote_time:\n delta = local_time - remote_time\n else: \n delta = remote_time - local_time\n if delta.seconds > 5 * 60:\n return False\n return sig == calculate_signature(secret_key, data, timestamp)", - "docstring": "Validates the signature associated with the given request." - }, - { - "code": "def set_azimuth_ticklabels(self, labels, fontdict=None, **kwargs):\n return self._polar.set_xticklabels(labels, fontdict, **kwargs)", - "docstring": "Sets the labels for the azimuthal ticks.\n\n Parameters\n ----------\n labels : A sequence of strings\n Azimuth tick labels\n **kwargs\n Additional parameters are text properties for the labels." - }, - { - "code": "def _get_more_data(self, file, timeout):\n timeout = datetime.timedelta(seconds=timeout)\n timer = Stopwatch()\n while timer.split() < timeout:\n data = file.read()\n if data:\n return data\n raise RuntimeError(\"Timeout\")", - "docstring": "Return data from the file, if available. If no data is received\n by the timeout, then raise RuntimeError." 
- }, - { - "code": "def _get_properties(path=\"\", method=\"GET\", forced_params=None):\n if api is None:\n _import_api()\n sub = api\n path_levels = [level for level in path.split('/') if level != '']\n search_path = ''\n props = []\n parameters = set([] if forced_params is None else forced_params)\n for elem in path_levels[:-1]:\n search_path += '/' + elem\n sub = (item for item in sub if item[\"path\"] == search_path).next()['children']\n search_path += '/' + path_levels[-1]\n sub = next((item for item in sub if item[\"path\"] == search_path))\n try:\n props = sub['info'][method]['parameters']['properties'].keys()\n except KeyError as exc:\n log.error('method not found: \"%s\"', exc)\n for prop in props:\n numerical = re.match(r'(\\w+)\\[n\\]', prop)\n if numerical:\n for i in range(10):\n parameters.add(numerical.group(1) + six.text_type(i))\n else:\n parameters.add(prop)\n return parameters", - "docstring": "Return the parameter list from api for defined path and HTTP method" - }, - { - "code": "def process_step(self, observation, reward, done, info):\n observation = self.process_observation(observation)\n reward = self.process_reward(reward)\n info = self.process_info(info)\n return observation, reward, done, info", - "docstring": "Processes an entire step by applying the processor to the observation, reward, and info arguments.\n\n # Arguments\n observation (object): An observation as obtained by the environment.\n reward (float): A reward as obtained by the environment.\n done (boolean): `True` if the environment is in a terminal state, `False` otherwise.\n info (dict): The debug info dictionary as obtained by the environment.\n\n # Returns\n The tupel (observation, reward, done, reward) with with all elements after being processed." - }, - { - "code": "def tasks(self, name):\n found = self[name]\n if isinstance(found, Shovel):\n return [v for _, v in found.items()]\n return [found]", - "docstring": "Get all the tasks that match a name" - }, - { - "code": "async def certify(client: Client, certification_signed_raw: str) -> ClientResponse:\n return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP)", - "docstring": "POST certification raw document\n\n :param client: Client to connect to the api\n :param certification_signed_raw: Certification raw document\n :return:" - }, - { - "code": "def to_struct(self):\n structobj = self.struct_type()\n for k in structobj.attributes():\n self.log.info(\"Setting attribute %s to %r\" % (k, getattr(self, k)))\n setattr(structobj, k, getattr(self, k))\n return structobj", - "docstring": "Initialize properties of the appropriate struct class from this model class." 
- }, - { - "code": "def rebuild(self):\r\n table = self.tableType()\r\n form = nativestring(self.filterFormat())\r\n if not table and form:\r\n if self.layout().count() == 0:\r\n self.layout().addWidget(QLabel(form, self))\r\n else:\r\n self.layout().itemAt(0).widget().setText(form)\r\n return\r\n elif not form:\r\n return\r\n for child in self.findChildren(QWidget):\r\n child.close()\r\n child.setParent(None)\r\n child.deleteLater()\r\n self.setUpdatesEnabled(False)\r\n schema = table.schema()\r\n vlayout = self.layout()\r\n for i in range(vlayout.count()):\r\n vlayout.takeAt(0)\r\n self._plugins = []\r\n for line in form.split('\\n'):\r\n row = QHBoxLayout()\r\n row.setContentsMargins(0, 0, 0, 0)\r\n row.setSpacing(0)\r\n for label, lookup in FORMAT_SPLITTER.findall(line):\r\n lbl = QLabel(label, self)\r\n row.addWidget(lbl)\r\n opts = lookup.split(':')\r\n if len(opts) == 1:\r\n opts.append('is')\r\n column = schema.column(opts[0])\r\n if not column:\r\n continue\r\n plugin = self.pluginFactory().plugin(column)\r\n if not plugin:\r\n continue\r\n editor = plugin.createEditor(self, column, opts[1], None)\r\n if editor:\r\n editor.setObjectName(opts[0])\r\n row.addWidget(editor)\r\n self._plugins.append((opts[0], opts[1], plugin, editor))\r\n row.addStretch(1)\r\n vlayout.addLayout(row)\r\n self.setUpdatesEnabled(True)\r\n self.adjustSize()", - "docstring": "Rebuilds the data associated with this filter widget." - }, - { - "code": "def _cli_main(args=None):\n arguments = _parse_arguments(args)\n _remove_none_values(arguments)\n verbosity = min(arguments.pop('verbose'), 4)\n levels = [logging.ERROR,\n logging.WARNING,\n logging.INFO,\n logging.DEBUG,\n TRACE_LEVEL]\n arguments.setdefault('debug_level', levels[verbosity])\n with open_tunnel(**arguments) as tunnel:\n if tunnel.is_alive:\n input_(\n)", - "docstring": "Pass input arguments to open_tunnel\n\n Mandatory: ssh_address, -R (remote bind address list)\n\n Optional:\n -U (username) we may gather it from SSH_CONFIG_FILE or current username\n -p (server_port), defaults to 22\n -P (password)\n -L (local_bind_address), default to 0.0.0.0:22\n -k (ssh_host_key)\n -K (private_key_file), may be gathered from SSH_CONFIG_FILE\n -S (private_key_password)\n -t (threaded), allow concurrent connections over tunnels\n -v (verbose), up to 3 (-vvv) to raise loglevel from ERROR to DEBUG\n -V (version)\n -x (proxy), ProxyCommand's IP:PORT, may be gathered from config file\n -c (ssh_config), ssh configuration file (defaults to SSH_CONFIG_FILE)\n -z (compress)\n -n (noagent), disable looking for keys from an Agent\n -d (host_pkey_directories), look for keys on these folders" - }, - { - "code": "def send_request(self, request):\n self.aggregate.wait_for_host(self.urlparts[1])\n kwargs = self.get_request_kwargs()\n kwargs[\"allow_redirects\"] = False\n self._send_request(request, **kwargs)", - "docstring": "Send request and store response in self.url_connection." 
- }, - { - "code": "def allow_ip(*ips: typing.Union[str, ipaddress.IPv4Network, ipaddress.IPv4Address]):\n for ip in ips:\n if isinstance(ip, ipaddress.IPv4Address):\n allowed_ips.add(ip)\n elif isinstance(ip, str):\n allowed_ips.add(ipaddress.IPv4Address(ip))\n elif isinstance(ip, ipaddress.IPv4Network):\n allowed_ips.update(ip.hosts())\n else:\n raise ValueError(f\"Bad type of ipaddress: {type(ip)} ('{ip}')\")", - "docstring": "Allow ip address.\n\n :param ips:\n :return:" - }, - { - "code": "def getChecks(self, **parameters):\n for key in parameters:\n if key not in ['limit', 'offset', 'tags']:\n sys.stderr.write('%s not a valid argument for getChecks()\\n'\n % key)\n response = self.request('GET', 'checks', parameters)\n return [PingdomCheck(self, x) for x in response.json()['checks']]", - "docstring": "Pulls all checks from pingdom\n\n Optional Parameters:\n\n * limit -- Limits the number of returned probes to the\n specified quantity.\n Type: Integer (max 25000)\n Default: 25000\n\n * offset -- Offset for listing (requires limit.)\n Type: Integer\n Default: 0\n\n * tags -- Filter listing by tag/s\n Type: String\n Default: None" - }, - { - "code": "def read_zmat(cls, inputfile, implicit_index=True):\n cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']\n if implicit_index:\n zmat_frame = pd.read_table(inputfile, comment='\n delim_whitespace=True,\n names=cols)\n zmat_frame.index = range(1, len(zmat_frame) + 1)\n else:\n zmat_frame = pd.read_table(inputfile, comment='\n delim_whitespace=True,\n names=['temp_index'] + cols)\n zmat_frame.set_index('temp_index', drop=True, inplace=True)\n zmat_frame.index.name = None\n if pd.isnull(zmat_frame.iloc[0, 1]):\n zmat_values = [1.27, 127., 127.]\n zmat_refs = [constants.int_label[x] for x in\n ['origin', 'e_z', 'e_x']]\n for row, i in enumerate(zmat_frame.index[:3]):\n cols = ['b', 'a', 'd']\n zmat_frame.loc[:, cols] = zmat_frame.loc[:, cols].astype('O')\n if row < 2:\n zmat_frame.loc[i, cols[row:]] = zmat_refs[row:]\n zmat_frame.loc[i, ['bond', 'angle', 'dihedral'][row:]\n ] = zmat_values[row:]\n else:\n zmat_frame.loc[i, 'd'] = zmat_refs[2]\n zmat_frame.loc[i, 'dihedral'] = zmat_values[2]\n elif zmat_frame.iloc[0, 1] in constants.int_label.keys():\n zmat_frame = zmat_frame.replace(\n {col: constants.int_label for col in ['b', 'a', 'd']})\n zmat_frame = cls._cast_correct_types(zmat_frame)\n try:\n Zmat = cls(zmat_frame)\n except InvalidReference:\n raise UndefinedCoordinateSystem(\n 'Your zmatrix cannot be transformed to cartesian coordinates')\n return Zmat", - "docstring": "Reads a zmat file.\n\n Lines beginning with ``#`` are ignored.\n\n Args:\n inputfile (str):\n implicit_index (bool): If this option is true the first column\n has to be the element symbols for the atoms.\n The row number is used to determine the index.\n\n Returns:\n Zmat:" - }, - { - "code": "def get_keyboard_mapping(self, first_keycode, count):\n r = request.GetKeyboardMapping(display = self.display,\n first_keycode = first_keycode,\n count = count)\n return r.keysyms", - "docstring": "Return the current keyboard mapping as a list of tuples,\n starting at first_keycount and no more than count." 
- }, - { - "code": "def recv_match(self, condition=None, type=None, blocking=False):\n if type is not None and not isinstance(type, list):\n type = [type]\n while True:\n m = self.recv_msg()\n if m is None:\n return None\n if type is not None and not m.get_type() in type:\n continue\n if not mavutil.evaluate_condition(condition, self.messages):\n continue\n return m", - "docstring": "recv the next message that matches the given condition\n type can be a string or a list of strings" - }, - { - "code": "def run(self, cmd, fn=None, globals=None, locals=None):\n if globals is None:\n import __main__\n globals = __main__.__dict__\n if locals is None:\n locals = globals\n self.reset()\n if isinstance(cmd, str):\n str_cmd = cmd\n cmd = compile(str_cmd, fn or \"\", \"exec\")\n self.compile_cache[id(cmd)] = str_cmd\n if fn:\n from linecache import getline\n lno = 1\n while True:\n line = getline(fn, lno, globals)\n if line is None:\n lno = None\n break\n if executable_line(line):\n break\n lno += 1\n self.start_trace()\n if lno is not None:\n self.breakpoints.add(LineBreakpoint(fn, lno, temporary=True))\n try:\n execute(cmd, globals, locals)\n finally:\n self.stop_trace()", - "docstring": "Run the cmd `cmd` with trace" - }, - { - "code": "def _deleteObject(self, xref):\n if self.isClosed:\n raise ValueError(\"operation illegal for closed doc\")\n return _fitz.Document__deleteObject(self, xref)", - "docstring": "Delete an object given its xref." - }, - { - "code": "def do_load(self, design, init=False):\n if design:\n filename = self._validated_config_filename(design)\n with open(filename, \"r\") as f:\n text = f.read()\n structure = json_decode(text)\n else:\n structure = {}\n attributes = structure.get(\"attributes\", structure)\n children = structure.get(\"children\", structure)\n name, mri, x, y, visible = [], [], [], [], []\n for part_name, d in attributes.get(\"layout\", {}).items():\n name.append(part_name)\n mri.append(\"\")\n x.append(d[\"x\"])\n y.append(d[\"y\"])\n visible.append(d[\"visible\"])\n self.set_layout(LayoutTable(name, mri, x, y, visible))\n source, export = [], []\n for source_name, export_name in attributes.get(\"exports\", {}).items():\n source.append(source_name)\n export.append(export_name)\n self.exports.set_value(ExportTable(source, export))\n our_values = {k: v for k, v in attributes.items()\n if k in self.our_config_attributes}\n block = self.block_view()\n block.put_attribute_values(our_values)\n self.run_hooks(\n LoadHook(p, c, children.get(p.name, {}), init)\n for p, c in self.create_part_contexts(only_visible=False).items())\n self._mark_clean(design, init)", - "docstring": "Load a design name, running the child LoadHooks.\n\n Args:\n design: Name of the design json file, without extension\n init: Passed to the LoadHook to tell the children if this is being\n run at Init or not" - }, - { - "code": "def clean_slug(self):\n source = self.cleaned_data.get('slug', '')\n lang_choice = self.language_code\n if not source:\n source = slugify(self.cleaned_data.get('title', ''))\n qs = Post._default_manager.active_translations(lang_choice).language(lang_choice)\n used = list(qs.values_list('translations__slug', flat=True))\n slug = source\n i = 1\n while slug in used:\n slug = '%s-%s' % (source, i)\n i += 1\n return slug", - "docstring": "Generate a valid slug, in case the given one is taken" - }, - { - "code": "def has_cache(self):\n if not self.cache_enabled:\n return False\n if self._cache is None:\n self.build_cache()\n return True", - "docstring": "Intended to be 
called before any call that might access the\n cache. If the cache is not selected, then returns False,\n otherwise the cache is build if needed and returns True." - }, - { - "code": "def get_progressbar(self, total, **options):\n progressbar = ColoredProgressBar(total)\n progressbar.steps_label = 'Commit'\n progressbar.elements += ['eta', 'time']\n return progressbar", - "docstring": "Returns progress bar instance for a given ``total`` number of clicks\n it should do." - }, - { - "code": "def setConnStringForWindows():\n global _dbConnectString\n from peek_platform.file_config.PeekFileConfigABC import PeekFileConfigABC\n from peek_platform.file_config.PeekFileConfigSqlAlchemyMixin import \\\n PeekFileConfigSqlAlchemyMixin\n from peek_platform import PeekPlatformConfig\n class _WorkerTaskConfigMixin(PeekFileConfigABC,\n PeekFileConfigSqlAlchemyMixin):\n pass\n PeekPlatformConfig.componentName = peekWorkerName\n _dbConnectString = _WorkerTaskConfigMixin().dbConnectString", - "docstring": "Set Conn String for Windiws\n\n Windows has a different way of forking processes, which causes the\n @worker_process_init.connect signal not to work in \"CeleryDbConnInit\"" - }, - { - "code": "def deep_align(objects, join='inner', copy=True, indexes=None,\n exclude=frozenset(), raise_on_invalid=True):\n from .dataarray import DataArray\n from .dataset import Dataset\n if indexes is None:\n indexes = {}\n def is_alignable(obj):\n return isinstance(obj, (DataArray, Dataset))\n positions = []\n keys = []\n out = []\n targets = []\n no_key = object()\n not_replaced = object()\n for n, variables in enumerate(objects):\n if is_alignable(variables):\n positions.append(n)\n keys.append(no_key)\n targets.append(variables)\n out.append(not_replaced)\n elif is_dict_like(variables):\n for k, v in variables.items():\n if is_alignable(v) and k not in indexes:\n positions.append(n)\n keys.append(k)\n targets.append(v)\n out.append(OrderedDict(variables))\n elif raise_on_invalid:\n raise ValueError('object to align is neither an xarray.Dataset, '\n 'an xarray.DataArray nor a dictionary: %r'\n % variables)\n else:\n out.append(variables)\n aligned = align(*targets, join=join, copy=copy, indexes=indexes,\n exclude=exclude)\n for position, key, aligned_obj in zip(positions, keys, aligned):\n if key is no_key:\n out[position] = aligned_obj\n else:\n out[position][key] = aligned_obj\n assert all(arg is not not_replaced for arg in out)\n return out", - "docstring": "Align objects for merging, recursing into dictionary values.\n\n This function is not public API." - }, - { - "code": "def indexBy(self, val=None):\n if val is None:\n val = lambda *args: args[0]\n def by(result, key, value):\n result[key] = value\n res = self._group(self.obj, val, by)\n return self._wrap(res)", - "docstring": "Indexes the object's values by a criterion, similar to\n `groupBy`, but for when you know that your index values will be unique." 
- }, - { - "code": "def _SetupDatabase(host=None,\n port=None,\n user=None,\n password=None,\n database=None,\n client_key_path=None,\n client_cert_path=None,\n ca_cert_path=None):\n with contextlib.closing(\n _Connect(\n host=host,\n port=port,\n user=user,\n password=password,\n database=None,\n client_key_path=client_key_path,\n client_cert_path=client_cert_path,\n ca_cert_path=ca_cert_path)) as conn:\n with contextlib.closing(conn.cursor()) as cursor:\n try:\n cursor.execute(CREATE_DATABASE_QUERY.format(database))\n except MySQLdb.MySQLError as e:\n if e.args[0] != mysql_error_constants.DB_CREATE_EXISTS:\n raise\n cursor.execute(\"USE {}\".format(database))\n _CheckCollation(cursor)\n def _MigrationConnect():\n return _Connect(\n host=host,\n port=port,\n user=user,\n password=password,\n database=database,\n client_key_path=client_key_path,\n client_cert_path=client_cert_path,\n ca_cert_path=ca_cert_path)\n mysql_migration.ProcessMigrations(_MigrationConnect,\n config.CONFIG[\"Mysql.migrations_dir\"])", - "docstring": "Connect to the given MySQL host and create a utf8mb4_unicode_ci database.\n\n Args:\n host: The hostname to connect to.\n port: The port to connect to.\n user: The username to connect as.\n password: The password to connect with.\n database: The database name to create.\n client_key_path: The path of the client private key file.\n client_cert_path: The path of the client public key certificate file.\n ca_cert_path: The path of the Certificate Authority (CA) certificate file." - }, - { - "code": "def as_object_version(value):\n return value if isinstance(value, ObjectVersion) \\\n else ObjectVersion.query.filter_by(version_id=value).one_or_none()", - "docstring": "Get an object version object from an object version ID or an object version.\n\n :param value: A :class:`invenio_files_rest.models.ObjectVersion` or an\n object version ID.\n :returns: A :class:`invenio_files_rest.models.ObjectVersion` instance." - }, - { - "code": "def image_post_delete_handler(sender, instance, **kwargs):\n for f in glob.glob('{}/{}*'.format(instance.image.storage.location,\n instance.image.name)):\n if not os.path.isdir(f):\n instance.image.storage.delete(f)", - "docstring": "Makes sure that a an image is also deleted from the media directory.\n\n This should prevent a load of \"dead\" image files on disc." - }, - { - "code": "def output_sizes(self):\n return tuple([l() if callable(l) else l for l in self._output_sizes])", - "docstring": "Returns a tuple of all output sizes of all the layers." 
- }, - { - "code": "async def items(self, name=None, *, watch=None):\n path = \"/v1/event/list\"\n params = {\"name\": name}\n response = await self._api.get(path, params=params, watch=watch)\n results = [format_event(data) for data in response.body]\n return consul(results, meta=extract_meta(response.headers))", - "docstring": "Lists the most recent events an agent has seen\n\n Parameters:\n name (str): Filter events by name.\n watch (Blocking): Do a blocking query\n Returns:\n CollectionMeta: where value is a list of events\n\n It returns a JSON body like this::\n\n [\n {\n \"ID\": \"b54fe110-7af5-cafc-d1fb-afc8ba432b1c\",\n \"Name\": \"deploy\",\n \"Payload\": bytes(\"abcd\"),\n \"NodeFilter\": re.compile(\"node-\\d+\"),\n \"ServiceFilter\": \"\",\n \"TagFilter\": \"\",\n \"Version\": 1,\n \"LTime\": 19\n },\n ...\n ]" - }, - { - "code": "def get_recommendation_store(self):\n return RedisStore(self.config['host'],\n self.config['port'],\n self.config['db'],\n self.config['prefix'])", - "docstring": "Get the configured recommendation store." - }, - { - "code": "def shear_matrix(angle, direction, point, normal):\n normal = unit_vector(normal[:3])\n direction = unit_vector(direction[:3])\n if abs(np.dot(normal, direction)) > 1e-6:\n raise ValueError(\"direction and normal vectors are not orthogonal\")\n angle = math.tan(angle)\n M = np.identity(4)\n M[:3, :3] += angle * np.outer(direction, normal)\n M[:3, 3] = -angle * np.dot(point[:3], normal) * direction\n return M", - "docstring": "Return matrix to shear by angle along direction vector on shear plane.\n\n The shear plane is defined by a point and normal vector. The direction\n vector must be orthogonal to the plane's normal vector.\n\n A point P is transformed by the shear matrix into P\" such that\n the vector P-P\" is parallel to the direction vector and its extent is\n given by the angle of P-P'-P\", where P' is the orthogonal projection\n of P onto the shear plane.\n\n >>> angle = (random.random() - 0.5) * 4*math.pi\n >>> direct = np.random.random(3) - 0.5\n >>> point = np.random.random(3) - 0.5\n >>> normal = np.cross(direct, np.random.random(3))\n >>> S = shear_matrix(angle, direct, point, normal)\n >>> np.allclose(1, np.linalg.det(S))\n True" - }, - { - "code": "def remove(self, fact):\n token = Token.invalid(fact)\n MATCHER.debug(\" added %r\", token)\n for child in self.children:\n child.callback(token)", - "docstring": "Create an INVALID token and send it to all children." 
- }, - { - "code": "def palette(fg, bg=-1):\n if not hasattr(palette, \"counter\"):\n palette.counter = 1\n if not hasattr(palette, \"selections\"):\n palette.selections = {}\n selection = \"%s%s\" % (str(fg), str(bg))\n if not selection in palette.selections:\n palette.selections[selection] = palette.counter\n palette.counter += 1\n colors = [c for c in dir(_curses) if c.startswith('COLOR')]\n if isinstance(fg, str):\n if not \"COLOR_\"+fg.upper() in colors:\n fg = -1\n else:\n fg = getattr(_curses, \"COLOR_\"+fg.upper())\n if isinstance(bg, str):\n if not \"COLOR_\"+bg.upper() in colors:\n bg = -1\n else:\n bg = getattr(_curses, \"COLOR_\"+bg.upper())\n _curses.init_pair(palette.selections[selection], fg, bg)\n return _curses.color_pair(palette.selections[selection])", - "docstring": "Since curses only supports a finite amount of initialised colour pairs\n we memoise any selections you've made as an attribute on this function" - }, - { - "code": "def protein_statistics(self):\n d = {}\n d['id'] = self.id\n d['sequences'] = [x.id for x in self.sequences]\n d['num_sequences'] = self.num_sequences\n if self.representative_sequence:\n d['representative_sequence'] = self.representative_sequence.id\n d['repseq_gene_name'] = self.representative_sequence.gene_name\n d['repseq_uniprot'] = self.representative_sequence.uniprot\n d['repseq_description'] = self.representative_sequence.description\n d['num_structures'] = self.num_structures\n d['experimental_structures'] = [x.id for x in self.get_experimental_structures()]\n d['num_experimental_structures'] = self.num_structures_experimental\n d['homology_models'] = [x.id for x in self.get_homology_models()]\n d['num_homology_models'] = self.num_structures_homology\n if self.representative_structure:\n d['representative_structure'] = self.representative_structure.id\n d['representative_chain'] = self.representative_chain\n d['representative_chain_seq_coverage'] = self.representative_chain_seq_coverage\n d['repstruct_description'] = self.description\n if self.representative_structure.is_experimental:\n d['repstruct_resolution'] = self.representative_structure.resolution\n d['num_sequence_alignments'] = len(self.sequence_alignments)\n d['num_structure_alignments'] = len(self.structure_alignments)\n return d", - "docstring": "Get a dictionary of basic statistics describing this protein" - }, - { - "code": "def parse_optimization_expression(obj, linear=True, quadratic=False, expression=None, **kwargs):\n if expression is None:\n expression = obj.expression\n if not (linear or quadratic):\n if obj.is_Linear:\n linear = True\n elif obj.is_Quadratic:\n quadratic = True\n else:\n raise ValueError(\"Expression is not linear or quadratic. 
Other expressions are not currently supported.\")\n assert linear or quadratic\n if quadratic:\n offset, linear_coefficients, quadratic_coefficients = _parse_quadratic_expression(expression, **kwargs)\n else:\n offset, linear_coefficients = _parse_linear_expression(expression, **kwargs)\n quadratic_coefficients = {}\n return offset, linear_coefficients, quadratic_coefficients", - "docstring": "Function for parsing the expression of a Constraint or Objective object.\n\n Parameters\n ----------\n object: Constraint or Objective\n The optimization expression to be parsed\n linear: Boolean\n If True the expression will be assumed to be linear\n quadratic: Boolean\n If True the expression will be assumed to be quadratic\n expression: Sympy expression or None (optional)\n An expression can be passed explicitly to avoid getting the expression from the solver.\n If this is used then 'linear' or 'quadratic' should be True.\n\n If both linear and quadratic are False, the is_Linear and is_Quadratic methods will be used to determine how it should be parsed\n\n Returns\n ----------\n A tuple of (linear_coefficients, quadratic_coefficients)\n linear_coefficients is a dictionary of {variable: coefficient} pairs\n quadratic_coefficients is a dictionary of {frozenset(variables): coefficient} pairs" - }, - { - "code": "def add_child_repository(self, repository_id, child_id):\n if self._catalog_session is not None:\n return self._catalog_session.add_child_catalog(catalog_id=repository_id, child_id=child_id)\n return self._hierarchy_session.add_child(id_=repository_id, child_id=child_id)", - "docstring": "Adds a child to a repository.\n\n arg: repository_id (osid.id.Id): the ``Id`` of a repository\n arg: child_id (osid.id.Id): the ``Id`` of the new child\n raise: AlreadyExists - ``repository_id`` is already a parent of\n ``child_id``\n raise: NotFound - ``repository_id`` or ``child_id`` not found\n raise: NullArgument - ``repository_id`` or ``child_id`` is\n ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def tagged(self, tag, offset=0, count=25):\n return json.loads(self.client('tag', 'get', tag, offset, count))", - "docstring": "Return the paginated jids of jobs tagged with a tag" - }, - { - "code": "def network_lopf_prepare_solver(network, solver_name=\"glpk\", solver_io=None):\n network.opt = SolverFactory(solver_name, solver_io=solver_io)\n patch_optsolver_record_memusage_before_solving(network.opt, network)\n if isinstance(network.opt, PersistentSolver):\n network.opt.set_instance(network.model)\n return network.opt", - "docstring": "Prepare solver for linear optimal power flow.\n\n Parameters\n ----------\n solver_name : string\n Must be a solver name that pyomo recognises and that is\n installed, e.g. \"glpk\", \"gurobi\"\n solver_io : string, default None\n Solver Input-Output option, e.g. 
\"python\" to use \"gurobipy\" for\n solver_name=\"gurobi\"\n\n Returns\n -------\n None" - }, - { - "code": "def _CreateConfig(self, project_id):\n project_id = project_id or self._GetNumericProjectId()\n if not project_id:\n return\n self.boto_config_header %= (\n self.boto_config_script, self.boto_config_template)\n config = config_manager.ConfigManager(\n config_file=self.boto_config_template,\n config_header=self.boto_config_header)\n boto_dir = os.path.dirname(self.boto_config_script)\n config.SetOption('GSUtil', 'default_project_id', project_id)\n config.SetOption('GSUtil', 'default_api_version', '2')\n config.SetOption('GoogleCompute', 'service_account', 'default')\n config.SetOption('Plugin', 'plugin_directory', boto_dir)\n config.WriteConfig(config_file=self.boto_config)", - "docstring": "Create the boto config to support standalone GSUtil.\n\n Args:\n project_id: string, the project ID to use in the config file." - }, - { - "code": "def coord(self):\n if self._coordinate is None:\n self._coordinate = SkyCoord(self.origin.ra, self.origin.dec + 45 * units.arcsec)\n return self._coordinate", - "docstring": "The center of the camera pointing in sky coordinates" - }, - { - "code": "def set_log_level(self):\n if self.options.debug:\n self.logger.setLevel(logging.DEBUG)\n elif self.options.quiet:\n self.logger.setLevel(logging.ERROR)\n else:\n self.logger.setLevel(logging.INFO)\n self.logger.addHandler(logging.StreamHandler())\n return self.logger", - "docstring": "Set log level according to command-line options\n\n @returns: logger object" - }, - { - "code": "def stretch(self, scale_factor, callback=True):\n self.scale_pct *= scale_factor\n self.scale_and_shift(self.scale_pct, 0.0, callback=callback)", - "docstring": "Stretch the color map via altering the shift map." 
- }, - { - "code": "def merge_down_loci(self):\n old_locus_size = -1\n z = 0\n while len(self.loci) != old_locus_size:\n z+=1\n old_locus_size = len(self.loci)\n locus_size = len(self.loci)\n if self.verbose:\n sys.stderr.write(str(locus_size)+\" Combining down loci step \"+str(z)+\" \\r\")\n combined = set()\n for i in range(0,locus_size):\n if i in combined: continue\n for j in range(i+1,locus_size):\n if self.loci[i].range.overlaps_with_padding(self.loci[j].range,self.overhang):\n if self.use_direction and self.loci[i].range.direction != self.loci[j].range.direction: continue\n for obj in self.loci[j].members:\n self.loci[i].add_member(obj)\n combined.add(j)\n break\n newloci = []\n for i in range(0,locus_size):\n if i not in combined:\n newloci.append(self.loci[i])\n self.loci = newloci\n if self.verbose:\n sys.stderr.write(\"Finished combining down \"+str(len(self.loci))+\" loci in \"+str(z)+\" steps \\n\")\n return", - "docstring": "Called internally to make loci overlapping into one set" - }, - { - "code": "def _next_partition(self, topic, key=None):\n while self.client.metadata_error_for_topic(topic):\n if self._req_attempts >= self._max_attempts:\n _check_error(self.client.metadata_error_for_topic(topic))\n yield self.client.load_metadata_for_topics(topic)\n if not self.client.metadata_error_for_topic(topic):\n break\n self._req_attempts += 1\n d = Deferred()\n self.client.reactor.callLater(\n self._retry_interval, d.callback, True)\n self._retry_interval *= self.RETRY_INTERVAL_FACTOR\n yield d\n partitions = self.client.topic_partitions[topic]\n if topic not in self.partitioners:\n self.partitioners[topic] = \\\n self.partitioner_class(topic, partitions)\n partition = self.partitioners[topic].partition(key, partitions)\n returnValue(partition)", - "docstring": "get the next partition to which to publish\n\n Check with our client for the latest partitions for the topic, then\n ask our partitioner for the next partition to which we should publish\n for the give key. If needed, create a new partitioner for the topic." - }, - { - "code": "def per_implementation_data(self):\n ret = {}\n for cache_date in self.cache_dates:\n data = self._cache_get(cache_date)\n ret[cache_date] = {}\n for impl_name, impl_data in data['by_implementation'].items():\n for impl_ver, count in impl_data.items():\n k = self._compound_column_value(\n impl_name,\n self._shorten_version(impl_ver)\n )\n ret[cache_date][k] = count\n if len(ret[cache_date]) == 0:\n ret[cache_date]['unknown'] = 0\n return ret", - "docstring": "Return download data by python impelementation name and version.\n\n :return: dict of cache data; keys are datetime objects, values are\n dict of implementation name/version (str) to count (int).\n :rtype: dict" - }, - { - "code": "def condense(input_string):\n try:\n assert isinstance(input_string, basestring)\n except AssertionError:\n raise TypeError\n removed_leading_whitespace = re.sub('>\\s+', '>', input_string).strip()\n removed_trailing_whitespace = re.sub('\\s+<', '<', removed_leading_whitespace).strip()\n return removed_trailing_whitespace", - "docstring": "Trims leadings and trailing whitespace between tags in an html document\n\n Args:\n input_string: A (possible unicode) string representing HTML.\n\n Returns:\n A (possibly unicode) string representing HTML.\n\n Raises:\n TypeError: Raised if input_string isn't a unicode string or string." 
- }, - { - "code": "def _inference_tip_cached(func, instance, args, kwargs, _cache={}):\n node = args[0]\n try:\n return iter(_cache[func, node])\n except KeyError:\n result = func(*args, **kwargs)\n original, copy = itertools.tee(result)\n _cache[func, node] = list(copy)\n return original", - "docstring": "Cache decorator used for inference tips" - }, - { - "code": "def get(self, path, params=None, headers=None):\n response = requests.get(\n self._url_for(path),\n params=params,\n headers=self._headers(headers)\n )\n self._handle_errors(response)\n return response", - "docstring": "Perform a GET request, optionally providing query-string params.\n\n Args:\n path (str): A path that gets appended to ``base_url``.\n params (dict, optional): Dictionary of param names to values.\n\n Example:\n api_client.get('/users', params={'active': True})\n\n Returns:\n A requests ``Response`` object." - }, - { - "code": "def _ReadEventDataIntoEvent(self, event):\n if self._storage_type != definitions.STORAGE_TYPE_SESSION:\n return\n event_data_identifier = event.GetEventDataIdentifier()\n if event_data_identifier:\n lookup_key = event_data_identifier.CopyToString()\n event_data = self._event_data[lookup_key]\n for attribute_name, attribute_value in event_data.GetAttributes():\n setattr(event, attribute_name, attribute_value)", - "docstring": "Reads the data into the event.\n\n This function is intended to offer backwards compatible event behavior.\n\n Args:\n event (EventObject): event." - }, - { - "code": "def log(self, _from=None, to=None):\n command = [\"git\", \"log\"]\n if _from:\n to = \"HEAD\" if not to else to\n revision_range = f\"{_from}..{to}\"\n command.append(revision_range)\n git_log_text = _run_command(command)\n commit_text_lst = _extract_commit_texts(git_log_text)\n return [Commit(commit_text) for commit_text in commit_text_lst]", - "docstring": "Run git-log." - }, - { - "code": "def join(input_files, output_file):\n final_features = []\n for file in input_files:\n with open(file) as f:\n feat_collection = geojson.load(f)\n final_features += feat_collection['features']\n feat_collection['features'] = final_features\n with open(output_file, 'w') as f:\n geojson.dump(feat_collection, f)", - "docstring": "Join geojsons into one. The spatial reference system of the output file is the same\n as the one of the last file in the list.\n\n Args:\n input_files (list): List of file name strings.\n output_file (str): Output file name." 
- }, - { - "code": "def encode_many(chord_labels, reduce_extended_chords=False):\n num_items = len(chord_labels)\n roots, basses = np.zeros([2, num_items], dtype=np.int)\n semitones = np.zeros([num_items, 12], dtype=np.int)\n local_cache = dict()\n for i, label in enumerate(chord_labels):\n result = local_cache.get(label, None)\n if result is None:\n result = encode(label, reduce_extended_chords)\n local_cache[label] = result\n roots[i], semitones[i], basses[i] = result\n return roots, semitones, basses", - "docstring": "Translate a set of chord labels to numerical representations for sane\n evaluation.\n\n Parameters\n ----------\n chord_labels : list\n Set of chord labels to encode.\n reduce_extended_chords : bool\n Whether to map the upper voicings of extended chords (9's, 11's, 13's)\n to semitone extensions.\n (Default value = False)\n\n Returns\n -------\n root_number : np.ndarray, dtype=int\n Absolute semitone of the chord's root.\n interval_bitmap : np.ndarray, dtype=int\n 12-dim vector of relative semitones in the given chord quality.\n bass_number : np.ndarray, dtype=int\n Relative semitones of the chord's bass notes." - }, - { - "code": "def update_incoming(self, incoming_id, incoming_dict):\n return self._create_put_request(resource=INCOMINGS, billomat_id=incoming_id, send_data=incoming_dict)", - "docstring": "Updates an incoming\n\n :param incoming_id: the incoming id\n :param incoming_dict: dict\n :return: dict" - }, - { - "code": "def autoschema(self, objects, **kwargs):\n return autoschema(objects=objects, exclude_keys=self.RESTRICTED_KEYS,\n **kwargs)", - "docstring": "wrapper around utils.autoschema function" - }, - { - "code": "def connect_to_pipe(pipe_name):\n pipe_handle = windll.kernel32.CreateFileW(\n pipe_name,\n DWORD(GENERIC_READ | GENERIC_WRITE | FILE_WRITE_ATTRIBUTES),\n DWORD(0),\n None,\n DWORD(OPEN_EXISTING),\n FILE_FLAG_OVERLAPPED,\n None\n )\n if pipe_handle == INVALID_HANDLE_VALUE:\n raise Exception('Invalid handle. Connecting to pipe %r failed.' % pipe_name)\n dwMode = DWORD(PIPE_READMODE_MESSAGE)\n windll.kernel32.SetNamedPipeHandleState(\n pipe_handle,\n byref(dwMode),\n None,\n None)\n return pipe_handle", - "docstring": "Connect to a new pipe in message mode." 
- }, - { - "code": "def read_stack(stack, plyt=None, laminaprop=None, rho=None, plyts=None, laminaprops=None,\n rhos=None, offset=0., calc_scf=True):\n lam = Laminate()\n lam.offset = offset\n lam.stack = stack\n if plyts is None:\n if plyt is None:\n raise ValueError('plyt or plyts must be supplied')\n else:\n plyts = [plyt for i in stack]\n if laminaprops is None:\n if laminaprop is None:\n raise ValueError('laminaprop or laminaprops must be supplied')\n else:\n laminaprops = [laminaprop for i in stack]\n if rhos is None:\n rhos = [rho for i in stack]\n lam.plies = []\n for plyt, laminaprop, theta, rho in zip(plyts, laminaprops, stack, rhos):\n laminaprop = laminaprop\n ply = Lamina()\n ply.theta = float(theta)\n ply.h = plyt\n ply.matobj = read_laminaprop(laminaprop, rho)\n lam.plies.append(ply)\n lam.rebuild()\n lam.calc_constitutive_matrix()\n if calc_scf:\n lam.calc_scf()\n return lam", - "docstring": "Read a laminate stacking sequence data.\n\n An ``Laminate`` object is returned based on the inputs given.\n\n Parameters\n ----------\n stack : list\n Angles of the stacking sequence in degrees.\n plyt : float, optional\n When all plies have the same thickness, ``plyt`` can be supplied.\n laminaprop : tuple, optional\n When all plies have the same material properties, ``laminaprop``\n can be supplied.\n rho : float, optional\n Uniform material density to be used for all plies.\n plyts : list, optional\n A list of floats with the thickness of each ply.\n laminaprops : list, optional\n A list of tuples with a laminaprop for each ply.\n rhos : list, optional\n A list of floats with the material density of each ply.\n offset : float, optional\n Offset along the normal axis about the mid-surface, which influences\n the laminate properties.\n calc_scf : bool, optional\n If True, use :method:`.Laminate.calc_scf` to compute shear correction\n factors, otherwise the default value of 5/6 is used\n\n Notes\n -----\n ``plyt`` or ``plyts`` must be supplied\n ``laminaprop`` or ``laminaprops`` must be supplied\n\n For orthotropic plies, the ``laminaprop`` should be::\n\n laminaprop = (E11, E22, nu12, G12, G13, G23)\n\n For isotropic pliey, the ``laminaprop`` should be::\n\n laminaprop = (E, E, nu)" - }, - { - "code": "def change_combo_val(self, new_val):\n choice_obj = self._intern.value\n assert isinstance(self._intern.value, PrefChoice), 'must be a choice'\n return choice_obj.get_tuple()", - "docstring": "Checks to see if a selection is a valid index or choice of a combo\n preference" - }, - { - "code": "def value_check(arg_name, pos, allowed_values):\n def decorator(fn):\n def logic(*args, **kwargs):\n arg_count = len(args)\n if arg_count:\n if pos < arg_count:\n if args[pos] in allowed_values:\n return fn(*args, **kwargs)\n else:\n raise ValueError(\n \"'{0}' at position {1} not in allowed values {2}\".format(args[pos], pos, allowed_values))\n else:\n if arg_name in kwargs:\n value = kwargs[arg_name]\n if value in allowed_values:\n return fn(*args, **kwargs)\n else:\n raise ValueError(\"'{0}' is not an allowed kwarg\".format(arg_name))\n else:\n return fn(*args, **kwargs)\n else:\n if arg_name in kwargs:\n value = kwargs[arg_name]\n if value in allowed_values:\n return fn(*args, **kwargs)\n else:\n raise ValueError(\"'{0}' is not an allowed kwarg\".format(arg_name))\n return logic\n return decorator", - "docstring": "allows value checking at runtime for args or kwargs" - }, - { - "code": "def to_fastq_apipe_cl(sdf_file, start=None, end=None):\n cmd = [\"rtg\", \"sdf2fastq\", \"--no-gzip\", 
\"-o\", \"-\"]\n if start is not None:\n cmd += [\"--start-id=%s\" % start]\n if end is not None:\n cmd += [\"--end-id=%s\" % end]\n if is_paired(sdf_file):\n out = []\n for ext in [\"left\", \"right\"]:\n out.append(\"<(%s)\" % _rtg_cmd(cmd + [\"-i\", os.path.join(sdf_file, ext)]))\n return out\n else:\n cmd += [\"-i\", sdf_file]\n return [\"<(%s)\" % _rtg_cmd(cmd), None]", - "docstring": "Return a command lines to provide streaming fastq input.\n\n For paired end, returns a forward and reverse command line. For\n single end returns a single command line and None for the pair." - }, - { - "code": "def send_stderr(cls, sock, payload):\n cls.write_chunk(sock, ChunkType.STDERR, payload)", - "docstring": "Send the Stderr chunk over the specified socket." - }, - { - "code": "def profile_update_schema(profile):\n if profile.get('autoclear') is None:\n print(\n '{}{}Profile Update: Adding new \"autoclear\" parameter.'.format(\n c.Style.BRIGHT, c.Fore.YELLOW\n )\n )\n profile['autoclear'] = True\n for validation in profile.get('validations') or []:\n if validation.get('data_type') is None:\n print(\n '{}{}Profile Update: Adding new \"data_type\" parameter.'.format(\n c.Style.BRIGHT, c.Fore.YELLOW\n )\n )\n validation['data_type'] = 'redis'\n if profile.get('install_json') is not None and profile.get('script') is not None:\n print(\n '{}{}Removing deprecated \"script\" parameter.'.format(c.Style.BRIGHT, c.Fore.YELLOW)\n )\n profile.pop('script')", - "docstring": "Update profile to latest schema.\n\n Args:\n profile (dict): The dictionary containting the profile settings." - }, - { - "code": "def total_supply(self, block_identifier='latest'):\n return self.proxy.contract.functions.totalSupply().call(block_identifier=block_identifier)", - "docstring": "Return the total supply of the token at the given block identifier." - }, - { - "code": "def write(self, file_path, hoys=None, write_hours=False):\n if not file_path.lower().endswith('.wea'):\n file_path += '.wea'\n full_wea = False\n if not hoys:\n hoys = self.hoys\n full_wea = True\n lines = [self.header]\n if full_wea:\n for dir_rad, dif_rad, dt in zip(self.direct_normal_irradiance,\n self.diffuse_horizontal_irradiance,\n self.datetimes):\n line = \"%d %d %.3f %d %d\\n\" \\\n % (dt.month, dt.day, dt.float_hour, dir_rad, dif_rad)\n lines.append(line)\n else:\n for hoy in hoys:\n try:\n dir_rad, dif_rad = self.get_irradiance_value_for_hoy(hoy)\n except IndexError:\n print('Warn: Wea data for {} is not available!'.format(dt))\n continue\n dt = DateTime.from_hoy(hoy)\n dt = dt.add_minute(30) if self.timestep == 1 else dt\n line = \"%d %d %.3f %d %d\\n\" \\\n % (dt.month, dt.day, dt.float_hour, dir_rad, dif_rad)\n lines.append(line)\n file_data = ''.join(lines)\n write_to_file(file_path, file_data, True)\n if write_hours:\n hrs_file_path = file_path[:-4] + '.hrs'\n hrs_data = ','.join(str(h) for h in hoys) + '\\n'\n write_to_file(hrs_file_path, hrs_data, True)\n return file_path", - "docstring": "Write the wea file.\n\n WEA carries irradiance values from epw and is what gendaymtx uses to\n generate the sky." 
- }, - { - "code": "def blend(self, proportion=0.2, stratify=False, seed=100, indices=None):\n if self.use_cache:\n pdict = {'proportion': proportion, 'stratify': stratify, 'seed': seed, 'indices': indices}\n if indices is not None:\n pdict['train_index'] = np_hash(indices[0])\n pdict['test_index'] = np_hash(indices[1])\n dhash = self._dhash(pdict)\n c = Cache(dhash, prefix='b')\n if c.available:\n logger.info('Loading %s\\'s blend results from cache.' % self._name)\n train = c.retrieve('train')\n test = c.retrieve('test')\n y_train = c.retrieve('y_train')\n return Dataset(X_train=train, y_train=y_train, X_test=test)\n elif not self.dataset.loaded:\n self.dataset.load()\n X_train, y_train, X_test, y_test = self.dataset.split(test_size=proportion, stratify=stratify,\n seed=seed, indices=indices)\n xt_shape = X_test.shape[0]\n x_t = concat(X_test, self.dataset.X_test)\n prediction_concat = reshape_1d(self._predict(X_train, y_train, x_t))\n new_train, new_test = tsplit(prediction_concat, xt_shape)\n if self.use_cache:\n c.store('train', new_train)\n c.store('test', new_test)\n c.store('y_train', y_test)\n return Dataset(new_train, y_test, new_test)", - "docstring": "Blend a single model.\n You should rarely be using this method. Use `ModelsPipeline.blend` instead.\n\n Parameters\n ----------\n proportion : float, default 0.2\n Test size holdout.\n stratify : bool, default False\n seed : int, default 100\n indices : list(np.ndarray,np.ndarray), default None\n Two numpy arrays that contain indices for train/test slicing. (train_index,test_index)\n\n Returns\n -------\n `Dataset`" - }, - { - "code": "def ListHuntApprovals(context=None):\n items = context.SendIteratorRequest(\"ListHuntApprovals\",\n hunt_pb2.ApiListHuntApprovalsArgs())\n def MapHuntApproval(data):\n return HuntApproval(data=data, username=context.username, context=context)\n return utils.MapItemsIterator(MapHuntApproval, items)", - "docstring": "List all hunt approvals belonging to requesting user." - }, - { - "code": "def _login_request(self, username=None, secret=None):\n url = 'http://' + self._host + '/login_sid.lua'\n params = {}\n if username:\n params['username'] = username\n if secret:\n params['response'] = secret\n plain = self._request(url, params)\n dom = xml.dom.minidom.parseString(plain)\n sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)\n challenge = get_text(\n dom.getElementsByTagName('Challenge')[0].childNodes)\n return (sid, challenge)", - "docstring": "Send a login request with paramerters." 
- }, - { - "code": "def add(self, src):\n if not audio.get_type(src):\n raise TypeError('The type of this file is not supported.')\n return super().add(src)", - "docstring": "store an audio file to storage dir\n\n :param src: audio file path\n :return: checksum value" - }, - { - "code": "def get_isa(self, oneq_type: str = 'Xhalves',\n twoq_type: str = 'CZ') -> ISA:\n return self.device.get_isa(oneq_type=oneq_type, twoq_type=twoq_type)", - "docstring": "Return a target ISA for this QuantumComputer's device.\n\n See :py:func:`AbstractDevice.get_isa` for more.\n\n :param oneq_type: The family of one-qubit gates to target\n :param twoq_type: The family of two-qubit gates to target" - }, - { - "code": "def all(self, *, collection, attribute, word, func=None, operation=None):\n return self.iterable('all', collection=collection, attribute=attribute,\n word=word, func=func, operation=operation)", - "docstring": "Performs a filter with the OData 'all' keyword on the collection\n\n For example:\n q.any(collection='email_addresses', attribute='address',\n operation='eq', word='george@best.com')\n\n will transform to a filter such as:\n\n emailAddresses/all(a:a/address eq 'george@best.com')\n\n :param str collection: the collection to apply the any keyword on\n :param str attribute: the attribute of the collection to check\n :param str word: the word to check\n :param str func: the logical function to apply to the attribute\n inside the collection\n :param str operation: the logical operation to apply to the\n attribute inside the collection\n :rtype: Query" - }, - { - "code": "def set_logger(name,\n level='INFO',\n fmt=None,\n datefmt=None,\n propagate=1,\n remove_handlers=False):\n logger = logging.getLogger(name)\n logger.setLevel(getattr(logging, level))\n logger.propagate = propagate\n if remove_handlers:\n logger.handlers = []\n return\n handler = None\n for h in logger.handlers:\n if isinstance(h, logging.StreamHandler):\n handler = h\n break\n if not handler:\n handler = logging.StreamHandler()\n logger.addHandler(handler)\n formatter_kwgs = {}\n for i in ('fmt', 'datefmt'):\n if locals()[i] is not None:\n formatter_kwgs[i] = locals()[i]\n handler.setFormatter(BaseFormatter(**formatter_kwgs))", - "docstring": "This function will clear the previous handlers and set only one handler,\n which will only be StreamHandler for the logger.\n\n This function is designed to be able to called multiple times in a context.\n\n Note that if a logger has no handlers, it will be added a handler automatically when it is used." 
- }, - { - "code": "def read_cpp_source_file(self, source_file):\n xml_file = ''\n try:\n ffname = self.__file_full_name(source_file)\n self.logger.debug(\"Reading source file: [%s].\", ffname)\n decls = self.__dcache.cached_value(ffname, self.__config)\n if not decls:\n self.logger.debug(\n \"File has not been found in cache, parsing...\")\n xml_file = self.create_xml_file(ffname)\n decls, files = self.__parse_xml_file(xml_file)\n self.__dcache.update(\n ffname, self.__config, decls, files)\n else:\n self.logger.debug((\n \"File has not been changed, reading declarations \" +\n \"from cache.\"))\n except Exception:\n if xml_file:\n utils.remove_file_no_raise(xml_file, self.__config)\n raise\n if xml_file:\n utils.remove_file_no_raise(xml_file, self.__config)\n return decls", - "docstring": "Reads C++ source file and returns declarations tree\n\n :param source_file: path to C++ source file\n :type source_file: str" - }, - { - "code": "def update_spec(self):\n if self.datafile.exists:\n with self.datafile.reader as r:\n self.header_lines = r.info['header_rows']\n self.comment_lines = r.info['comment_rows']\n self.start_line = r.info['data_start_row']\n self.end_line = r.info['data_end_row']", - "docstring": "Update the source specification with information from the row intuiter, but only if the spec values\n are not already set." - }, - { - "code": "def _build_color_table():\n FG = FOREGROUND_COLOR\n BG = BACKROUND_COLOR\n return [\n (0x00, 0x00, 0x00, FG.BLACK, BG.BLACK),\n (0x00, 0x00, 0xaa, FG.BLUE, BG.BLUE),\n (0x00, 0xaa, 0x00, FG.GREEN, BG.GREEN),\n (0x00, 0xaa, 0xaa, FG.CYAN, BG.CYAN),\n (0xaa, 0x00, 0x00, FG.RED, BG.RED),\n (0xaa, 0x00, 0xaa, FG.MAGENTA, BG.MAGENTA),\n (0xaa, 0xaa, 0x00, FG.YELLOW, BG.YELLOW),\n (0x88, 0x88, 0x88, FG.GRAY, BG.GRAY),\n (0x44, 0x44, 0xff, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY),\n (0x44, 0xff, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY),\n (0x44, 0xff, 0xff, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY),\n (0xff, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY),\n (0xff, 0x44, 0xff, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY),\n (0xff, 0xff, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY),\n (0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY),\n (0xff, 0xff, 0xff, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY),\n ]", - "docstring": "Build an RGB-to-256 color conversion table" - }, - { - "code": "def parallel(func, inputs, n_jobs, expand_args=False):\n if expand_args:\n return Parallel(n_jobs=n_jobs)(delayed(func)(*args) for args in inputs)\n else:\n return Parallel(n_jobs=n_jobs)(delayed(func)(arg) for arg in inputs)", - "docstring": "Convenience wrapper around joblib's parallelization." - }, - { - "code": "def _default_styles_xml(cls):\n path = os.path.join(\n os.path.split(__file__)[0], '..', 'templates',\n 'default-styles.xml'\n )\n with open(path, 'rb') as f:\n xml_bytes = f.read()\n return xml_bytes", - "docstring": "Return a bytestream containing XML for a default styles part." 
- }, - { - "code": "def get_version(path) -> str:\n if path and os.path.exists(path):\n with open(path) as pkg:\n package_dict = json.load(pkg)\n version = package_dict.get('version')\n else:\n version = 'not available'\n return version", - "docstring": "Reads the version field from a package file\n\n :param path: the path to a valid package.json file\n :return: the version string or \"unknown\"" - }, - { - "code": "def on_failure(self, exc, task_id, args, kwargs, einfo):\n super().on_failure(exc, task_id, args, kwargs, einfo)\n if isinstance(exc, OperationRunDoesNotExist):\n return\n self._operation_run.on_failure()", - "docstring": "Update query status and send email notification to a user" - }, - { - "code": "def ilist(self, in_list=[]):\n new_list = IList(in_list)\n new_list.set_std(self.features.get('casemapping'))\n if not self._casemap_set:\n self._imaps.append(new_list)\n return new_list", - "docstring": "Return a list that uses this server's IRC casemapping.\n\n All strings in this list are lowercased using the server's casemapping before inserting\n them into the list, and the ``in`` operator takes casemapping into account." - }, - { - "code": "def remove(self, obj, safe=None):\n\t\tif safe is None:\n\t\t\tsafe = self.safe\n\t\tremove = RemoveDocumentOp(self.transaction_id, self, obj, safe)\n\t\tself.queue.append(remove)\n\t\tif self.autoflush:\n\t\t\treturn self.flush()", - "docstring": "Remove a particular object from the database. If the object has\n\t\t\tno mongo ID set, the method just returns. If this is a partial\n\t\t\tdocument without the mongo ID field retrieved a ``FieldNotRetrieved``\n\t\t\twill be raised\n\n\t\t\t:param obj: the object to save\n\t\t\t:param safe: whether to wait for the operation to complete. Defaults \\\n\t\t\t\tto the session's ``safe`` value." - }, - { - "code": "def confirm_tell(self, data, success):\n logger.info(\"confirm_tell(success=%s) [lid=\\\"%s\\\",pid=\\\"%s\\\"]\", success, data[P_ENTITY_LID], data[P_LID])\n evt = self._request_point_confirm_tell(R_CONTROL, data[P_ENTITY_LID], data[P_LID], success, data['requestId'])\n self._wait_and_except_if_failed(evt)", - "docstring": "Confirm that you've done as you were told. Call this from your control callback to confirm action.\n Used when you are advertising a control and you want to tell the remote requestor that you have\n done what they asked you to.\n\n `Example:` this is a minimal example to show the idea. 
Note - no Exception handling and ugly use of globals\n\n #!python\n client = None\n\n def controlreq_cb(args):\n global client # the client object you connected with\n\n # perform your action with the data they sent\n success = do_control_action(args['data'])\n\n if args['confirm']: # you've been asked to confirm\n client.confirm_tell(args, success)\n # else, if you do not confirm_tell() this causes a timeout at the requestor's end.\n\n client = IOT.Client(config='test.ini')\n thing = client.create_thing('test321')\n control = thing.create_control('control', controlreq_cb)\n\n Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)\n containing the error if the infrastructure detects a problem\n\n Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)\n if there is a communications problem between you and the infrastructure\n\n `data` (mandatory) (dictionary) The `\"args\"` dictionary that your callback was called with\n\n `success` (mandatory) (boolean) Whether or not the action you have been asked to do has been\n sucessful.\n\n More details on the contents of the `data` dictionary for controls see:\n [create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control)" - }, - { - "code": "def _with_inline(func, admin_site, metadata_class, inline_class):\n def register(model_or_iterable, admin_class=None, **options):\n func(model_or_iterable, admin_class, **options)\n _monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)\n return register", - "docstring": "Decorator for register function that adds an appropriate inline." - }, - { - "code": "def _tool_classpath(self, tool, products, scheduler):\n classpath = self.tool_classpath_from_products(products,\n self.versioned_tool_name(tool, self.version),\n scope=self.options_scope)\n classpath = tuple(fast_relpath(c, get_buildroot()) for c in classpath)\n return self._memoized_scalac_classpath(classpath, scheduler)", - "docstring": "Return the proper classpath based on products and scala version." - }, - { - "code": "def tmp(p_queue, host=None):\n if host is not None:\n return _path(_c.FSQ_TMP, root=_path(host, root=hosts(p_queue)))\n return _path(p_queue, _c.FSQ_TMP)", - "docstring": "Construct a path to the tmp dir for a queue" - }, - { - "code": "def _api_type(self, value):\n if isinstance(value, six.string_types):\n return 'string'\n elif isinstance(value, six.integer_types):\n return 'integer'\n elif type(value) is datetime.datetime:\n return 'date'", - "docstring": "Returns the API type of the given value based on its python type." - }, - { - "code": "def split_message(message, max_length):\n if len(message) > max_length:\n for message in textwrap.wrap(message, max_length):\n yield message\n else:\n yield message.rstrip(STRIPPED_CHARS)", - "docstring": "Split long messages" - }, - { - "code": "def output_aliases(aliases):\n for alias in aliases:\n cmd = '!legit ' + alias\n click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None]))", - "docstring": "Display git aliases" - }, - { - "code": "def add_data(self, data):\n df = pd.DataFrame(data)\n name, dtype = self.get_singular_and_plural_dtype(self.dtype)\n if name in df.columns:\n df.index = df[name]\n df.index.name = name + \" name\"\n self.df = df", - "docstring": "Add df to a MagicDataFrame using a data list.\n\n Parameters\n ----------\n data : list of dicts\n data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ... 
}]\n dtype : str\n MagIC table type" - }, - { - "code": "def _Insert(cursor, table, values):\n precondition.AssertIterableType(values, dict)\n if not values:\n return\n column_names = list(sorted(values[0]))\n for value_dict in values:\n if set(column_names) != set(value_dict):\n raise ValueError(\"Given value dictionaries must have identical keys. \"\n \"Expecting columns {!r}, but got value {!r}\".format(\n column_names, value_dict))\n query = \"INSERT IGNORE INTO %s {cols} VALUES {vals}\" % table\n query = query.format(\n cols=mysql_utils.Columns(column_names),\n vals=mysql_utils.Placeholders(num=len(column_names), values=len(values)))\n values_list = []\n for values_dict in values:\n values_list.extend(values_dict[column] for column in column_names)\n cursor.execute(query, values_list)", - "docstring": "Inserts one or multiple rows into the given table.\n\n Args:\n cursor: The MySQL cursor to perform the insertion.\n table: The table name, where rows should be inserted.\n values: A list of dicts, associating column names to values." - }, - { - "code": "def wbmax(self, value=None):\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `wbmax`'.format(value))\n self._wbmax = value", - "docstring": "Corresponds to IDD Field `wbmax`\n Extreme maximum wet-bulb temperature\n\n Args:\n value (float): value for IDD Field `wbmax`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\n Raises:\n ValueError: if `value` is not a valid value" - }, - { - "code": "def resolve(self, pid):\n client = d1_cli.impl.client.CLICNClient(\n **self._cn_client_connect_params_from_session()\n )\n object_location_list_pyxb = client.resolve(pid)\n for location in object_location_list_pyxb.objectLocation:\n d1_cli.impl.util.print_info(location.url)", - "docstring": "Get Object Locations for Object." - }, - { - "code": "def sharp(self) -> 'Channel':\n r\n N = self.qubit_nb\n tensor = self.tensor\n tensor = bk.reshape(tensor, [2**N] * 4)\n tensor = bk.transpose(tensor, (0, 2, 1, 3))\n tensor = bk.reshape(tensor, [2] * 4 * N)\n return Channel(tensor, self.qubits)", - "docstring": "r\"\"\"Return the 'sharp' transpose of the superoperator.\n\n The transpose :math:`S^\\#` switches the two covariant (bra)\n indices of the superoperator. (Which in our representation\n are the 2nd and 3rd super-indices)\n\n If :math:`S^\\#` is Hermitian, then :math:`S` is a Hermitian-map\n (i.e. transforms Hermitian operators to hJrmitian operators)\n\n Flattening the :math:`S^\\#` superoperator to a matrix gives\n the Choi matrix representation. (See channel.choi())" - }, - { - "code": "def cell_sizes_vecs(self):\n csizes = []\n for ax, cvec in enumerate(self.grid.coord_vectors):\n if len(cvec) == 1:\n csizes.append(np.array([0.0]))\n else:\n csize = np.empty_like(cvec)\n csize[1:-1] = (cvec[2:] - cvec[:-2]) / 2.0\n csize[0] = (cvec[0] + cvec[1]) / 2 - self.min()[ax]\n csize[-1] = self.max()[ax] - (cvec[-2] + cvec[-1]) / 2\n csizes.append(csize)\n return tuple(csizes)", - "docstring": "Return the cell sizes as coordinate vectors.\n\n Returns\n -------\n csizes : tuple of `numpy.ndarray`'s\n The cell sizes per axis. 
The length of the vectors is the\n same as the corresponding ``grid.coord_vectors``.\n For axes with 1 grid point, cell size is set to 0.0.\n\n Examples\n --------\n We create a partition of the rectangle [0, 1] x [-1, 2] into\n 2 x 3 cells with the grid points [0, 1] x [-1, 0, 2]. This\n implies that the cell boundaries are given as\n [0, 0.5, 1] x [-1, -0.5, 1, 2], hence the cell size vectors\n are [0.5, 0.5] x [0.5, 1.5, 1]:\n\n >>> rect = odl.IntervalProd([0, -1], [1, 2])\n >>> grid = odl.RectGrid([0, 1], [-1, 0, 2])\n >>> part = odl.RectPartition(rect, grid)\n >>> part.cell_boundary_vecs\n (array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ]))\n >>> part.cell_sizes_vecs\n (array([ 0.5, 0.5]), array([ 0.5, 1.5, 1. ]))" - }, - { - "code": "def _control_resp(self, data, state):\n if _is_control_response(data):\n ack_state = bytes([data[22]])\n if state == ack_state:\n _LOGGER.debug(\"Received state ack from %s, state: %s\",\n self.host, ord(ack_state))\n return ack_state", - "docstring": "Handle a control response.\n\n :param data: Payload.\n :param state: Requested state.\n :returns: Acknowledged state." - }, - { - "code": "def install_git(path):\n hook = op.join(path, 'pre-commit')\n with open(hook, 'w') as fd:\n fd.write(\n)\n chmod(hook, 484)", - "docstring": "Install hook in Git repository." - }, - { - "code": "def _shuffle(y, labels, random_state):\n if labels is None:\n ind = random_state.permutation(len(y))\n else:\n ind = np.arange(len(labels))\n for label in np.unique(labels):\n this_mask = (labels == label)\n ind[this_mask] = random_state.permutation(ind[this_mask])\n return y[ind]", - "docstring": "Return a shuffled copy of y eventually shuffle among same labels." - }, - { - "code": "def run_migration(connection, queries, engine):\n with connection.cursor() as cursorMig:\n queries = parse_statements(queries, engine)\n for query in queries:\n cursorMig.execute(query)\n connection.commit()\n return True", - "docstring": "Apply a migration to the SQL server" - }, - { - "code": "def primitives(self):\n prim_molecules = [\n p.primitive for p in self._molecules if hasattr(p, 'primitive')]\n prim_assembly = Assembly(molecules=prim_molecules, assembly_id=self.id)\n return prim_assembly", - "docstring": "Generates a new `Assembly` containing the primitives of each Polymer.\n\n Notes\n -----\n Metadata is not currently preserved from the parent object.\n\n Returns\n -------\n prim_assembly : ampal.Protein\n `Assembly` containing only the primitives of the `Polymers`\n in the original `Assembly`." - }, - { - "code": "def transform_incoming(self, son, collection):\n if not \"_id\" in son:\n return son\n transformed = SON({\"_id\": son[\"_id\"]})\n transformed.update(son)\n return transformed", - "docstring": "Move _id to the front if it's there." - }, - { - "code": "def response_path_as_list(path: ResponsePath) -> List[Union[str, int]]:\n flattened: List[Union[str, int]] = []\n append = flattened.append\n curr: Optional[ResponsePath] = path\n while curr:\n append(curr.key)\n curr = curr.prev\n return flattened[::-1]", - "docstring": "Get response path as a list.\n\n Given a ResponsePath (found in the `path` entry in the information provided as the\n last argument to a field resolver), return a list of the path keys." - }, - { - "code": "def get(self, title):\n key = '/library/sections/%s/all' % self.key\n return self.fetchItem(key, title__iexact=title)", - "docstring": "Returns the media item with the specified title.\n\n Parameters:\n title (str): Title of the item to return." 
- }, - { - "code": "def _info(self, args, **extra_args):\n if not isinstance(args, argparse.Namespace):\n raise logger.error(Exception(\"args should of an instance of argparse.Namespace\"))\n logger.info(\"Freight Forwarder: {0}\".format(VERSION))\n logger.info(\"docker-py: {0}\".format(docker_py_version))\n logger.info(\"Docker Api: {0}\".format(DOCKER_API_VERSION))\n logger.info(\"{0} version: {1}\".format(platform.python_implementation(), platform.python_version()))", - "docstring": "Print freight forwarder info to the user." - }, - { - "code": "def concat(cartesians, ignore_index=False, keys=None):\n frames = [molecule._frame for molecule in cartesians]\n new = pd.concat(frames, ignore_index=ignore_index, keys=keys,\n verify_integrity=True)\n if type(ignore_index) is bool:\n new = pd.concat(frames, ignore_index=ignore_index, keys=keys,\n verify_integrity=True)\n else:\n new = pd.concat(frames, ignore_index=True, keys=keys,\n verify_integrity=True)\n if type(ignore_index) is int:\n new.index = range(ignore_index,\n ignore_index + len(new))\n else:\n new.index = ignore_index\n return cartesians[0].__class__(new)", - "docstring": "Join list of cartesians into one molecule.\n\n Wrapper around the :func:`pandas.concat` function.\n Default values are the same as in the pandas function except for\n ``verify_integrity`` which is set to true in case of this library.\n\n Args:\n ignore_index (sequence, bool, int): If it is a boolean, it\n behaves like in the description of\n :meth:`pandas.DataFrame.append`.\n If it is a sequence, it becomes the new index.\n If it is an integer,\n ``range(ignore_index, ignore_index + len(new))``\n becomes the new index.\n keys (sequence): If multiple levels passed, should contain tuples.\n Construct hierarchical index using the passed keys as\n the outermost level\n\n Returns:\n Cartesian:" - }, - { - "code": "def cancel(self, run_id):\n postresult = requests.post(\"%s://%s/ga4gh/wes/v1/runs/%s/cancel\" % (self.proto, self.host, run_id),\n headers=self.auth)\n return wes_reponse(postresult)", - "docstring": "Cancel a running workflow.\n\n :param run_id: String (typically a uuid) identifying the run.\n :param str auth: String to send in the auth header.\n :param proto: Schema where the server resides (http, https)\n :param host: Port where the post request will be sent and the wes server listens at (default 8080)\n :return: The body of the delete result as a dictionary." - }, - { - "code": "def one_mask(self):\n accum = 0\n for i in range(self.data.itemsize):\n accum += (0xAA << (i << 3))\n return accum", - "docstring": "Return a mask to determine whether an array chunk has any ones." 
- }, - { - "code": "def discretize_bspline(control,\n knots,\n count=None,\n scale=1.0):\n from scipy.interpolate import splev\n control = np.asanyarray(control, dtype=np.float64)\n degree = len(knots) - len(control) - 1\n if count is None:\n norm = np.linalg.norm(np.diff(control, axis=0), axis=1).sum()\n count = int(np.clip(norm / (res.seg_frac * scale),\n res.min_sections * len(control),\n res.max_sections * len(control)))\n ipl = np.linspace(knots[0], knots[-1], count)\n discrete = splev(ipl, [knots, control.T, degree])\n discrete = np.column_stack(discrete)\n return discrete", - "docstring": "Given a B-Splines control points and knot vector, return\n a sampled version of the curve.\n\n Parameters\n ----------\n control : (o, d) float\n Control points of the b- spline\n knots : (j,) float\n B-spline knots\n count : int\n Number of line segments to discretize the spline\n If not specified will be calculated as something reasonable\n\n Returns\n ----------\n discrete : (count, dimension) float\n Points on a polyline version of the B-spline" - }, - { - "code": "def peddy_general_stats_table(self):\n family_ids = [ x.get('family_id') for x in self.peddy_data.values() ]\n headers = OrderedDict()\n headers['family_id'] = {\n 'title': 'Family ID',\n 'hidden': True if all([v == family_ids[0] for v in family_ids]) else False\n }\n headers['ancestry-prediction'] = {\n 'title': 'Ancestry',\n 'description': 'Ancestry Prediction',\n }\n headers['ancestry-prob_het_check'] = {\n 'title': 'P(Ancestry)',\n 'description': 'Probability predicted ancestry is correct.'\n }\n headers['sex_het_ratio'] = {\n 'title': 'Sex / Het Ratio',\n }\n headers['error_sex_check'] = {\n 'title': 'Correct Sex',\n 'description': 'Displays False if error in sample sex prediction',\n }\n headers['predicted_sex_sex_check'] = {\n 'title': 'Sex',\n 'description': 'Predicted sex'\n }\n self.general_stats_addcols(self.peddy_data, headers)", - "docstring": "Take the parsed stats from the Peddy report and add it to the\n basic stats table at the top of the report" - }, - { - "code": "def _read_pug_fixed_grid(projection, distance_multiplier=1.0):\n a = projection.semi_major_axis\n h = projection.perspective_point_height\n b = projection.semi_minor_axis\n lon_0 = projection.longitude_of_projection_origin\n sweep_axis = projection.sweep_angle_axis[0]\n proj_dict = {'a': float(a) * distance_multiplier,\n 'b': float(b) * distance_multiplier,\n 'lon_0': float(lon_0),\n 'h': float(h) * distance_multiplier,\n 'proj': 'geos',\n 'units': 'm',\n 'sweep': sweep_axis}\n return proj_dict", - "docstring": "Read from recent PUG format, where axes are in meters" - }, - { - "code": "def can_play_notes(self, notes):\n if hasattr(notes, 'notes'):\n notes = notes.notes\n if type(notes) != list:\n notes = [notes]\n for n in notes:\n if not self.note_in_range(n):\n return False\n return True", - "docstring": "Test if the notes lie within the range of the instrument.\n\n Return True if so, False otherwise." 
- }, - { - "code": "def generate_type(self):\n types = enforce_list(self._definition['type'])\n try:\n python_types = ', '.join(JSON_TYPE_TO_PYTHON_TYPE[t] for t in types)\n except KeyError as exc:\n raise JsonSchemaDefinitionException('Unknown type: {}'.format(exc))\n extra = ''\n if 'integer' in types:\n extra += ' and not (isinstance({variable}, float) and {variable}.is_integer())'.format(\n variable=self._variable,\n )\n if ('number' in types or 'integer' in types) and 'boolean' not in types:\n extra += ' or isinstance({variable}, bool)'.format(variable=self._variable)\n with self.l('if not isinstance({variable}, ({})){}:', python_types, extra):\n self.l('raise JsonSchemaException(\"{name} must be {}\")', ' or '.join(types))", - "docstring": "Validation of type. Can be one type or list of types.\n\n Since draft 06 a float without fractional part is an integer.\n\n .. code-block:: python\n\n {'type': 'string'}\n {'type': ['string', 'number']}" - }, - { - "code": "def find_ribosomal(rps, scaffolds, s2rp, min_hits, max_hits_rp, max_errors):\n for scaffold, proteins in list(s2rp.items()):\n hits = {p: [i for i in sorted(hits, key = itemgetter(10))][0:max_hits_rp]\n for p, hits in list(proteins.items()) if len(hits) > 0}\n if len(hits) < min_hits:\n continue\n best = sorted([hit[0] + [p]\n for p, hit in list(hits.items())], key = itemgetter(10))[0]\n block = find_block(rps, scaffolds[scaffold], hits, best, max_errors)\n if (len(block) - 1) >= min_hits:\n yield scaffold, block", - "docstring": "determine which hits represent real ribosomal proteins, identify each in syntenic block\n max_hits_rp = maximum number of hits to consider per ribosomal protein per scaffold" - }, - { - "code": "def export_compact(model_path):\n pred_config = PredictConfig(\n session_init=get_model_loader(model_path),\n model=Model(),\n input_names=['input_img'],\n output_names=['prediction_img'])\n ModelExporter(pred_config).export_compact('/tmp/compact_graph.pb')", - "docstring": "Export trained model to use it as a frozen and pruned inference graph in\n mobile applications." - }, - { - "code": "def raise_error(error_type: str) -> None:\n try:\n error = next((v for k, v in ERROR_CODES.items() if k in error_type))\n except StopIteration:\n error = AirVisualError\n raise error(error_type)", - "docstring": "Raise the appropriate error based on error message." - }, - { - "code": "def create_from_eflux(cls, params, emin, emax, eflux, scale=1.0):\n params = params.copy()\n params[0] = 1.0\n params[0] = eflux / cls.eval_eflux(emin, emax, params, scale=scale)\n return cls(params, scale)", - "docstring": "Create a spectral function instance given its energy flux." - }, - { - "code": "def get_center(self):\n lr_x, lr_y = self.get_vec_lr()\n tb_x, tb_y = self.get_vec_tb()\n center_x = self.x + (lr_x + tb_x) / 2.0\n center_y = self.y + (lr_y + tb_y) / 2.0\n return center_x, center_y", - "docstring": "Returns rectangle center" - }, - { - "code": "def parse_timestamp(x):\n dt = dateutil.parser.parse(x)\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return dt", - "docstring": "Parse ISO8601 formatted timestamp." 
- }, - { - "code": "def set_to_public(self, request, queryset):\n queryset.update(is_public=True, modified=now())", - "docstring": "Set one or several releases to public" - }, - { - "code": "def set_acl(self, acl, mode):\n if not isinstance(acl, basestring):\n raise TypeError(\"acl can only be an instance of type basestring\")\n if not isinstance(mode, baseinteger):\n raise TypeError(\"mode can only be an instance of type baseinteger\")\n self._call(\"setACL\",\n in_p=[acl, mode])", - "docstring": "Sets the ACL of this file.\n\n in acl of type str\n The ACL specification string. To-be-defined.\n\n in mode of type int\n UNIX-style mode mask to use if @a acl is empty. As mention in\n :py:func:`IGuestSession.directory_create` this is realized on\n a best effort basis and the exact behavior depends on the Guest OS.\n\n raises :class:`OleErrorNotimpl`\n The method is not implemented yet." - }, - { - "code": "def get_current_orga(request, hproject, availableOrga):\n if len(availableOrga) == 0:\n raise Http404\n currentOrgaId = request.session.get('plugit-orgapk-' + str(hproject.pk), None)\n if currentOrgaId is None:\n (tmpOrga, _) = availableOrga[0]\n currentOrgaId = tmpOrga.pk\n else:\n availableOrgaIds = [o.pk for (o, r) in availableOrga]\n if currentOrgaId not in availableOrgaIds:\n (tmpOrga, _) = availableOrga[0]\n currentOrgaId = tmpOrga.pk\n from organizations.models import Organization\n realCurrentOrga = get_object_or_404(Organization, pk=currentOrgaId)\n return realCurrentOrga", - "docstring": "Return the current orga to use" - }, - { - "code": "def pin_add(self, path, *paths, **kwargs):\n if \"recursive\" in kwargs:\n kwargs.setdefault(\"opts\", {\"recursive\": kwargs.pop(\"recursive\")})\n args = (path,) + paths\n return self._client.request('/pin/add', args, decoder='json', **kwargs)", - "docstring": "Pins objects to local storage.\n\n Stores an IPFS object(s) from a given path locally to disk.\n\n .. code-block:: python\n\n >>> c.pin_add(\"QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d\")\n {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}\n\n Parameters\n ----------\n path : str\n Path to object(s) to be pinned\n recursive : bool\n Recursively unpin the object linked to by the specified object(s)\n\n Returns\n -------\n dict : List of IPFS objects that have been pinned" - }, - { - "code": "def remove_plugin(self, name, force=False):\n url = self._url('/plugins/{0}', name)\n res = self._delete(url, params={'force': force})\n self._raise_for_status(res)\n return True", - "docstring": "Remove an installed plugin.\n\n Args:\n name (string): Name of the plugin to remove. The ``:latest``\n tag is optional, and is the default if omitted.\n force (bool): Disable the plugin before removing. This may\n result in issues if the plugin is in use by a container.\n\n Returns:\n ``True`` if successful" - }, - { - "code": "def hkeys(self, key, *, encoding=_NOTSET):\n return self.execute(b'HKEYS', key, encoding=encoding)", - "docstring": "Get all the fields in a hash." 
- }, - { - "code": "def read_busiest_date(path: str) -> Tuple[datetime.date, FrozenSet[str]]:\n feed = load_raw_feed(path)\n return _busiest_date(feed)", - "docstring": "Find the earliest date with the most trips" - }, - { - "code": "def SetSerializersProfiler(self, serializers_profiler):\n self._serializers_profiler = serializers_profiler\n if self._storage_file:\n self._storage_file.SetSerializersProfiler(serializers_profiler)", - "docstring": "Sets the serializers profiler.\n\n Args:\n serializers_profiler (SerializersProfiler): serializers profiler." - }, - { - "code": "def textgetter(path: str, *,\n default: T=NO_DEFAULT,\n strip: bool=False) -> t.Callable[[Element], t.Union[str, T]]:\n find = compose(\n str.strip if strip else identity,\n partial(_raise_if_none, exc=LookupError(path)),\n methodcaller('findtext', path)\n )\n return (find if default is NO_DEFAULT else lookup_defaults(find, default))", - "docstring": "shortcut for making an XML element text getter" - }, - { - "code": "def normalize_tuple_slice(node):\n if not any(isinstance(elt, ast.Slice) for elt in node.elts):\n return ast.Index(value=node)\n return ast.ExtSlice(\n [\n elt if isinstance(elt, ast.Slice) else ast.Index(value=elt)\n for elt in node.elts\n ]\n )", - "docstring": "Normalize an ast.Tuple node representing the internals of a slice.\n\n Returns the node wrapped in an ast.Index.\n Returns an ExtSlice node built from the tuple elements if there are any\n slices." - }, - { - "code": "def visit_List(self, node):\n if node.elts:\n return list(set(sum([self.visit(elt) for elt in node.elts], [])))\n else:\n return [frozenset()]", - "docstring": "List construction depend on each elements type dependency." - }, - { - "code": "def removeService(self, service):\n for name, wrapper in self.services.iteritems():\n if service in (name, wrapper.service):\n del self.services[name]\n return\n raise NameError(\"Service %r not found\" % (service,))", - "docstring": "Removes a service from the gateway.\n\n @param service: Either the name or t of the service to remove from the\n gateway, or .\n @type service: C{callable} or a class instance\n @raise NameError: Service not found." - }, - { - "code": "def filesystem_from_config_dict(config_fs):\n if \"module\" not in config_fs:\n print(\"Key 'module' should be defined for the filesystem provider ('fs' configuration option)\", file=sys.stderr)\n exit(1)\n filesystem_providers = get_filesystems_providers()\n if config_fs[\"module\"] not in filesystem_providers:\n print(\"Unknown filesystem provider \"+config_fs[\"module\"], file=sys.stderr)\n exit(1)\n fs_class = filesystem_providers[config_fs[\"module\"]]\n fs_args_needed = fs_class.get_needed_args()\n fs_args = {}\n for arg_name, (arg_type, arg_required, _) in fs_args_needed.items():\n if arg_name in config_fs:\n fs_args[arg_name] = arg_type(config_fs[arg_name])\n elif arg_required:\n print(\"fs option {} is required\".format(arg_name), file=sys.stderr)\n exit(1)\n try:\n return fs_class.init_from_args(**fs_args)\n except:\n print(\"Unable to load class \" + config_fs[\"module\"], file=sys.stderr)\n raise", - "docstring": "Given a dict containing an entry \"module\" which contains a FSProvider identifier, parse the configuration and returns a fs_provider.\n Exits if there is an error." 
- }, - { - "code": "def json_decoder_hook(dct, str_decoders=STRING_DECODERS,\n converters=MappingProxyType(dict())) -> dict:\n for k, v in dct.items():\n if k in converters:\n parse_func = converters[k]\n dct[k] = parse_func(v)\n elif isinstance(v, str):\n for decode_func in str_decoders:\n v = decode_func(v)\n if not isinstance(v, str):\n break\n dct[k] = v\n elif isinstance(v, collections.Mapping):\n dct[k] = json_decoder_hook(v, str_decoders, converters)\n return dct", - "docstring": "Decoder for parsing typical objects like uuid's and dates." - }, - { - "code": "def safe_unicode(string):\n if not isinstance(string, basestring):\n string = unicode(string)\n if isinstance(string, unicode):\n string = string.encode('utf8')\n return string", - "docstring": "Safely transform any object into utf8 encoded bytes" - }, - { - "code": "def standard_cl_params(items):\n out = []\n def _skip_duplicates(data):\n return (dd.get_coverage_interval(data) == \"amplicon\" or\n (dd.get_aligner(data) and not dd.get_mark_duplicates(data)))\n if any(_skip_duplicates(d) for d in items):\n broad_runner = broad.runner_from_config(items[0][\"config\"])\n gatk_type = broad_runner.gatk_type()\n if gatk_type == \"gatk4\":\n out += [\"--disable-read-filter\", \"NotDuplicateReadFilter\"]\n elif LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion(\"3.5\"):\n out += [\"-drf\", \"DuplicateRead\"]\n return out", - "docstring": "Shared command line parameters for GATK programs.\n\n Handles no removal of duplicate reads for amplicon or\n non mark duplicate experiments. If we have pre-aligned inputs we\n ignore the value or mark duplicates (since they may already be\n marked in the input BAM)." - }, - { - "code": "def calc_normal_std_he_forward(inmaps, outmaps, kernel=(1, 1)):\n r\n return np.sqrt(2. / (np.prod(kernel) * inmaps))", - "docstring": "r\"\"\"Calculates the standard deviation proposed by He et al.\n\n .. math::\n \\sigma = \\sqrt{\\frac{2}{NK}}\n\n Args:\n inmaps (int): Map size of an input Variable, :math:`N`.\n outmaps (int): Map size of an output Variable, :math:`M`.\n kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.\n In above definition, :math:`K` is the product of shape dimensions.\n In Affine, the default value should be used.\n\n Example:\n\n .. code-block:: python\n\n import nnabla as nn\n import nnabla.parametric_functions as PF\n import nnabla.initializer as I\n\n x = nn.Variable([60,1,28,28])\n s = I.calc_normal_std_he_forward(x.shape[1],64)\n w = I.NormalInitializer(s)\n b = I.ConstantInitializer(0)\n h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')\n\n References:\n * `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level\n Performance on ImageNet Classification.\n `_" - }, - { - "code": "def membuf_tempfile(memfile):\n memfile.seek(0, 0)\n tmpfd, tmpname = mkstemp(suffix='.rar')\n tmpf = os.fdopen(tmpfd, \"wb\")\n try:\n while True:\n buf = memfile.read(BSIZE)\n if not buf:\n break\n tmpf.write(buf)\n tmpf.close()\n except:\n tmpf.close()\n os.unlink(tmpname)\n raise\n return tmpname", - "docstring": "Write in-memory file object to real file." 
- }, - { - "code": "def getLogger(self, component_name: str = None) -> logging.Logger:\n logger_name = self.root + (component_name if component_name else 'generic')\n _logger = self.loggers.get(logger_name)\n if not _logger:\n _logger = logging.getLogger(logger_name)\n stdio_handler = logging.StreamHandler()\n stdio_handler.setFormatter(LogFormatter())\n stdio_handler.setLevel(logging.INFO)\n _logger.addHandler(stdio_handler)\n _logger.setLevel(logging.DEBUG)\n self.loggers[logger_name] = _logger\n return _logger", - "docstring": "Get the logger instance matching ``component_name`` or create a new one if non-existent.\n\n Args:\n component_name: a neo-python component name. e.g. network, vm, db\n\n Returns:\n a logger for the specified component." - }, - { - "code": "def search(self):\n params = self.solr_params()\n logging.info(\"PARAMS=\" + str(params))\n results = self.solr.search(**params)\n logging.info(\"Docs found: {}\".format(results.hits))\n return self._process_search_results(results)", - "docstring": "Execute solr search query" - }, - { - "code": "def node(name, **kwargs):\n cfg = _setup_conn(**kwargs)\n try:\n api_instance = kubernetes.client.CoreV1Api()\n api_response = api_instance.list_node()\n except (ApiException, HTTPError) as exc:\n if isinstance(exc, ApiException) and exc.status == 404:\n return None\n else:\n log.exception('Exception when calling CoreV1Api->list_node')\n raise CommandExecutionError(exc)\n finally:\n _cleanup(**cfg)\n for k8s_node in api_response.items:\n if k8s_node.metadata.name == name:\n return k8s_node.to_dict()\n return None", - "docstring": "Return the details of the node identified by the specified name\n\n CLI Examples::\n\n salt '*' kubernetes.node name='minikube'" - }, - { - "code": "def extend_substation(grid, critical_stations, grid_level):\n load_factor_lv_trans_lc_normal = cfg_ding0.get(\n 'assumptions',\n 'load_factor_lv_trans_lc_normal')\n load_factor_lv_trans_fc_normal = cfg_ding0.get(\n 'assumptions',\n 'load_factor_lv_trans_fc_normal')\n trafo_params = grid.network._static_data['{grid_level}_trafos'.format(\n grid_level=grid_level)]\n trafo_s_max_max = max(trafo_params['S_nom'])\n for station in critical_stations:\n if station['s_max'][0] > station['s_max'][1]:\n case = 'load'\n lf_lv_trans_normal = load_factor_lv_trans_lc_normal\n else:\n case = 'gen'\n lf_lv_trans_normal = load_factor_lv_trans_fc_normal\n s_max_trafos = sum([_.s_max_a\n for _ in station['station']._transformers])\n s_trafo_missing = max(station['s_max']) - (\n s_max_trafos * lf_lv_trans_normal)\n extendable_trafos = [_ for _ in station['station']._transformers\n if _.s_max_a < trafo_s_max_max]\n while (s_trafo_missing > 0) and extendable_trafos:\n trafo = extendable_trafos[0]\n trafo_s_max_a_before = trafo.s_max_a\n extend_trafo_power(extendable_trafos, trafo_params)\n s_trafo_missing -= ((trafo.s_max_a * lf_lv_trans_normal) -\n trafo_s_max_a_before)\n extendable_trafos = [_ for _ in station['station']._transformers\n if _.s_max_a < trafo_s_max_max]\n if s_trafo_missing > 0:\n trafo_type, trafo_cnt = select_transformers(grid, s_max={\n 's_max': s_trafo_missing,\n 'case': case\n })\n for t in range(0, trafo_cnt):\n lv_transformer = TransformerDing0(\n grid=grid,\n id_db=id,\n v_level=0.4,\n s_max_longterm=trafo_type['S_nom'],\n r=trafo_type['R'],\n x=trafo_type['X'])\n grid._station.add_transformer(lv_transformer)\n logger.info(\"{stations_cnt} have been reinforced due to overloading \"\n \"issues.\".format(stations_cnt=len(critical_stations)))", - "docstring": 
"Reinforce MV or LV substation by exchanging the existing trafo and\n installing a parallel one if necessary.\n\n First, all available transformers in a `critical_stations` are extended to\n maximum power. If this does not solve all present issues, additional\n transformers are build.\n\n Parameters\n ----------\n grid: GridDing0\n Ding0 grid container\n critical_stations : :any:`list`\n List of stations with overloading\n grid_level : str\n Either \"LV\" or \"MV\". Basis to select right equipment.\n \n Notes\n -----\n Curently straight forward implemented for LV stations\n\n Returns\n -------\n type \n #TODO: Description of return. Change type in the previous line accordingly" - }, - { - "code": "def set_source(self, source):\r\n route = Navigator(source=self.source).navigate_to_source(source)\r\n self.source = source\r\n return self._send_keystroke(route, wait=True)", - "docstring": "Selects and saves source." - }, - { - "code": "def run_container(self, conf, images, **kwargs):\n with self._run_container(conf, images, **kwargs):\n pass", - "docstring": "Run this image and all dependency images" - }, - { - "code": "def get(self, identifier):\n for provider in self._providers:\n if provider.identifier == identifier:\n return provider\n return None", - "docstring": "get provider by id" - }, - { - "code": "def send_message(ctx, scheduler_rpc, project, message):\n if isinstance(scheduler_rpc, six.string_types):\n scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc)\n if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'):\n scheduler_rpc = connect_rpc(ctx, None, 'http://%s/' % (\n os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):]))\n if scheduler_rpc is None:\n scheduler_rpc = connect_rpc(ctx, None, 'http://127.0.0.1:23333/')\n return scheduler_rpc.send_task({\n 'taskid': utils.md5string('data:,on_message'),\n 'project': project,\n 'url': 'data:,on_message',\n 'fetch': {\n 'save': ('__command__', message),\n },\n 'process': {\n 'callback': '_on_message',\n }\n })", - "docstring": "Send Message to project from command line" - }, - { - "code": "def _backup_file(path):\n backup_base = '/var/local/woven-backup'\n backup_path = ''.join([backup_base,path])\n if not exists(backup_path):\n directory = ''.join([backup_base,os.path.split(path)[0]])\n sudo('mkdir -p %s'% directory)\n sudo('cp %s %s'% (path,backup_path))", - "docstring": "Backup a file but never overwrite an existing backup file" - }, - { - "code": "def process_request(self, method, data=None):\n self._validate_request_method(method)\n attempts = 3\n for i in range(attempts):\n response = self._send_request(method, data)\n if self._is_token_response(response):\n if i < attempts - 1:\n time.sleep(2)\n continue\n else:\n raise UpdatingTokenResponse\n break\n resp = BaseResponse(response)\n if response.headers.get('content-type') == 'application/json':\n if not resp.json.get('status'):\n if all([\n resp.json.get('error_code') == 1,\n resp.json.get('error_message') == u\"We are currently \"\n \"undergoing maintenance, please check back shortly.\",\n ]):\n raise MaintenanceResponse(response=resp.json)\n else:\n raise ResponseError(response=resp.json)\n return resp", - "docstring": "Process request over HTTP to ubersmith instance.\n\n method: Ubersmith API method string\n data: dict of method arguments" - }, - { - "code": "def swap_channels(self, channel_swap):\n if len(channel_swap) != 2:\n raise ValueError('Illegal value for channel swap')\n ci = channel_swap[0]\n cj = channel_swap[1]\n if ci < 0 or ci > 2 or cj < 0 or cj > 2:\n 
raise ValueError('Channels must be between 0 and 1')\n new_data = self.data.copy()\n new_data[:, :, ci] = self.data[:, :, cj]\n new_data[:, :, cj] = self.data[:, :, ci]\n return ColorImage(new_data, frame=self._frame)", - "docstring": "Swaps the two channels specified in the tuple.\n\n Parameters\n ----------\n channel_swap : :obj:`tuple` of int\n the two channels to swap\n\n Returns\n -------\n :obj:`ColorImage`\n color image with cols swapped" - }, - { - "code": "def add_user(self, name, password=None, read_only=None, **kwargs):\n if not isinstance(name, string_type):\n raise TypeError(\"name must be an \"\n \"instance of %s\" % (string_type.__name__,))\n if password is not None:\n if not isinstance(password, string_type):\n raise TypeError(\"password must be an \"\n \"instance of %s\" % (string_type.__name__,))\n if len(password) == 0:\n raise ValueError(\"password can't be empty\")\n if read_only is not None:\n read_only = common.validate_boolean('read_only', read_only)\n if 'roles' in kwargs:\n raise ConfigurationError(\"Can not use \"\n \"read_only and roles together\")\n try:\n uinfo = self.command(\"usersInfo\", name)\n self._create_or_update_user(\n (not uinfo[\"users\"]), name, password, read_only, **kwargs)\n except OperationFailure as exc:\n if exc.code in common.COMMAND_NOT_FOUND_CODES:\n self._legacy_add_user(name, password, read_only, **kwargs)\n return\n elif exc.code == 13:\n self._create_or_update_user(\n True, name, password, read_only, **kwargs)\n else:\n raise", - "docstring": "Create user `name` with password `password`.\n\n Add a new user with permissions for this :class:`Database`.\n\n .. note:: Will change the password if user `name` already exists.\n\n :Parameters:\n - `name`: the name of the user to create\n - `password` (optional): the password of the user to create. Can not\n be used with the ``userSource`` argument.\n - `read_only` (optional): if ``True`` the user will be read only\n - `**kwargs` (optional): optional fields for the user document\n (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See\n ``_\n for more information.\n\n .. note:: The use of optional keyword arguments like ``userSource``,\n ``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0\n\n .. versionchanged:: 2.5\n Added kwargs support for optional fields introduced in MongoDB 2.4\n\n .. versionchanged:: 2.2\n Added support for read only users" - }, - { - "code": "def device_state_attributes(self):\n attr = {}\n attr['active_time'] = self.smartplug.active_time\n attr['voltage'] = self.smartplug.voltage\n attr['active_time'] = self.smartplug.active_time\n attr['weekly_energy_total'] = self.smartplug.weekly_energy_total\n attr['monthly_energy_total'] = self.smartplug.monthly_energy_total\n attr['yearly_energy_total'] = self.smartplug.yearly_energy_total\n return attr", - "docstring": "Return the state attributes of the device." 
- }, - { - "code": "def convert_sparse_to_dataframe(spark, context, sparse_matrix):\n m = sparse_matrix.tocoo()\n data = context.parallelize(numpy.array([m.row, m.col, m.data]).T,\n numSlices=len(m.row)/1024)\n return spark.createDataFrame(data.map(lambda p: Row(row=int(p[0]),\n col=int(p[1]),\n data=float(p[2]))))", - "docstring": "Converts a scipy sparse matrix to a spark dataframe" - }, - { - "code": "def set_affinity(pid, cpuset):\n _cpuset = cpu_set_t()\n __CPU_ZERO(_cpuset)\n for i in cpuset:\n if i in range(0, sizeof(cpu_set_t) * 8):\n __CPU_SET(i, _cpuset)\n if libnuma.sched_setaffinity(pid, sizeof(cpu_set_t), byref(_cpuset)) < 0:\n raise RuntimeError()", - "docstring": "Sets the CPU affinity mask of the process whose ID is pid to the value specified by mask.\n\n If pid is zero, then the calling process is used.\n\n @param pid: process PID (0 == current process)\n @type pid: C{int}\n @param cpuset: set of CPU ids\n @type cpuset: C{set}" - }, - { - "code": "def access_elementusers(self, elementuser_id, access_id=None, tenant_id=None, api_version=\"v2.0\"):\n if tenant_id is None and self._parent_class.tenant_id:\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n if not access_id:\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/elementusers/{}/access\".format(api_version,\n tenant_id,\n elementuser_id)\n else:\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/elementusers/{}/access/{}\".format(api_version,\n tenant_id,\n elementuser_id,\n access_id)\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"get\")", - "docstring": "Get all accesses for a particular user\n\n **Parameters:**:\n\n - **elementuser_id**: Element User ID\n - **access_id**: (optional) Access ID\n - **tenant_id**: Tenant ID\n - **api_version**: API version to use (default v2.0)\n\n **Returns:** requests.Response object extended with cgx_status and cgx_content properties." 
- }, - { - "code": "def is_opendap(url):\n if url.endswith('\n das_url = url.replace('\n else:\n das_url = url + '.das'\n response = requests.get(das_url, allow_redirects=True)\n if 'xdods-server' in response.headers:\n return True\n if response.status_code == 401 and \\\n 'text/html' in response.headers['content-type'] and \\\n 'The following URL requires authentication:' in response.text:\n return True\n return False", - "docstring": "Returns True if the URL is a valid OPeNDAP URL\n\n :param str url: URL for a remote OPeNDAP endpoint" - }, - { - "code": "def list_models(self, **kwargs):\n headers = {}\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n sdk_headers = get_sdk_headers('natural-language-understanding', 'V1',\n 'list_models')\n headers.update(sdk_headers)\n params = {'version': self.version}\n url = '/v1/models'\n response = self.request(\n method='GET',\n url=url,\n headers=headers,\n params=params,\n accept_json=True)\n return response", - "docstring": "List models.\n\n Lists Watson Knowledge Studio [custom\n models](https://cloud.ibm.com/docs/services/natural-language-understanding/customizing.html)\n that are deployed to your Natural Language Understanding service.\n\n :param dict headers: A `dict` containing the request headers\n :return: A `DetailedResponse` containing the result, headers and HTTP status code.\n :rtype: DetailedResponse" - }, - { - "code": "def cmake_setup():\n cmake_exe = shutil.which('cmake')\n if not cmake_exe:\n raise FileNotFoundError('CMake not available')\n wopts = ['-G', 'MinGW Makefiles', '-DCMAKE_SH=\"CMAKE_SH-NOTFOUND'] if os.name == 'nt' else []\n subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)],\n cwd=BINDIR)\n ret = subprocess.run([cmake_exe, '--build', str(BINDIR)],\n stderr=subprocess.PIPE,\n universal_newlines=True)\n result(ret)", - "docstring": "attempt to build using CMake >= 3" - }, - { - "code": "def smart_scrub(df,col_name,error_rate = 0):\n scrubf = smart_scrubf(df,col_name,error_rate)\n scrubb = smart_scrubb(df,col_name,error_rate)\n return (scrubf, scrubb)", - "docstring": "Scrubs from the front and back of an 'object' column in a DataFrame\n until the scrub would semantically alter the contents of the column. If only a \n subset of the elements in the column are scrubbed, then a boolean array indicating which\n elements have been scrubbed is appended to the dataframe. Returns a tuple of the strings removed\n from the front and back of the elements\n df - DataFrame\n DataFrame to scrub\n col_name - string\n Name of column to scrub\n error_rate - number, default 0\n The maximum amount of values this function can ignore while scrubbing, expressed as a\n fraction of the total amount of rows in the dataframe." - }, - { - "code": "def tox_get_python_executable(envconfig):\n try:\n pyenv = (getattr(py.path.local.sysfind('pyenv'), 'strpath', 'pyenv')\n or 'pyenv')\n cmd = [pyenv, 'which', envconfig.basepython]\n pipe = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True\n )\n out, err = pipe.communicate()\n except OSError:\n err = '\\'pyenv\\': command not found'\n LOG.warning(\n \"pyenv doesn't seem to be installed, you probably \"\n \"don't want this plugin installed either.\"\n )\n else:\n if pipe.poll() == 0:\n return out.strip()\n else:\n if not envconfig.tox_pyenv_fallback:\n raise PyenvWhichFailed(err)\n LOG.debug(\"`%s` failed thru tox-pyenv plugin, falling back. 
\"\n \"STDERR: \\\"%s\\\" | To disable this behavior, set \"\n \"tox_pyenv_fallback=False in your tox.ini or use \"\n \" --tox-pyenv-no-fallback on the command line.\",\n ' '.join([str(x) for x in cmd]), err)", - "docstring": "Return a python executable for the given python base name.\n\n The first plugin/hook which returns an executable path will determine it.\n\n ``envconfig`` is the testenv configuration which contains\n per-testenv configuration, notably the ``.envname`` and ``.basepython``\n setting." - }, - { - "code": "def add_result_handler(self, handler):\n self._result_handlers.append(handler)\n if self._sorted_handlers:\n self._sorted_handlers = None", - "docstring": "Register a new result handler." - }, - { - "code": "def _is_missing_tags_strict(self):\n val = self.missing_tags\n if val == MissingTags.strict:\n return True\n elif val == MissingTags.ignore:\n return False\n raise Exception(\"Unsupported 'missing_tags' value: %s\" % repr(val))", - "docstring": "Return whether missing_tags is set to strict." - }, - { - "code": "def _extract_return(self, data):\n if isinstance(data, dict):\n data = data.get('return', data)\n return data", - "docstring": "Extracts return data from the results.\n\n :param data:\n :return:" - }, - { - "code": "def __updateNavButtons(self):\n navButtons = None\n for v in self.views:\n if v.getId() == 'com.android.systemui:id/nav_buttons':\n navButtons = v\n break\n if navButtons:\n self.navBack = self.findViewById('com.android.systemui:id/back', navButtons)\n self.navHome = self.findViewById('com.android.systemui:id/home', navButtons)\n self.navRecentApps = self.findViewById('com.android.systemui:id/recent_apps', navButtons)\n else:\n if self.uiAutomatorHelper:\n print >> sys.stderr, \"WARNING: nav buttons not found. Perhaps the device has hardware buttons.\"\n self.navBack = None\n self.navHome = None\n self.navRecentApps = None", - "docstring": "Updates the navigation buttons that might be on the device screen." - }, - { - "code": "def x_rotate(rotationAmt):\n ma4 = Matrix4((1, 0, 0, 0),\n (0, math.cos(rotationAmt), -math.sin(rotationAmt), 0),\n (0, math.sin(rotationAmt), math.cos(rotationAmt), 0),\n (0, 0, 0, 1))\n return ma4", - "docstring": "Create a matrix that rotates around the x axis." - }, - { - "code": "def _collect_for_instance(self, instance, connection):\n with connection.cursor() as cursor:\n for queue, metrics in self.get_queue_info(instance, cursor):\n for name, metric in metrics.items():\n self.publish('.'.join((instance, queue, name)), metric)\n with connection.cursor() as cursor:\n consumers = self.get_consumer_info(instance, cursor)\n for queue, consumer, metrics in consumers:\n for name, metric in metrics.items():\n key_parts = (instance, queue, 'consumers', consumer, name)\n self.publish('.'.join(key_parts), metric)", - "docstring": "Collects metrics for a named connection." 
- }, - { - "code": "def _convert_dict_inputs(inputs, tensor_info_map):\n dict_inputs = _prepare_dict_inputs(inputs, tensor_info_map)\n return tensor_info.convert_dict_to_compatible_tensor(dict_inputs,\n tensor_info_map)", - "docstring": "Converts from inputs into dict of input tensors.\n\n This handles:\n - putting inputs into a dict, per _prepare_dict_inputs(),\n - converting all input values into tensors compatible with the\n expected input tensor (dtype, shape).\n - check sparse/non-sparse tensor types.\n\n Args:\n inputs: inputs fed to Module.__call__().\n tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`\n describing the signature inputs.\n\n Returns:\n A dict of tensors to feed to the signature instantiation.\n\n Raises:\n TypeError: If it fails to convert the input values into a dict of tensors\n to feed to the signature instantiation." - }, - { - "code": "def remove_duplicate_sg(security_groups):\n for each_sg, duplicate_sg_name in SECURITYGROUP_REPLACEMENTS.items():\n if each_sg in security_groups and duplicate_sg_name in security_groups:\n LOG.info('Duplicate SG found. Removing %s in favor of %s.', duplicate_sg_name, each_sg)\n security_groups.remove(duplicate_sg_name)\n return security_groups", - "docstring": "Removes duplicate Security Groups that share a same name alias\n\n Args:\n security_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS\n\n Returns:\n security_groups (list): A list of security groups with duplicate aliases removed" - }, - { - "code": "def verified_email_required(function=None,\n login_url=None,\n redirect_field_name=REDIRECT_FIELD_NAME):\n def decorator(view_func):\n @login_required(redirect_field_name=redirect_field_name,\n login_url=login_url)\n def _wrapped_view(request, *args, **kwargs):\n if not EmailAddress.objects.filter(user=request.user,\n verified=True).exists():\n send_email_confirmation(request, request.user)\n return render(request,\n 'account/verified_email_required.html')\n return view_func(request, *args, **kwargs)\n return _wrapped_view\n if function:\n return decorator(function)\n return decorator", - "docstring": "Even when email verification is not mandatory during signup, there\n may be circumstances during which you really want to prevent\n unverified users to proceed. This decorator ensures the user is\n authenticated and has a verified email address. If the former is\n not the case then the behavior is identical to that of the\n standard `login_required` decorator. If the latter does not hold,\n email verification mails are automatically resend and the user is\n presented with a page informing them they needs to verify their email\n address." 
- }, - { - "code": "def use_file(self, enabled=True,\n file_name=None,\n level=logging.WARNING,\n when='d',\n interval=1,\n backup_count=30,\n delay=False,\n utc=False,\n at_time=None,\n log_format=None,\n date_format=None):\n if enabled:\n if not self.__file_handler:\n assert file_name, 'File name is missing!'\n kwargs = {\n 'filename': file_name,\n 'when': when,\n 'interval': interval,\n 'backupCount': backup_count,\n 'encoding': 'UTF-8',\n 'delay': delay,\n 'utc': utc,\n }\n if sys.version_info[0] >= 3:\n kwargs['atTime'] = at_time\n self.__file_handler = TimedRotatingFileHandler(**kwargs)\n if not log_format:\n log_format = '%(asctime)s %(name)s[%(process)d] ' \\\n '%(programname)s/%(module)s/%(funcName)s[%(lineno)d] ' \\\n '%(levelname)s %(message)s'\n formatter = logging.Formatter(fmt=log_format, datefmt=date_format)\n self.__file_handler.setFormatter(fmt=formatter)\n self.__file_handler.setLevel(level=level)\n self.add_handler(hdlr=self.__file_handler)\n elif self.__file_handler:\n self.remove_handler(hdlr=self.__file_handler)\n self.__file_handler = None", - "docstring": "Handler for logging to a file, rotating the log file at certain timed intervals." - }, - { - "code": "def decyear2dt(t):\n year = int(t)\n rem = t - year \n base = datetime(year, 1, 1)\n dt = base + timedelta(seconds=(base.replace(year=base.year+1) - base).total_seconds() * rem)\n return dt", - "docstring": "Convert decimal year to datetime" - }, - { - "code": "def _createoddslicespec(bounds,scs,min_matrix_radius):\n bounds_xcenter,bounds_ycenter=bounds.centroid()\n sheet_rows,sheet_cols = scs.shape\n center_row,center_col = sheet_rows/2,sheet_cols/2\n unit_xcenter,unit_ycenter=scs.matrixidx2sheet(center_row,\n center_col)\n bounds.translate(unit_xcenter-bounds_xcenter,\n unit_ycenter-bounds_ycenter)\n r1,r2,c1,c2 = Slice._boundsspec2slicespec(bounds.lbrt(),scs)\n xrad=max(c2-center_col-1,min_matrix_radius)\n yrad=max(r2-center_row-1,min_matrix_radius)\n r2=center_row+yrad+1\n c2=center_col+xrad+1\n r1=center_row-yrad\n c1=center_col-xrad\n return (r1,r2,c1,c2)", - "docstring": "Create the 'odd' Slice that best approximates the specified\n sheet-coordinate bounds.\n\n The supplied bounds are translated to have a center at the\n center of one of the sheet's units (we arbitrarily use the\n center unit), and then these bounds are converted to a slice\n in such a way that the slice exactly includes all units whose\n centers are within the bounds (see boundsspec2slicespec()).\n However, to ensure that the bounds are treated symmetrically,\n we take the right and bottom bounds and reflect these about\n the center of the slice (i.e. we take the 'xradius' to be\n right_col-center_col and the 'yradius' to be\n bottom_col-center_row). Hence, if the bounds happen to go\n through units, if the units are included on the right and\n bottom bounds, they will be included on the left and top\n bounds. This ensures that the slice has odd dimensions." - }, - { - "code": "def rbdd(*keywords):\n settings = _personal_settings().data\n settings[\"engine\"][\"rewrite\"] = True\n _storybook(settings[\"engine\"]).with_params(\n **{\"python version\": settings[\"params\"][\"python version\"]}\n ).only_uninherited().shortcut(*keywords).play()", - "docstring": "Run story matching keywords and rewrite story if code changed." 
- }, - { - "code": "def disqus_sso_script(context):\n settings = context[\"settings\"]\n public_key = getattr(settings, \"COMMENTS_DISQUS_API_PUBLIC_KEY\", \"\")\n secret_key = getattr(settings, \"COMMENTS_DISQUS_API_SECRET_KEY\", \"\")\n user = context[\"request\"].user\n if public_key and secret_key and user.is_authenticated():\n context[\"public_key\"] = public_key\n context[\"sso_data\"] = _get_disqus_sso(user, public_key, secret_key)\n return context", - "docstring": "Provides a generic context variable which adds single-sign-on\n support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and\n ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified." - }, - { - "code": "def labels(self):\n return tuple(_Label(label.get('id'), label.get('color'), label.text) for label\n in self.root.iter('label'))", - "docstring": "Tuple of labels." - }, - { - "code": "def remove(self, identified):\n by_val = isinstance(identified, Identified)\n if by_val:\n key = identified.key\n if not isinstance(identified, self._class):\n raise self.Error(\"Such instance could never exist here\",\n self.Error.INVALID_INSTANCE_CLASS, instance=identified)\n else:\n key = identified\n try:\n popped = self._objects.pop(key)\n if by_val and popped != identified:\n raise self.Error(\"Trying to pop a different object which also has key '%s'\" % popped.key,\n self.Error.NOT_SAME_OBJECT, instance=identified, current=popped)\n self._events.remove.trigger(list=self, instance=identified, by_val=by_val)\n except KeyError:\n raise self.Error(\"No object with key '%s' exists here\",\n self.Error.KEY_NOT_EXISTS, key=key, instance=identified if by_val else None)", - "docstring": "Removes an already-created identified object.\n A key may be passed instead of an identified object.\n If an object is passed, and its key is held by another\n object inside the record, an error is triggered.\n Returns the removed object." - }, - { - "code": "def get_unused_resource_config_paths(self, resource_fqns, disabled):\n disabled_fqns = frozenset(tuple(fqn) for fqn in disabled)\n resource_config_paths = self.get_resource_config_paths()\n unused_resource_config_paths = []\n for resource_type, config_paths in resource_config_paths.items():\n used_fqns = resource_fqns.get(resource_type, frozenset())\n fqns = used_fqns | disabled_fqns\n for config_path in config_paths:\n if not _is_config_used(config_path, fqns):\n unused_resource_config_paths.append(\n (resource_type,) + config_path\n )\n return unused_resource_config_paths", - "docstring": "Return a list of lists of strings, where each inner list of strings\n represents a type + FQN path of a resource configuration that is not\n used." 
- }, - { - "code": "def method(request_message=message_types.VoidMessage,\n response_message=message_types.VoidMessage,\n name=None,\n path=None,\n http_method='POST',\n scopes=None,\n audiences=None,\n allowed_client_ids=None,\n auth_level=None,\n api_key_required=None,\n metric_costs=None,\n use_request_uri=None):\n if auth_level is not None:\n _logger.warn(_AUTH_LEVEL_WARNING)\n DEFAULT_HTTP_METHOD = 'POST'\n def apiserving_method_decorator(api_method):\n request_body_class = None\n request_params_class = None\n if isinstance(request_message, resource_container.ResourceContainer):\n remote_decorator = remote.method(request_message.combined_message_class,\n response_message)\n request_body_class = request_message.body_message_class()\n request_params_class = request_message.parameters_message_class()\n else:\n remote_decorator = remote.method(request_message, response_message)\n remote_method = remote_decorator(api_method)\n def invoke_remote(service_instance, request):\n users_id_token._maybe_set_current_user_vars(\n invoke_remote, api_info=getattr(service_instance, 'api_info', None),\n request=request)\n return remote_method(service_instance, request)\n invoke_remote.remote = remote_method.remote\n if isinstance(request_message, resource_container.ResourceContainer):\n resource_container.ResourceContainer.add_to_cache(\n invoke_remote.remote, request_message)\n invoke_remote.method_info = _MethodInfo(\n name=name or api_method.__name__, path=path or api_method.__name__,\n http_method=http_method or DEFAULT_HTTP_METHOD,\n scopes=scopes, audiences=audiences,\n allowed_client_ids=allowed_client_ids, auth_level=auth_level,\n api_key_required=api_key_required, metric_costs=metric_costs,\n use_request_uri=use_request_uri,\n request_body_class=request_body_class,\n request_params_class=request_params_class)\n invoke_remote.__name__ = invoke_remote.method_info.name\n return invoke_remote\n endpoints_util.check_list_type(scopes, (basestring, endpoints_types.OAuth2Scope), 'scopes')\n endpoints_util.check_list_type(allowed_client_ids, basestring,\n 'allowed_client_ids')\n _CheckEnum(auth_level, AUTH_LEVEL, 'auth_level')\n _CheckAudiences(audiences)\n _CheckType(metric_costs, dict, 'metric_costs')\n return apiserving_method_decorator", - "docstring": "Decorate a ProtoRPC Method for use by the framework above.\n\n This decorator can be used to specify a method name, path, http method,\n scopes, audiences, client ids and auth_level.\n\n Sample usage:\n @api_config.method(RequestMessage, ResponseMessage,\n name='insert', http_method='PUT')\n def greeting_insert(request):\n ...\n return response\n\n Args:\n request_message: Message type of expected request.\n response_message: Message type of expected response.\n name: string, Name of the method, prepended with . to make it\n unique. (Default: python method name)\n path: string, Path portion of the URL to the method, for RESTful methods.\n http_method: string, HTTP method supported by the method. 
(Default: POST)\n scopes: list of string, OAuth2 token must contain one of these scopes.\n audiences: list of string, IdToken must contain one of these audiences.\n allowed_client_ids: list of string, Client IDs allowed to call the method.\n If None and auth_level is REQUIRED, no calls will be allowed.\n auth_level: enum from AUTH_LEVEL, Frontend auth level for the method.\n api_key_required: bool, whether a key is required to call the method\n metric_costs: dict with keys matching an API limit metric and values\n representing the cost for each successful call against that metric.\n use_request_uri: if true, match requests against REQUEST_URI instead of PATH_INFO\n\n Returns:\n 'apiserving_method_wrapper' function.\n\n Raises:\n TypeError: if the request_type or response_type parameters are not\n proper subclasses of messages.Message." - }, - { - "code": "def filter_inactive_ports(query):\n port_model = models_v2.Port\n query = (query\n .filter(port_model.status == n_const.PORT_STATUS_ACTIVE))\n return query", - "docstring": "Filter ports that aren't in active status" - }, - { - "code": "def set_cache(\n self,\n instance=None,\n translation=None,\n language=None,\n field_name=None,\n field_value=None,\n ):\n if instance is not None and translation is not None:\n cached_obj = CachedTranslation.from_object(translation)\n instance._linguist_translations[translation.field_name][\n translation.language\n ] = cached_obj\n return cached_obj\n if instance is None:\n instance = self.instance\n cached_obj = self.get_cache(\n instance,\n translation=translation,\n field_value=field_value,\n language=language,\n field_name=field_name,\n )\n if field_value is None and cached_obj.field_value:\n cached_obj.deleted = True\n if field_value != cached_obj.field_value:\n cached_obj.has_changed = True\n cached_obj.field_value = field_value\n return cached_obj", - "docstring": "Add a new translation into the cache." - }, - { - "code": "def checkvalid(s, m, pk):\n if len(s) != b // 4:\n raise ValueError(\"signature length is wrong\")\n if len(pk) != b // 8:\n raise ValueError(\"public-key length is wrong\")\n s = bytearray(s)\n m = bytearray(m)\n pk = bytearray(pk)\n R = decodepoint(s[: b // 8])\n A = decodepoint(pk)\n S = decodeint(s[b // 8 : b // 4])\n h = Hint(encodepoint(R) + pk + m)\n (x1, y1, z1, t1) = P = scalarmult_B(S)\n (x2, y2, z2, t2) = Q = edwards_add(R, scalarmult(A, h))\n if (\n not isoncurve(P)\n or not isoncurve(Q)\n or (x1 * z2 - x2 * z1) % q != 0\n or (y1 * z2 - y2 * z1) % q != 0\n ):\n raise SignatureMismatch(\"signature does not pass verification\")", - "docstring": "Not safe to use when any argument is secret.\n See module docstring. This function should be used only for\n verifying public signatures of public messages." - }, - { - "code": "def put(self, key, value):\n if self._link_for_value(value):\n super(SymlinkDatastore, self).put(key, value)\n return\n current_value = super(SymlinkDatastore, self).get(key)\n link_key = self._link_for_value(current_value)\n if link_key:\n self.put(link_key, value)\n else:\n super(SymlinkDatastore, self).put(key, value)", - "docstring": "Stores the object named by `key`. Follows links." 
- }, - { - "code": "def breadth_first(start, expand):\n ensure_callable(expand)\n def generator():\n queue = deque([start])\n while queue:\n node = queue.popleft()\n yield node\n queue.extend(expand(node))\n return generator()", - "docstring": "Performs a breadth-first search of a graph-like structure.\n\n :param start: Node to start the search from\n :param expand: Function taking a node as an argument and returning iterable\n of its child nodes\n\n :return: Iterable of nodes in the BFS order\n\n Example::\n\n tree = json.loads(some_data)\n for item in breadth_first(tree, key_func('children', default=())):\n do_something_with(item)" - }, - { - "code": "def end_entry(self):\n if self.in_progress is None:\n return Error.NO_ERROR\n if self.in_progress.data_space() == 2:\n return Error.INPUT_BUFFER_WRONG_SIZE\n for entry in self.entries:\n if entry.target == self.in_progress.target and entry.var_id == self.in_progress.var_id:\n entry.valid = False\n self.entries.append(self.in_progress)\n self.data_index += self.in_progress.data_space() - 2\n self.in_progress = None\n return Error.NO_ERROR", - "docstring": "Finish a previously started config database entry.\n\n This commits the currently in progress entry. The expected flow is\n that start_entry() is called followed by 1 or more calls to add_data()\n followed by a single call to end_entry().\n\n Returns:\n int: An error code" - }, - { - "code": "def get_currencies_info() -> Element:\n response = requests.get(const.CBRF_API_URLS['info'])\n return XML(response.text)", - "docstring": "Get META information about currencies\n\n url: http://www.cbr.ru/scripts/XML_val.asp\n\n :return: :class: `Element ` object\n :rtype: ElementTree.Element" - }, - { - "code": "def knuth_morris_pratt(s, t):\n sep = '\\x00'\n assert sep not in t and sep not in s\n f = maximum_border_length(t + sep + s)\n n = len(t)\n for i, fi in enumerate(f):\n if fi == n:\n return i - 2 * n\n return -1", - "docstring": "Find a substring by Knuth-Morris-Pratt\n\n :param s: the haystack string\n :param t: the needle string\n :returns: index i such that s[i: i + len(t)] == t, or -1\n :complexity: O(len(s) + len(t))" - }, - { - "code": "def write_script(script, tempdir):\n name = \"script\" + self.suffix\n path = os.path.join(tempdir, name)\n with open(path, \"w\") as f:\n f.write(\"\\n\".join(script))\n return path", - "docstring": "Write script to a temporary directory\n\n Arguments:\n script (list): Commands which to put into a file\n\n Returns:\n Absolute path to script" - }, - { - "code": "def sq_dist(X1,X2=None):\n if X2==None:\n X2 = X1\n assert X1.shape[1]==X2.shape[1], 'dimensions do not match'\n n = X1.shape[0]\n m = X2.shape[0]\n d = X1.shape[1]\n X1sq = sp.reshape((X1**2).sum(1),n,1)\n X2sq = sp.reshape((X2**2).sum(1),m,1)\n K = sp.tile((X1*X1).sum(1),(m,1)).T + sp.tile((X2*X2).sum(1),(n,1)) - 2*sp.dot(X1,X2.T)\n return K", - "docstring": "computes a matrix of all pariwise squared distances" - }, - { - "code": "def senqueue(trg_queue, item_s, *args, **kwargs):\n return vsenqueue(trg_queue, item_s, args, **kwargs)", - "docstring": "Enqueue a string, or string-like object to queue with arbitrary\n arguments, senqueue is to enqueue what sprintf is to printf, senqueue\n is to vsenqueue what sprintf is to vsprintf." 
- }, - { - "code": "def histogram2d(data1, data2, bins=None, *args, **kwargs):\n import dask\n if \"axis_names\" not in kwargs:\n if hasattr(data1, \"name\") and hasattr(data2, \"name\"):\n kwargs[\"axis_names\"] = [data1.name, data2.name]\n if not hasattr(data1, \"dask\"):\n data1 = dask.array.from_array(data1, chunks=data1.size() / 100)\n if not hasattr(data2, \"dask\"):\n data2 = dask.array.from_array(data2, chunks=data2.size() / 100)\n data = dask.array.stack([data1, data2], axis=1)\n kwargs[\"dim\"] = 2\n return histogramdd(data, bins, *args, **kwargs)", - "docstring": "Facade function to create 2D histogram using dask." - }, - { - "code": "def _GetAccessToken(self):\n d = {\n 'assertion': self._GenerateAssertion(),\n 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',\n }\n try:\n body = parse.urlencode(d)\n except AttributeError:\n body = urllib.urlencode(d)\n req = urllib_request.Request(RpcHelper.TOKEN_ENDPOINT)\n req.add_header('Content-type', 'application/x-www-form-urlencoded')\n binary_body = body.encode('utf-8')\n raw_response = urllib_request.urlopen(req, binary_body)\n return simplejson.loads(raw_response.read())['access_token']", - "docstring": "Gets oauth2 access token for Gitkit API using service account.\n\n Returns:\n string, oauth2 access token." - }, - { - "code": "def run_sparser(fname, output_fmt, outbuf=None, timeout=600):\n if not sparser_path or not os.path.exists(sparser_path):\n logger.error('Sparser executable not set in %s' % sparser_path_var)\n return None\n if output_fmt == 'xml':\n format_flag = '-x'\n suffix = '.xml'\n elif output_fmt == 'json':\n format_flag = '-j'\n suffix = '.json'\n else:\n logger.error('Unknown output format: %s' % output_fmt)\n return None\n sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')\n output_path = fname.split('.')[0] + '-semantics' + suffix\n for fpath in [sparser_exec_path, fname]:\n if not os.path.exists(fpath):\n raise Exception(\"'%s' is not a valid path.\" % fpath)\n cmd_list = [sparser_exec_path, format_flag, fname]\n with sp.Popen(cmd_list, stdout=sp.PIPE) as proc:\n try:\n stdout, stderr = proc.communicate(timeout=timeout)\n except sp.TimeoutExpired:\n sp.check_call(['pkill', '-f', 'r3.core.*%s' % fname])\n stdout, stderr = proc.communicate()\n raise sp.TimeoutExpired(proc.args, timeout, output=stdout,\n stderr=stderr)\n except BaseException:\n sp.check_call(['pkill', '-f', fname])\n proc.wait()\n raise\n retcode = proc.poll()\n if retcode:\n raise sp.CalledProcessError(retcode, proc.args, output=stdout,\n stderr=stderr)\n if outbuf is not None:\n outbuf.write(stdout)\n outbuf.flush()\n assert os.path.exists(output_path),\\\n 'No output file \\\"%s\\\" created by sparser.' % output_path\n return output_path", - "docstring": "Return the path to reading output after running Sparser reading.\n\n Parameters\n ----------\n fname : str\n The path to an input file to be processed. Due to the Spaser\n executable's assumptions, the file name needs to start with PMC\n and should be an NXML formatted file.\n output_fmt : Optional[str]\n The format in which Sparser should produce its output, can either be\n 'json' or 'xml'.\n outbuf : Optional[file]\n A file like object that the Sparser output is written to.\n timeout : int\n The number of seconds to wait until giving up on this one reading. The\n default is 600 seconds (i.e. 10 minutes). 
Sparser is a fast reader and\n the typical time to read a single full text is a matter of seconds.\n\n Returns\n -------\n output_path : str\n The path to the output file created by Sparser." - }, - { - "code": "def ExamineEvent(self, mediator, event):\n if event.data_type not in self._DATATYPES:\n return\n url = getattr(event, 'url', None)\n if url is None:\n return\n parsed_url = urlparse.urlparse(url)\n domain = getattr(parsed_url, 'netloc', None)\n if domain in self._domains:\n return\n self._domains.append(domain)", - "docstring": "Analyzes an event and extracts domains from it.\n\n We only evaluate straightforward web history events, not visits which can\n be inferred by TypedURLs, cookies or other means.\n\n Args:\n mediator (AnalysisMediator): mediates interactions between\n analysis plugins and other components, such as storage and dfvfs.\n event (EventObject): event to examine." - }, - { - "code": "def initialize(self, configfile=None):\n method = \"initialize\"\n A = None\n metadata = {method: configfile}\n send_array(self.socket, A, metadata)\n A, metadata = recv_array(\n self.socket, poll=self.poll, poll_timeout=self.poll_timeout,\n flags=self.zmq_flags)", - "docstring": "Initialize the module" - }, - { - "code": "def delete(self, key, noreply=None):\n if noreply is None:\n noreply = self.default_noreply\n cmd = b'delete ' + self.check_key(key)\n if noreply:\n cmd += b' noreply'\n cmd += b'\\r\\n'\n results = self._misc_cmd([cmd], b'delete', noreply)\n if noreply:\n return True\n return results[0] == b'DELETED'", - "docstring": "The memcached \"delete\" command.\n\n Args:\n key: str, see class docs for details.\n noreply: optional bool, True to not wait for the reply (defaults to\n self.default_noreply).\n\n Returns:\n If noreply is True, always returns True. Otherwise returns True if\n the key was deleted, and False if it wasn't found." - }, - { - "code": "def cumulative_sum(self):\n from .. import extensions\n agg_op = \"__builtin__cum_sum__\"\n return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))", - "docstring": "Return the cumulative sum of the elements in the SArray.\n\n Returns an SArray where each element in the output corresponds to the\n sum of all the elements preceding and including it. 
The SArray is\n expected to be of numeric type (int, float), or a numeric vector type.\n\n Returns\n -------\n out : sarray[int, float, array.array]\n\n Notes\n -----\n - Missing values are ignored while performing the cumulative\n aggregate operation.\n - For SArray's of type array.array, all entries are expected to\n be of the same size.\n\n Examples\n --------\n >>> sa = SArray([1, 2, 3, 4, 5])\n >>> sa.cumulative_sum()\n dtype: int\n rows: 3\n [1, 3, 6, 10, 15]" - }, - { - "code": "def with_git(repo,\n target_dir=None,\n limit=None,\n refspec=\"HEAD\",\n clone=True,\n rev_list_args=None,\n version_filter=lambda version: True):\n if not rev_list_args:\n rev_list_args = []\n def git_decorator(cls):\n from benchbuild.utils.cmd import git\n @staticmethod\n def versions_impl():\n directory = cls.SRC_FILE if target_dir is None else target_dir\n repo_prefix = local.path(str(CFG[\"tmp_dir\"]))\n repo_loc = local.path(repo_prefix) / directory\n if source_required(repo_loc):\n if not clone:\n return []\n git(\"clone\", repo, repo_loc)\n update_hash(repo_loc)\n with local.cwd(repo_loc):\n rev_list = git(\"rev-list\", \"--abbrev-commit\", \"--abbrev=10\",\n refspec, *rev_list_args).strip().split('\\n')\n latest = git(\"rev-parse\", \"--short=10\",\n refspec).strip().split('\\n')\n cls.VERSION = latest[0]\n if limit:\n return list(filter(version_filter, rev_list))[:limit]\n return list(filter(version_filter, rev_list))\n def download_impl(self):\n nonlocal target_dir, git\n directory = cls.SRC_FILE if target_dir is None else target_dir\n Git(self.repository, directory)\n with local.cwd(directory):\n git(\"checkout\", self.version)\n cls.versions = versions_impl\n cls.download = download_impl\n cls.repository = repo\n return cls\n return git_decorator", - "docstring": "Decorate a project class with git-based version information.\n\n This adds two attributes to a project class:\n - A `versions` method that returns a list of available versions\n for this project.\n - A `repository` attribute that provides a repository string to\n download from later.\n We use the `git rev-list` subcommand to list available versions.\n\n Args:\n repo (str): Repository to download from, this will be stored\n in the `repository` attribute of the decorated class.\n target_dir (str): An optional path where we should put the clone.\n If unspecified, we will use the `SRC_FILE` attribute of\n the decorated class.\n limit (int): Limit the number of commits to consider for available\n versions. Versions are 'ordered' from latest to oldest.\n refspec (str): A git refspec string to start listing the versions from.\n clone (bool): Should we clone the repo if it isn't already available\n in our tmp dir? Defaults to `True`. You can set this to False to\n avoid time consuming clones, when the project has not been accessed\n at least once in your installation.\n ref_list_args (list of str): Additional arguments you want to pass to\n `git rev-list`.\n version_filter (class filter): Filter function to remove unwanted\n project versions." 
- }, - { - "code": "def _detect(self):\n results = []\n for contract in self.contracts:\n shadows = self.detect_shadowing_definitions(contract)\n if shadows:\n for shadow in shadows:\n local_parent_name = shadow[1]\n local_variable = shadow[2]\n overshadowed = shadow[3]\n info = '{}.{}.{} (local variable @ {}) shadows:\\n'.format(contract.name,\n local_parent_name,\n local_variable.name,\n local_variable.source_mapping_str)\n for overshadowed_entry in overshadowed:\n info += \"\\t- {}.{} ({} @ {})\\n\".format(overshadowed_entry[1],\n overshadowed_entry[2],\n overshadowed_entry[0],\n overshadowed_entry[2].source_mapping_str)\n json = self.generate_json_result(info)\n self.add_variable_to_json(local_variable, json)\n for overshadowed_entry in overshadowed:\n if overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION, self.OVERSHADOWED_MODIFIER,\n self.OVERSHADOWED_EVENT]:\n self.add_function_to_json(overshadowed_entry[2], json)\n elif overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE:\n self.add_variable_to_json(overshadowed_entry[2], json)\n results.append(json)\n return results", - "docstring": "Detect shadowing local variables\n\n Recursively visit the calls\n Returns:\n list: {'vuln', 'filename,'contract','func', 'shadow'}" - }, - { - "code": "def fix_style(style='basic', ax=None, **kwargs):\n style = _read_style(style)\n for s in style:\n if not s in style_params.keys():\n avail = [f.replace('.mplstyle', '') for f in os.listdir(\n _get_lib()) if f.endswith('.mplstyle')]\n raise ValueError('{0} is not a valid style. '.format(s) +\n 'Please pick a style from the list available in ' +\n '{0}: {1}'.format(_get_lib(), avail))\n _fix_style(style, ax, **kwargs)", - "docstring": "Add an extra formatting layer to an axe, that couldn't be changed directly \n in matplotlib.rcParams or with styles. Apply this function to every axe \n you created.\n\n Parameters\n ---------- \n ax: a matplotlib axe. \n If None, the last axe generated is used \n style: string or list of string\n ['basic', 'article', 'poster', 'B&W','talk','origin'] \n one of the styles previously defined. It should match the style you \n chose in set_style but nothing forces you to.\n kwargs: dict\n edit any of the style_params keys. 
ex:\n \n >>> tight_layout=False\n\n Examples\n --------\n plb.set_style('poster')\n plt.plot(a,np.cos(a))\n plb.fix_style('poster',**{'draggable_legend':False}) \n \n See Also\n --------\n \n :func:`~publib.publib.set_style`\n :func:`~publib.tools.tools.reset_defaults`" - }, - { - "code": "def service_param_string(params):\r\n p = []\r\n k = []\r\n for param in params:\r\n name = fix_param_name(param['name'])\r\n if 'required' in param and param['required'] is True:\r\n p.append(name)\r\n else:\r\n if 'default' in param:\r\n k.append('{name}={default}'.format(name=name, default=param['default']))\r\n else:\r\n k.append('{name}=None'.format(name=name))\r\n p.sort(lambda a, b: len(a) - len(b))\r\n k.sort()\r\n a = p + k\r\n return ', '.join(a)", - "docstring": "Takes a param section from a metadata class and returns a param string for the service method" - }, - { - "code": "def get_output_dir(self, nb):\n self.package_dir, self.package_name = self.get_package_dir_name(nb)\n return join(self.package_dir, self.package_name)", - "docstring": "Open a notebook and determine the output directory from the name" - }, - { - "code": "def _get_model_fields(self, field_names, declared_fields, extra_kwargs):\n model = getattr(self.Meta, 'model')\n model_fields = {}\n for field_name in field_names:\n if field_name in declared_fields:\n field = declared_fields[field_name]\n source = field.source or field_name\n else:\n try:\n source = extra_kwargs[field_name]['source']\n except KeyError:\n source = field_name\n if '.' in source or source == '*':\n continue\n try:\n field = model._meta.get_field(source)\n if isinstance(field, DjangoModelField):\n model_fields[source] = field\n except FieldDoesNotExist:\n pass\n return model_fields", - "docstring": "Returns all the model fields that are being mapped to by fields\n on the serializer class.\n Returned as a dict of 'model field name' -> 'model field'.\n Used internally by `get_uniqueness_field_options`." - }, - { - "code": "def cudnnCreatePoolingDescriptor():\n poolingDesc = ctypes.c_void_p()\n status = _libcudnn.cudnnCreatePoolingDescriptor(ctypes.byref(poolingDesc))\n cudnnCheckStatus(status)\n return poolingDesc.value", - "docstring": "Create pooling descriptor.\n\n This function creates a pooling descriptor object by allocating the memory needed to\n hold its opaque structure,\n\n Returns\n -------\n poolingDesc : cudnnPoolingDescriptor\n Newly allocated pooling descriptor." - }, - { - "code": "def stop(self):\n with self.lock:\n self.halting = True\n self.go.clear()", - "docstring": "Stops the playing thread and close" - }, - { - "code": "def _checkout(self):\n cmd = [\"atomic\", \"mount\", \"--storage\", \"ostree\", self.ref_image_name, self.mount_point]\n self._run_and_log(cmd, self.ostree_path,\n \"Failed to mount selected image as an ostree repo.\")", - "docstring": "check out the image filesystem on self.mount_point" - }, - { - "code": "def getCurrentFadeColor(self, bBackground):\n fn = self.function_table.getCurrentFadeColor\n result = fn(bBackground)\n return result", - "docstring": "Get current fade color value." 
- }, - { - "code": "def calc_mass_from_z0(z0, w0):\n T0 = 300\n mFromEquipartition = Boltzmann*T0/(w0**2 * z0**2)\n return mFromEquipartition", - "docstring": "Calculates the mass of the particle using the equipartition\n from the angular frequency of the z signal and the average\n amplitude of the z signal in nms.\n\n Parameters\n ----------\n z0 : float\n Physical average amplitude of motion in nms\n w0 : float\n Angular Frequency of z motion\n\n Returns\n -------\n mass : float\n mass in kgs" - }, - { - "code": "def create_read_replica(name, source_name, db_instance_class=None,\n availability_zone=None, port=None,\n auto_minor_version_upgrade=None, iops=None,\n option_group_name=None, publicly_accessible=None,\n tags=None, db_subnet_group_name=None,\n storage_type=None, copy_tags_to_snapshot=None,\n monitoring_interval=None, monitoring_role_arn=None,\n region=None, key=None, keyid=None, profile=None):\n if not backup_retention_period:\n raise SaltInvocationError('backup_retention_period is required')\n res = __salt__['boto_rds.exists'](source_name, tags, region, key, keyid, profile)\n if not res.get('exists'):\n return {'exists': bool(res), 'message':\n 'RDS instance source {0} does not exists.'.format(source_name)}\n res = __salt__['boto_rds.exists'](name, tags, region, key, keyid, profile)\n if res.get('exists'):\n return {'exists': bool(res), 'message':\n 'RDS replica instance {0} already exists.'.format(name)}\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n kwargs = {}\n for key in ('OptionGroupName', 'MonitoringRoleArn'):\n if locals()[key] is not None:\n kwargs[key] = str(locals()[key])\n for key in ('MonitoringInterval', 'Iops', 'Port'):\n if locals()[key] is not None:\n kwargs[key] = int(locals()[key])\n for key in ('CopyTagsToSnapshot', 'AutoMinorVersionUpgrade'):\n if locals()[key] is not None:\n kwargs[key] = bool(locals()[key])\n taglist = _tag_doc(tags)\n rds_replica = conn.create_db_instance_read_replica(DBInstanceIdentifier=name,\n SourceDBInstanceIdentifier=source_name,\n DBInstanceClass=db_instance_class,\n AvailabilityZone=availability_zone,\n PubliclyAccessible=publicly_accessible,\n Tags=taglist, DBSubnetGroupName=db_subnet_group_name,\n StorageType=storage_type,\n **kwargs)\n return {'exists': bool(rds_replica)}\n except ClientError as e:\n return {'error': __utils__['boto3.get_error'](e)}", - "docstring": "Create an RDS read replica\n\n CLI example to create an RDS read replica::\n\n salt myminion boto_rds.create_read_replica replicaname source_name" - }, - { - "code": "def _add_gene_disease(self, row):\n col = self.files['developmental_disorders']['columns']\n if len(row) != len(col):\n raise ValueError(\"Unexpected number of fields for row {}\".format(row))\n variant_label = \"variant of {}\".format(row[col.index('gene_symbol')])\n disease_omim_id = row[col.index('disease_omim_id')]\n if disease_omim_id == 'No disease mim':\n disease_label = row[col.index('disease_label')]\n if disease_label in self.mondo_map:\n disease_id = self.mondo_map[disease_label]\n else:\n return\n else:\n disease_id = 'OMIM:' + disease_omim_id\n hgnc_curie = 'HGNC:' + row[col.index('hgnc_id')]\n relation_curie = self.resolve(row[col.index('g2p_relation_label')])\n mutation_consequence = row[col.index('mutation_consequence')]\n if mutation_consequence not in ('uncertain', ''):\n consequence_relation = self.resolve(\n self._get_consequence_predicate(mutation_consequence))\n consequence_curie = self.resolve(mutation_consequence)\n variant_label = \"{} 
{}\".format(mutation_consequence, variant_label)\n else:\n consequence_relation = None\n consequence_curie = None\n allelic_requirement = row[col.index('allelic_requirement')]\n if allelic_requirement != '':\n requirement_curie = self.resolve(allelic_requirement)\n else:\n requirement_curie = None\n pmids = row[col.index('pmids')]\n if pmids != '':\n pmid_list = ['PMID:' + pmid for pmid in pmids.split(';')]\n else:\n pmid_list = []\n self._build_gene_disease_model(\n hgnc_curie,\n relation_curie,\n disease_id,\n variant_label,\n consequence_relation,\n consequence_curie,\n requirement_curie,\n pmid_list\n )", - "docstring": "Parse and add gene variant disease model\n Model building happens in _build_gene_disease_model\n\n :param row {List}: single row from DDG2P.csv\n :return: None" - }, - { - "code": "def _finish_futures(self, responses):\n exception_args = None\n if len(self._target_objects) != len(responses):\n raise ValueError(\"Expected a response for every request.\")\n for target_object, subresponse in zip(self._target_objects, responses):\n if not 200 <= subresponse.status_code < 300:\n exception_args = exception_args or subresponse\n elif target_object is not None:\n try:\n target_object._properties = subresponse.json()\n except ValueError:\n target_object._properties = subresponse.content\n if exception_args is not None:\n raise exceptions.from_http_response(exception_args)", - "docstring": "Apply all the batch responses to the futures created.\n\n :type responses: list of (headers, payload) tuples.\n :param responses: List of headers and payloads from each response in\n the batch.\n\n :raises: :class:`ValueError` if no requests have been deferred." - }, - { - "code": "def _has_name(soup_obj):\n try:\n name = soup_obj.name\n if name == None:\n return False\n return True\n except AttributeError:\n return False", - "docstring": "checks if soup_obj is really a soup object or just a string\n If it has a name it is a soup object" - }, - { - "code": "def _clean_kwargs(keep_name=False, **kwargs):\n if 'name' in kwargs and not keep_name:\n kwargs['name_or_id'] = kwargs.pop('name')\n return __utils__['args.clean_kwargs'](**kwargs)", - "docstring": "Sanatize the the arguments for use with shade" - }, - { - "code": "def filter(self, endpoint, params):\n params = self.parse_params(params)\n params = urlencode(params)\n path = '{0}?{1}'.format(endpoint, params)\n return self.get(path)", - "docstring": "Makes a get request by construction\n the path from an endpoint and a dict\n with filter query params\n\n e.g.\n params = {'category__in': [1,2]}\n response = self.client.filter('/experiences/', params)" - }, - { - "code": "def similarity(self, other: Trigram) -> Tuple[float, L]:\n return max(\n ((t % other, l) for t, l in self.trigrams),\n key=lambda x: x[0],\n )", - "docstring": "Returns the best matching score and the associated label." - }, - { - "code": "def on_key_press(self, event):\n key = event.key\n if event.modifiers:\n return\n if self.enable_keyboard_pan and key in self._arrows:\n self._pan_keyboard(key)\n if key in self._pm:\n self._zoom_keyboard(key)\n if key == 'R':\n self.reset()", - "docstring": "Pan and zoom with the keyboard." 
- }, - { - "code": "def on_join(self, connection, event):\n nickname = self.get_nickname(event)\n nickname_color = color(nickname)\n self.nicknames[nickname] = nickname_color\n self.namespace.emit(\"join\")\n self.namespace.emit(\"message\", nickname, \"joins\", nickname_color)\n self.emit_nicknames()", - "docstring": "Someone joined the channel - send the nicknames list to the\n WebSocket." - }, - { - "code": "def update_model(self, words):\n extended_words = DefaultCompleter._DefaultCompleter__tokens[self.__language][:]\n extended_words.extend((word for word in set(words)\n if word not in DefaultCompleter._DefaultCompleter__tokens[self.__language]))\n self.setModel(QStringListModel(extended_words))\n return True", - "docstring": "Updates the completer model.\n\n :param words: Words to update the completer with.\n :type words: tuple or list\n :return: Method success.\n :rtype: bool" - }, - { - "code": "def get_option_columns(self, typ, element):\n inter = self.get_typ_interface(typ)\n return inter.get_option_columns(element)", - "docstring": "Return the column of the model to show for each level\n\n Because each level might be displayed in a combobox. So you might want to provide the column\n to show.\n\n :param typ: the typ of options. E.g. Asset, Alembic, Camera etc\n :type typ: str\n :param element: The element for wich the options should be fetched.\n :type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`\n :returns: a list of columns\n :rtype: list\n :raises: None" - }, - { - "code": "def fin(self):\n self.connection.fin(self.id)\n self.processed = True", - "docstring": "Indicate that this message is finished processing" - }, - { - "code": "def cfg(self):\n config = LStruct(self.defaults)\n module = config['CONFIG'] = os.environ.get(\n CONFIGURATION_ENVIRON_VARIABLE, config['CONFIG'])\n if module:\n try:\n module = import_module(module)\n config.update({\n name: getattr(module, name) for name in dir(module)\n if name == name.upper() and not name.startswith('_')\n })\n except ImportError as exc:\n config.CONFIG = None\n self.logger.error(\"Error importing %s: %s\", module, exc)\n for name in config:\n if name.startswith('_') or name != name.upper() or name not in os.environ:\n continue\n try:\n config[name] = json.loads(os.environ[name])\n except ValueError:\n pass\n return config", - "docstring": "Load the application configuration.\n\n This method loads configuration from python module." - }, - { - "code": "def merge_dicts(dict1, dict2, append_lists=False):\n for key in dict2:\n if isinstance(dict2[key], dict):\n if key in dict1 and key in dict2:\n merge_dicts(dict1[key], dict2[key], append_lists)\n else:\n dict1[key] = dict2[key]\n elif isinstance(dict2[key], list) and append_lists:\n if key in dict1 and isinstance(dict1[key], list):\n dict1[key].extend(\n [k for k in dict2[key] if k not in dict1[key]])\n else:\n dict1[key] = dict2[key]\n else:\n dict1[key] = dict2[key]", - "docstring": "Merge the second dict into the first\n Not intended to merge list of dicts.\n\n :param append_lists: If true, instead of clobbering a list with the\n new value, append all of the new values onto the original list." - }, - { - "code": "def _should_proxy(self, attr):\n if attr in type(self).__notproxied__:\n return False\n if _oga(self, \"__notproxied__\") is True:\n return False\n return True", - "docstring": "Determines whether `attr` should be looked up on the proxied object, or\n the proxy itself." 
- }, - { - "code": "def _job_sorting_key(self, job):\n MAX_BLOCKS_PER_FUNCTION = 1000000\n task_functions = list(reversed(\n list(task.function_address for task in self._task_stack if isinstance(task, FunctionAnalysis))\n ))\n try:\n function_pos = task_functions.index(job.func_addr)\n except ValueError:\n l.warning('Function address %\n return 0\n try:\n block_in_function_pos = self._ordered_node_addrs(job.func_addr).index(job.addr)\n except ValueError:\n block_in_function_pos = min(job.addr - job.func_addr, MAX_BLOCKS_PER_FUNCTION - 1)\n return block_in_function_pos + MAX_BLOCKS_PER_FUNCTION * function_pos", - "docstring": "Get the sorting key of a VFGJob instance.\n\n :param VFGJob job: the VFGJob object.\n :return: An integer that determines the order of this job in the queue.\n :rtype: int" - }, - { - "code": "def preprocess_gold(self, docs_golds):\n for name, proc in self.pipeline:\n if hasattr(proc, \"preprocess_gold\"):\n docs_golds = proc.preprocess_gold(docs_golds)\n for doc, gold in docs_golds:\n yield doc, gold", - "docstring": "Can be called before training to pre-process gold data. By default,\n it handles nonprojectivity and adds missing tags to the tag map.\n\n docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.\n YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects." - }, - { - "code": "async def get_participant(self, p_id: int, force_update=False) -> Participant:\n found_p = self._find_participant(p_id)\n if force_update or found_p is None:\n await self.get_participants()\n found_p = self._find_participant(p_id)\n return found_p", - "docstring": "get a participant by its id\n\n |methcoro|\n\n Args:\n p_id: participant id\n force_update (dfault=False): True to force an update to the Challonge API\n\n Returns:\n Participant: None if not found\n\n Raises:\n APIException" - }, - { - "code": "def hdfs_path(ctx, path):\n HADOOP_SCHEMES = ['adl://',\n 'file://',\n 'hdfs://',\n 'oss://',\n 's3://',\n 's3a://',\n 's3n://',\n 'swift://',\n 'viewfs://',\n 'wasb://']\n if (any(path.startswith(scheme) for scheme in HADOOP_SCHEMES)):\n return path\n elif path.startswith(\"/\"):\n return ctx.defaultFS + path\n else:\n if ctx.defaultFS.startswith(\"hdfs://\") or ctx.defaultFS.startswith(\"viewfs://\"):\n return \"{0}/user/{1}/{2}\".format(ctx.defaultFS, getpass.getuser(), path)\n elif ctx.defaultFS.startswith(\"file://\"):\n return \"{0}/{1}/{2}\".format(ctx.defaultFS, ctx.working_dir[1:], path)\n else:\n logging.warn(\"Unknown scheme {0} with relative path: {1}\".format(ctx.defaultFS, path))\n return \"{0}/{1}\".format(ctx.defaultFS, path)", - "docstring": "Convenience function to create a Tensorflow-compatible absolute HDFS path from relative paths\n\n Args:\n :ctx: TFNodeContext containing the metadata specific to this node in the cluster.\n :path: path to convert\n\n Returns:\n An absolute path prefixed with the correct filesystem scheme." - }, - { - "code": "def _notify_add_at(self, index, length=1):\n slice_ = self._slice_at(index, length)\n self._notify_add(slice_)", - "docstring": "Notify about an AddChange at a caertain index and length." 
- }, - { - "code": "def get_metadata(url, validate_cert=True):\n valid = False\n if validate_cert:\n response = urllib2.urlopen(url)\n else:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n response = urllib2.urlopen(url, context=ctx)\n xml = response.read()\n if xml:\n try:\n dom = fromstring(xml, forbid_dtd=True)\n idp_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, '//md:IDPSSODescriptor')\n if idp_descriptor_nodes:\n valid = True\n except Exception:\n pass\n if not valid:\n raise Exception('Not valid IdP XML found from URL: %s' % (url))\n return xml", - "docstring": "Gets the metadata XML from the provided URL\n\n :param url: Url where the XML of the Identity Provider Metadata is published.\n :type url: string\n\n :param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate.\n :type validate_cert: bool\n\n :returns: metadata XML\n :rtype: string" - }, - { - "code": "def get(key, profile=None):\n conn = salt.utils.memcached.get_conn(profile)\n return salt.utils.memcached.get(conn, key)", - "docstring": "Get a value from memcached" - }, - { - "code": "def _get_instance(project_id, instance_zone, name, service):\n return service.instances().get(project=project_id,\n zone=instance_zone,\n instance=name).execute()", - "docstring": "Get instance details" - }, - { - "code": "def clean(mapping, bind, values):\n categories = {'C': ' '}\n for value in values:\n if isinstance(value, six.string_types):\n value = normality.normalize(value, lowercase=False, collapse=True,\n decompose=False,\n replace_categories=categories)\n yield value", - "docstring": "Perform several types of string cleaning for titles etc.." - }, - { - "code": "def logsumexp(X, axis=0):\n mx = X.max(axis=axis)\n if (X.ndim > 1):\n mx = np.atleast_2d(mx).T if axis == 1 else np.atleast_2d(mx)\n return np.log(np.exp(X - mx).sum(axis=axis)) + np.ravel(mx)", - "docstring": "Log-sum-exp trick for matrix X for summation along a specified axis.\n\n This performs the following operation in a stable fashion,\n\n .. math::\n\n \\log \\sum^K_{k=1} \\exp\\{x_k\\}\n\n Parameters\n ----------\n X: ndarray\n 2D array of shape (N, D) to apply the log-sum-exp trick.\n axis: int, optional\n Axis to apply the summation along (works the same as axis in\n numpy.sum).\n\n Returns\n -------\n lseX: ndarray\n results of applying the log-sum-exp trick, this will be shape (D,)\n if :code:`axis=0` or shape (N,) if :code:`axis=1`." - }, - { - "code": "def search():\n redis_key = 's_%s' % request.args['query'].lower()\n cached = redis_ro_conn.get(redis_key)\n if cached:\n return Response(cached)\n else:\n try:\n found = get_on_tmdb(u'/search/movie', query=request.args['query'])\n movies = []\n for movie in found['results']:\n cast = get_on_tmdb(u'/movie/%s/casts' % movie['id'])\n year = datetime.strptime(movie['release_date'], '%Y-%m-%d').year if movie['release_date'] else None\n movies.append({'title': movie['original_title'],\n 'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],\n 'year': year,\n '_tmdb_id': movie['id']})\n except requests.HTTPError as err:\n return Response('TMDB API error: %s' % str(err), status=err.response.status_code)\n json_response = json.dumps({'movies': movies})\n redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)\n return Response(json_response)", - "docstring": "Search a movie on TMDB." 
- }, - { - "code": "def fill(self, paths):\n for path in paths:\n tree = self.tree\n parts = tuple(path.split('/'))\n dir_parts = parts[:-1]\n built = ()\n for part in dir_parts:\n self.cache[built] = tree\n built += (part, )\n parent = tree\n tree = parent.folders.get(part, empty)\n if tree is empty:\n tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)\n self.cache[dir_parts] = tree\n tree.files.add(parts[-1])", - "docstring": "Initialise the tree.\n\n paths is a list of strings where each string is the relative path to some\n file." - }, - { - "code": "def receive_request(self, transaction):\n with transaction:\n transaction.separate_timer = self._start_separate_timer(transaction)\n self._blockLayer.receive_request(transaction)\n if transaction.block_transfer:\n self._stop_separate_timer(transaction.separate_timer)\n self._messageLayer.send_response(transaction)\n self.send_datagram(transaction.response)\n return\n self._observeLayer.receive_request(transaction)\n self._requestLayer.receive_request(transaction)\n if transaction.resource is not None and transaction.resource.changed:\n self.notify(transaction.resource)\n transaction.resource.changed = False\n elif transaction.resource is not None and transaction.resource.deleted:\n self.notify(transaction.resource)\n transaction.resource.deleted = False\n self._observeLayer.send_response(transaction)\n self._blockLayer.send_response(transaction)\n self._stop_separate_timer(transaction.separate_timer)\n self._messageLayer.send_response(transaction)\n if transaction.response is not None:\n if transaction.response.type == defines.Types[\"CON\"]:\n self._start_retransmission(transaction, transaction.response)\n self.send_datagram(transaction.response)", - "docstring": "Handle requests coming from the udp socket.\n\n :param transaction: the transaction created to manage the request" - }, - { - "code": "def getch():\n if sys.platform in ['darwin', 'linux']:\n import termios\n import tty\n file_descriptor = sys.stdin.fileno()\n settings = termios.tcgetattr(file_descriptor)\n try:\n tty.setraw(file_descriptor)\n return sys.stdin.read(1)\n finally:\n termios.tcsetattr(file_descriptor, termios.TCSADRAIN, settings)\n elif sys.platform in ['cygwin', 'win32']:\n import msvcrt\n return msvcrt.getwch()", - "docstring": "Request a single character input from the user." - }, - { - "code": "def __interrupt_search(self):\n if self.__search_worker_thread:\n self.__search_worker_thread.quit()\n self.__search_worker_thread.wait()\n self.__container.engine.stop_processing(warning=False)", - "docstring": "Interrupt the current search." 
- }, - { - "code": "def handle(self, data, **kwargs):\n if self.many:\n return self.mapper.many(raw=self.raw, **self.mapper_kwargs).serialize(\n data, role=self.role\n )\n else:\n return self.mapper(obj=data, raw=self.raw, **self.mapper_kwargs).serialize(\n role=self.role\n )", - "docstring": "Run serialization for the specified mapper_class.\n\n Supports both .serialize and .many().serialize Kim interfaces.\n\n :param data: Objects to be serialized.\n :returns: Serialized data according to mapper configuration" - }, - { - "code": "def gen_time_intervals(start, end, delta):\n curr = start\n while curr < end:\n yield curr\n curr += delta", - "docstring": "Create time intervals with timedelta periods using datetime for start\n and end" - }, - { - "code": "def init_interface(field):\n interface_shape = np.array(field.shape); interface_shape[-1] += 1\n interfaces = np.tile(False,len(interface_shape)); interfaces[-1] = True\n interface_zero = Field(np.zeros(interface_shape), domain=field.domain, interfaces=interfaces)\n return interface_zero", - "docstring": "Return a Field object defined at the vertical interfaces of the input Field object." - }, - { - "code": "def _post_process_output(res):\n if isinstance(res, list):\n if len(res) == 0:\n return res\n elif len(res) == 1:\n return _post_process_output(res[0])\n elif isinstance(res[0], numpy.ndarray):\n return numpy.array(res)\n elif isinstance(res[0], dict):\n import pandas\n return pandas.DataFrame(res).values\n else:\n ls = [len(r) for r in res]\n mi = min(ls)\n if mi != max(ls):\n raise NotImplementedError(\"Unable to postprocess various number of outputs in [{0}, {1}]\".format(min(ls), max(ls)))\n if mi > 1:\n output = []\n for i in range(mi):\n output.append(_post_process_output([r[i] for r in res]))\n return output\n elif isinstance(res[0], list):\n if isinstance(res[0][0], list):\n return numpy.array(res)\n elif len(res[0]) == 1 and isinstance(res[0][0], dict):\n return _post_process_output([r[0] for r in res])\n elif len(res) == 1:\n return res\n else:\n if len(res[0]) != 1:\n raise NotImplementedError(\"Not conversion implemented for {0}\".format(res))\n st = [r[0] for r in res]\n return numpy.vstack(st)\n else:\n return res\n else:\n return res", - "docstring": "Applies post processings before running the comparison\n such as changing type from list to arrays." - }, - { - "code": "def _index_to_ansi_values(self, index):\n if self.__class__.__name__[0] == 'F':\n if index < 8:\n index += ANSI_FG_LO_BASE\n else:\n index += (ANSI_FG_HI_BASE - 8)\n else:\n if index < 8:\n index += ANSI_BG_LO_BASE\n else:\n index += (ANSI_BG_HI_BASE - 8)\n return [str(index)]", - "docstring": "Converts an palette index to the corresponding ANSI color.\n\n Arguments:\n index - an int (from 0-15)\n Returns:\n index as str in a list for compatibility with values." - }, - { - "code": "def _update_job(self, target, args, kwargs):\n target_path, options = get_function_path_and_options(target)\n assert isinstance(args, (tuple, list)) or args is None\n assert isinstance(kwargs, dict) or kwargs is None\n if options:\n self.update_options(**options)\n self._options['job'] = (target_path, args, kwargs)", - "docstring": "Specify the function this async job is to execute when run." 
- }, - { - "code": "def get_repository_ids_by_asset(self, asset_id):\n mgr = self._get_provider_manager('REPOSITORY', local=True)\n lookup_session = mgr.get_asset_lookup_session(proxy=self._proxy)\n lookup_session.use_federated_repository_view()\n asset = lookup_session.get_asset(asset_id)\n id_list = []\n for idstr in asset._my_map['assignedRepositoryIds']:\n id_list.append(Id(idstr))\n return IdList(id_list)", - "docstring": "Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``.\n\n arg: asset_id (osid.id.Id): ``Id`` of an ``Asset``\n return: (osid.id.IdList) - list of repository ``Ids``\n raise: NotFound - ``asset_id`` is not found\n raise: NullArgument - ``asset_id`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def drop_capabilities(keep=[]):\n capdata = (libc.CapData * 2)()\n for cap in keep:\n capdata[0].effective |= (1 << cap)\n capdata[0].permitted |= (1 << cap)\n libc.capset(ctypes.byref(libc.CapHeader(version=libc.LINUX_CAPABILITY_VERSION_3, pid=0)),\n ctypes.byref(capdata))", - "docstring": "Drop all capabilities this process has.\n @param keep: list of capabilities to not drop" - }, - { - "code": "def generate_chunks(data, chunk_size=DEFAULT_CHUNK_SIZE):\n iterator = iter(repeated.getvalues(data))\n while True:\n chunk = list(itertools.islice(iterator, chunk_size))\n if not chunk:\n return\n yield chunk", - "docstring": "Yield 'chunk_size' items from 'data' at a time." - }, - { - "code": "def handle(self, environ, start_response):\n request = Request(environ)\n request.max_content_length = self.max_content_length\n access_control_headers = {\n 'Access-Control-Allow-Methods': 'POST',\n 'Access-Control-Allow-Origin': self.allow_origin,\n 'Access-Control-Allow-Headers': \\\n 'Content-Type, X-Requested-With, Accept, Origin'\n }\n if request.method == 'OPTIONS':\n response = Response(headers=access_control_headers)\n elif request.method == 'POST':\n msg = request.stream.read()\n context = self._queue_class()\n self.messages.put((context, msg))\n response = Response(context.get(), headers=access_control_headers)\n else:\n response = Response('Only POST supported', 405)\n return response(environ, start_response)", - "docstring": "WSGI handler function.\n\n The transport will serve a request by reading the message and putting\n it into an internal buffer. It will then block until another\n concurrently running function sends a reply using :py:meth:`send_reply`.\n\n The reply will then be sent to the client being handled and handle will\n return." - }, - { - "code": "def GetDeviceIntProperty(dev_ref, key):\n cf_key = CFStr(key)\n type_ref = iokit.IOHIDDeviceGetProperty(dev_ref, cf_key)\n cf.CFRelease(cf_key)\n if not type_ref:\n return None\n if cf.CFGetTypeID(type_ref) != cf.CFNumberGetTypeID():\n raise errors.OsHidError('Expected number type, got {}'.format(\n cf.CFGetTypeID(type_ref)))\n out = ctypes.c_int32()\n ret = cf.CFNumberGetValue(type_ref, K_CF_NUMBER_SINT32_TYPE,\n ctypes.byref(out))\n if not ret:\n return None\n return out.value", - "docstring": "Reads int property from the HID device." 
- }, - { - "code": "def fastp_filtered_reads_chart(self):\n keys = OrderedDict()\n keys['filtering_result_passed_filter_reads'] = { 'name': 'Passed Filter' }\n keys['filtering_result_low_quality_reads'] = { 'name': 'Low Quality' }\n keys['filtering_result_too_many_N_reads'] = { 'name': 'Too Many N' }\n keys['filtering_result_too_short_reads'] = { 'name': 'Too short' }\n pconfig = {\n 'id': 'fastp_filtered_reads_plot',\n 'title': 'Fastp: Filtered Reads',\n 'ylab': '\n 'cpswitch_counts_label': 'Number of Reads',\n 'hide_zero_cats': False,\n }\n return bargraph.plot(self.fastp_data, keys, pconfig)", - "docstring": "Function to generate the fastp filtered reads bar plot" - }, - { - "code": "def remover(self, id_divisiondc):\n if not is_valid_int_param(id_divisiondc):\n raise InvalidParameterError(\n u'The identifier of Division Dc is invalid or was not informed.')\n url = 'divisiondc/' + str(id_divisiondc) + '/'\n code, xml = self.submit(None, 'DELETE', url)\n return self.response(code, xml)", - "docstring": "Remove Division Dc from by the identifier.\n\n :param id_divisiondc: Identifier of the Division Dc. Integer value and greater than zero.\n\n :return: None\n\n :raise InvalidParameterError: The identifier of Division Dc is null and invalid.\n :raise DivisaoDcNaoExisteError: Division Dc not registered.\n :raise DataBaseError: Networkapi failed to access the database.\n :raise XMLError: Networkapi failed to generate the XML response." - }, - { - "code": "def get_name(self):\n if len(self.taxid2asscs) == 1:\n return '{BASE}_{TAXID}'.format(\n BASE=self.name, TAXID=next(iter(self.taxid2asscs.keys())))\n return '{BASE}_various'.format(BASE=self.name)", - "docstring": "Get name using taxid" - }, - { - "code": "def subscribe(self, socket_id, channel):\n con = self._get_connection(socket_id)\n self.subscriptions.setdefault(channel, set()).add(socket_id)\n con.subscriptions.add(channel)", - "docstring": "Subscribes a socket to a channel." - }, - { - "code": "def pretty_print(self, carrot=True):\n output = ['\\n']\n output.extend([line.pretty_print() for line in\n self.partpyobj.get_surrounding_lines(1, 0)])\n if carrot:\n output.append('\\n' +\n (' ' * (self.partpyobj.col + 5)) + '^' + '\\n')\n if self.partpymsg:\n output.append(self.partpymsg)\n return ''.join(output)", - "docstring": "Print the previous and current line with line numbers and\n a carret under the current character position.\n\n Will also print a message if one is given to this exception." 
- }, - { - "code": "def _vec_b(self, donor_catchments):\n p = len(donor_catchments)\n b = 0.1175 * np.ones(p)\n for i in range(p):\n b[i] *= self._model_error_corr(self.catchment, donor_catchments[i])\n return b", - "docstring": "Return vector ``b`` of model error covariances to estimate weights\n\n Methodology source: Kjeldsen, Jones and Morris, 2009, eqs 3 and 10\n\n :param donor_catchments: Catchments to use as donors\n :type donor_catchments: list of :class:`Catchment`\n :return: Model error covariance vector\n :rtype: :class:`numpy.ndarray`" - }, - { - "code": "def transfer_causal_edges(graph, source: BaseEntity, target: BaseEntity) -> Iterable[str]:\n for _, v, data in graph.out_edges(source, data=True):\n if data[RELATION] not in CAUSAL_RELATIONS:\n continue\n yield graph.add_qualified_edge(\n target,\n v,\n relation=data[RELATION],\n evidence=data[EVIDENCE],\n citation=data[CITATION],\n annotations=data.get(ANNOTATIONS),\n subject_modifier=data.get(SUBJECT),\n object_modifier=data.get(OBJECT),\n )\n for u, _, data in graph.in_edges(source, data=True):\n if data[RELATION] not in CAUSAL_RELATIONS:\n continue\n yield graph.add_qualified_edge(\n u,\n target,\n relation=data[RELATION],\n evidence=data[EVIDENCE],\n citation=data[CITATION],\n annotations=data.get(ANNOTATIONS),\n subject_modifier=data.get(SUBJECT),\n object_modifier=data.get(OBJECT),\n )", - "docstring": "Transfer causal edges that the source has to the target and yield the resulting hashes." - }, - { - "code": "def update_fallbackserver(self, serverid, data):\n return self.api_call(\n ENDPOINTS['fallbackservers']['update'],\n dict(serverid=serverid),\n body=data)", - "docstring": "Update Fallback server" - }, - { - "code": "def hasannotationlayer(self, annotationtype=None,set=None):\n l = self.layers(annotationtype, set)\n return (len(l) > 0)", - "docstring": "Does the specified annotation layer exist?" - }, - { - "code": "def getcolors(spec, n, cmap=None, value=None):\n if cmap is not None and spec is not None:\n from matplotlib.colors import LinearSegmentedColormap\n from matplotlib.cm import get_cmap\n if isinstance(cmap, LinearSegmentedColormap):\n return cmap(value)[:, 0:3]\n if isinstance(cmap, str):\n return get_cmap(cmap, n)(value)[:, 0:3]\n if isinstance(spec, str):\n return [getcolor(spec) for i in range(n)]\n elif isinstance(spec, list) and isinstance(spec[0], str):\n return [getcolor(s) for s in spec]\n elif (isinstance(spec, list) or isinstance(spec, ndarray)) and asarray(spec).shape == (3,):\n return [spec for i in range(n)]\n else:\n return spec", - "docstring": "Turn list of color specs into list of arrays." 
- }, - { - "code": "def update_image_location(self, timeline_json):\n if not timeline_json:\n return False\n if isinstance(timeline_json, (tuple, list)):\n timeline_json = timeline_json[0]\n event_code = timeline_json.get('event_code')\n if event_code != TIMELINE.CAPTURE_IMAGE['event_code']:\n raise AbodeException((ERROR.CAM_TIMELINE_EVENT_INVALID))\n file_path = timeline_json.get('file_path')\n if not file_path:\n raise AbodeException((ERROR.CAM_IMAGE_REFRESH_NO_FILE))\n url = CONST.BASE_URL + file_path\n response = self._abode.send_request(\"head\", url)\n if response.status_code != 302:\n _LOGGER.warning(\"Unexected response code %s with body: %s\",\n str(response.status_code), response.text)\n raise AbodeException((ERROR.CAM_IMAGE_UNEXPECTED_RESPONSE))\n location = response.headers.get('location')\n if not location:\n raise AbodeException((ERROR.CAM_IMAGE_NO_LOCATION_HEADER))\n self._image_url = location\n return True", - "docstring": "Update the image location." - }, - { - "code": "def read_config(path=None, final=False):\n try:\n with open(config_path(path, final=final), 'r') as configfile:\n return yaml.safe_load(configfile) or {}\n except FileNotFoundError:\n return {}", - "docstring": "Read Renku configuration." - }, - { - "code": "def get_sigma(imt):\n if imt.period < 0.2:\n return np.log(10**0.23)\n elif imt.period > 1.0:\n return np.log(10**0.27)\n else:\n return np.log(10**(0.23 + (imt.period - 0.2)/0.8 * 0.04))", - "docstring": "Return the value of the total sigma\n\n :param float imt:\n An :class:`openquake.hazardlib.imt.IMT` instance\n :returns:\n A float representing the total sigma value" - }, - { - "code": "def colordict(self):\n d = {}\n i=0\n n = len(self.constraints)\n for c in self.constraints:\n d[c] = cm.jet(1.*i/n)\n i+=1\n return d", - "docstring": "Dictionary holding colors that correspond to constraints." - }, - { - "code": "def get_records(self):\n if self.exhausted:\n return []\n if self.empty_responses >= CALLS_TO_REACH_HEAD:\n return self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))\n while self.empty_responses < CALLS_TO_REACH_HEAD and not self.exhausted:\n records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))\n if records:\n return records\n return []", - "docstring": "Get the next set of records in this shard. An empty list doesn't guarantee the shard is exhausted.\n\n :returns: A list of reformatted records. May be empty." 
- }, - { - "code": "def convert_time_to_hour_minute(hour, minute, convention):\n if hour is None:\n hour = 0\n if minute is None:\n minute = 0\n if convention is None:\n convention = 'am'\n hour = int(hour)\n minute = int(minute)\n if convention.lower() == 'pm':\n hour += 12\n return {'hours': hour, 'minutes': minute}", - "docstring": "Convert time to hour, minute" - }, - { - "code": "def findLabel(self, query, create=False):\n if isinstance(query, six.string_types):\n query = query.lower()\n for label in self._labels.values():\n if (isinstance(query, six.string_types) and query == label.name.lower()) or \\\n (isinstance(query, Pattern) and query.search(label.name)):\n return label\n return self.createLabel(query) if create and isinstance(query, six.string_types) else None", - "docstring": "Find a label with the given name.\n\n Args:\n name (Union[_sre.SRE_Pattern, str]): A str or regular expression to match against the name.\n create (bool): Whether to create the label if it doesn't exist (only if name is a str).\n\n Returns:\n Union[gkeepapi.node.Label, None]: The label." - }, - { - "code": "def reverse(self):\n enabled = self.lib.iperf_get_test_reverse(self._test)\n if enabled:\n self._reverse = True\n else:\n self._reverse = False\n return self._reverse", - "docstring": "Toggles direction of test\n\n :rtype: bool" - }, - { - "code": "def add_config_path(path):\n if not os.path.isfile(path):\n warnings.warn(\"Config file does not exist: {path}\".format(path=path))\n return False\n _base, ext = os.path.splitext(path)\n if ext and ext[1:] in PARSERS:\n parser = ext[1:]\n else:\n parser = PARSER\n parser_class = PARSERS[parser]\n _check_parser(parser_class, parser)\n if parser != PARSER:\n msg = (\n \"Config for {added} parser added, but used {used} parser. \"\n \"Set up right parser via env var: \"\n \"export LUIGI_CONFIG_PARSER={added}\"\n )\n warnings.warn(msg.format(added=parser, used=PARSER))\n parser_class.add_config_path(path)\n return True", - "docstring": "Select config parser by file extension and add path into parser." 
- }, - { - "code": "def integrator(integrand,xmin,xmax,n_points,factor=2):\n integral_vector = np.empty([n_points+1])\n dx = (xmax-xmin)/n_points\n for i in xrange(n_points+1):\n xnow = xmin + i * dx\n integral, error = integrate.quad(rotate(integrand,\n xnow), xmin*factor, xmax*factor)\n integral_vector[i] = integral\n normalization = np.average(integral_vector)*(xmax-xmin)\n normalized_vector = integral_vector/normalization\n return normalized_vector", - "docstring": "Creating theoretical curve for 2D model functions\n integrator function" - }, - { - "code": "def inside(points, polygons, short_circuit='any', precision=0.001):\n poly = []\n if isinstance(polygons, PolygonSet):\n poly.extend(polygons.polygons)\n elif isinstance(polygons, CellReference) or isinstance(\n polygons, CellArray):\n poly.extend(polygons.get_polygons())\n else:\n for obj in polygons:\n if isinstance(obj, PolygonSet):\n poly.extend(obj.polygons)\n elif isinstance(obj, CellReference) or isinstance(obj, CellArray):\n poly.extend(obj.get_polygons())\n else:\n poly.append(obj)\n if hasattr(points[0][0], '__iter__'):\n pts = points\n sc = 1 if short_circuit == 'any' else -1\n else:\n pts = (points, )\n sc = 0\n return clipper.inside(pts, poly, sc, 1 / precision)", - "docstring": "Test whether each of the points is within the given set of polygons.\n\n Parameters\n ----------\n points : array-like[N][2] or list of array-like[N][2]\n Coordinates of the points to be tested or groups of points to be\n tested together.\n polygons : polygon or array-like\n Polygons to be tested against. Must be a ``PolygonSet``,\n ``CellReference``, ``CellArray``, or an array. The array may\n contain any of the previous objects or an array-like[N][2] of\n vertices of a polygon.\n short_circuit : {'any', 'all'}\n If `points` is a list of point groups, testing within each group\n will be short-circuited if any of the points in the group is\n inside ('any') or outside ('all') the polygons. If `points` is\n simply a list of points, this parameter has no effect.\n precision : float\n Desired precision for rounding vertice coordinates.\n\n Returns\n -------\n out : tuple\n Tuple of booleans indicating if each of the points or point\n groups is inside the set of polygons." - }, - { - "code": "def weld_iloc_indices(array, weld_type, indices):\n weld_obj = create_empty_weld_object()\n weld_obj_id_array = get_weld_obj_id(weld_obj, array)\n weld_obj_id_indices = get_weld_obj_id(weld_obj, indices)\n weld_template =\n weld_obj.weld_code = weld_template.format(array=weld_obj_id_array,\n indices=weld_obj_id_indices,\n type=weld_type)\n return weld_obj", - "docstring": "Retrieve the values at indices.\n\n Parameters\n ----------\n array : numpy.ndarray or WeldObject\n Input data. Assumed to be bool data.\n weld_type : WeldType\n The WeldType of the array data.\n indices : numpy.ndarray or WeldObject\n The indices to lookup.\n\n Returns\n -------\n WeldObject\n Representation of this computation." - }, - { - "code": "def find_components_without_sbo_terms(model, components):\n return [elem for elem in getattr(model, components) if\n elem.annotation is None or 'sbo' not in elem.annotation]", - "docstring": "Find model components that are not annotated with any SBO terms.\n\n Parameters\n ----------\n model : cobra.Model\n The metabolic model under investigation.\n components : {\"metabolites\", \"reactions\", \"genes\"}\n A string denoting `cobra.Model` components.\n\n Returns\n -------\n list\n The components without any SBO term annotation." 
- }, - { - "code": "def get_admin_email_link(application):\n url = '%s/applications/%d/' % (settings.ADMIN_BASE_URL, application.pk)\n is_secret = False\n return url, is_secret", - "docstring": "Retrieve a link that can be emailed to the administrator." - }, - { - "code": "def do_image_operations(self):\n def inner(future):\n self.done_callback()\n self.context.thread_pool.queue(\n operation=self.img_operation_worker,\n callback=inner\n )", - "docstring": "If ENGINE_THREADPOOL_SIZE > 0, this will schedule the image operations\n into a threadpool. If not, it just executes them synchronously, and\n calls self.done_callback when it's finished.\n\n The actual work happens in self.img_operation_worker" - }, - { - "code": "def linkify(text, attrs={}):\n def separate_parentheses(s):\n start = re_find(r'^\\(*', s)\n end = re_find(r'\\)*$', s)\n n = min(len(start), len(end))\n if n:\n return s[:n], s[n:-n], s[-n:]\n else:\n return '', s, ''\n def link_repl(url, proto='http://'):\n opening, url, closing = separate_parentheses(url)\n punct = re_find(punct_re, url)\n if punct:\n url = url[:-len(punct)]\n if re.search(proto_re, url):\n href = url\n else:\n href = proto + url\n href = escape_url(href)\n repl = u'{0!s}{3!s}{4!s}{5!s}'\n return repl.format(opening,\n href, attrs_text, url, punct,\n closing)\n def repl(match):\n matches = match.groupdict()\n if matches['url']:\n return link_repl(matches['url'])\n else:\n return link_repl(matches['email'], proto='mailto:')\n attr = ' {0!s}=\"{1!s}\"'\n attrs_text = ''.join(starmap(attr.format, attrs.items()))\n return re.sub(combined_re, repl, force_unicode(text))", - "docstring": "Convert URL-like and email-like strings into links." - }, - { - "code": "def start(self):\n if self.via_ip:\n connect_to = self.via_ip\n self.description = '[%s@%s via %s]' % (self._user,\n self._hostname,\n self.via_ip)\n else:\n connect_to = self._hostname\n self.description = '[%s@%s]' % (self._user,\n self._hostname)\n exception = None\n for i in range(60):\n try:\n self._client.connect(\n connect_to,\n username=self._user,\n allow_agent=True,\n key_filename=self._key_filename)\n self._transport = self._get_transport()\n except (OSError,\n TypeError,\n ssh_exception.SSHException,\n ssh_exception.NoValidConnectionsError) as e:\n exception = e\n LOG.info('%s waiting for %s: %s' %\n (self.description, connect_to, str(exception)))\n time.sleep(1)\n else:\n LOG.debug('%s connected' % self.description)\n self._started = True\n return\n _error = (\"unable to connect to ssh service on '%s': %s\" %\n (self._hostname, str(exception)))\n LOG.error(_error)\n raise exception", - "docstring": "Start the ssh client and connect to the host.\n\n It will wait until the ssh service is available during 90 seconds.\n If it doesn't succed to connect then the function will raise\n an SSHException." - }, - { - "code": "def getbr(self, name):\n for br in self.showall():\n if br.name == name:\n return br\n raise BridgeException(\"Bridge does not exist.\")", - "docstring": "Return a bridge object." 
- }, - { - "code": "def _get_files(self) -> Iterator[str]:\n path = os.path.abspath(self.path)\n if os.path.isfile(path):\n path = os.path.dirname(path)\n for path in self._get_parents(path):\n for file_path in self._get_files_from_dir(path):\n yield file_path", - "docstring": "Return paths to all requirements files" - }, - { - "code": "def _assemble_influence(stmt):\n subj_str = _assemble_agent_str(stmt.subj.concept)\n obj_str = _assemble_agent_str(stmt.obj.concept)\n if stmt.subj.delta['polarity'] is not None:\n subj_delta_str = ' decrease' if stmt.subj.delta['polarity'] == -1 \\\n else 'n increase'\n subj_str = 'a%s in %s' % (subj_delta_str, subj_str)\n if stmt.obj.delta['polarity'] is not None:\n obj_delta_str = ' decrease' if stmt.obj.delta['polarity'] == -1 \\\n else 'n increase'\n obj_str = 'a%s in %s' % (obj_delta_str, obj_str)\n stmt_str = '%s causes %s' % (subj_str, obj_str)\n return _make_sentence(stmt_str)", - "docstring": "Assemble an Influence statement into text." - }, - { - "code": "def renamer(v, cur_module):\n mname = demangle(v)\n name = v + '_'\n if name in cur_module:\n return name, mname\n else:\n return v, mname", - "docstring": "Rename function path to fit Pythonic naming." - }, - { - "code": "def retract(self):\n if lib.EnvRetract(self._env, self._fact) != 1:\n raise CLIPSError(self._env)", - "docstring": "Retract the fact from the CLIPS environment." - }, - { - "code": "def close_panel(self):\n self.hide()\n self.lineEditReplace.clear()\n self.lineEditSearch.clear()", - "docstring": "Closes the panel" - }, - { - "code": "def get_rtr_name(self, router_id):\n try:\n body = {}\n router = self.neutronclient.show_router(router_id, body=body)\n return router.get('router').get('name')\n except Exception as exc:\n LOG.error(\"Failed to show router interface %(id)s \"\n \"Exc %(exc)s\", {'id': router_id, 'exc': str(exc)})", - "docstring": "Retrieve the router name. Incomplete." - }, - { - "code": "def upload(client, source_dir):\n print('')\n print('upload store listings')\n print('---------------------')\n listings_folder = os.path.join(source_dir, 'listings')\n langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder))\n for language_dir in langfolders:\n language = os.path.basename(language_dir)\n with open(os.path.join(language_dir, 'listing.json')) as listings_file:\n listing = json.load(listings_file)\n listing_response = client.update(\n 'listings', language=language, body=listing)\n print(' Listing for language %s was updated.' %\n listing_response['language'])", - "docstring": "Upload listing files in source_dir. folder herachy." - }, - { - "code": "def paginate(parser, token, paginator_class=None):\n try:\n tag_name, tag_args = token.contents.split(None, 1)\n except ValueError:\n msg = '%r tag requires arguments' % token.contents.split()[0]\n raise template.TemplateSyntaxError(msg)\n match = PAGINATE_EXPRESSION.match(tag_args)\n if match is None:\n msg = 'Invalid arguments for %r tag' % tag_name\n raise template.TemplateSyntaxError(msg)\n kwargs = match.groupdict()\n objects = kwargs.pop('objects')\n if '.' in objects and kwargs['var_name'] is None:\n msg = (\n '%(tag)r tag requires a variable name `as` argumnent if the '\n 'queryset is provided as a nested context variable (%(objects)s). '\n 'You must either pass a direct queryset (e.g. taking advantage '\n 'of the `with` template tag) or provide a new variable name to '\n 'store the resulting queryset (e.g. 
`%(tag)s %(objects)s as '\n 'objects`).'\n ) % {'tag': tag_name, 'objects': objects}\n raise template.TemplateSyntaxError(msg)\n return PaginateNode(paginator_class, objects, **kwargs)", - "docstring": "Paginate objects.\n\n Usage:\n\n .. code-block:: html+django\n\n {% paginate entries %}\n\n After this call, the *entries* variable in the template context is replaced\n by only the entries of the current page.\n\n You can also keep your *entries* original variable (usually a queryset)\n and add to the context another name that refers to entries of the current\n page, e.g.:\n\n .. code-block:: html+django\n\n {% paginate entries as page_entries %}\n\n The *as* argument is also useful when a nested context variable is provided\n as queryset. In this case, and only in this case, the resulting variable\n name is mandatory, e.g.:\n\n .. code-block:: html+django\n\n {% paginate entries.all as entries %}\n\n The number of paginated entries is taken from settings, but you can\n override the default locally, e.g.:\n\n .. code-block:: html+django\n\n {% paginate 20 entries %}\n\n Of course you can mix it all:\n\n .. code-block:: html+django\n\n {% paginate 20 entries as paginated_entries %}\n\n By default, the first page is displayed the first time you load the page,\n but you can change this, e.g.:\n\n .. code-block:: html+django\n\n {% paginate entries starting from page 3 %}\n\n When changing the default page, it is also possible to reference the last\n page (or the second last page, and so on) by using negative indexes, e.g:\n\n .. code-block:: html+django\n\n {% paginate entries starting from page -1 %}\n\n This can be also achieved using a template variable that was passed to the\n context, e.g.:\n\n .. code-block:: html+django\n\n {% paginate entries starting from page page_number %}\n\n If the passed page number does not exist, the first page is displayed.\n\n If you have multiple paginations in the same page, you can change the\n querydict key for the single pagination, e.g.:\n\n .. code-block:: html+django\n\n {% paginate entries using article_page %}\n\n In this case *article_page* is intended to be a context variable, but you\n can hardcode the key using quotes, e.g.:\n\n .. code-block:: html+django\n\n {% paginate entries using 'articles_at_page' %}\n\n Again, you can mix it all (the order of arguments is important):\n\n .. code-block:: html+django\n\n {% paginate 20 entries\n starting from page 3 using page_key as paginated_entries %}\n\n Additionally you can pass a path to be used for the pagination:\n\n .. code-block:: html+django\n\n {% paginate 20 entries\n using page_key with pagination_url as paginated_entries %}\n\n This way you can easily create views acting as API endpoints, and point\n your Ajax calls to that API. In this case *pagination_url* is considered a\n context variable, but it is also possible to hardcode the URL, e.g.:\n\n .. code-block:: html+django\n\n {% paginate 20 entries with \"/mypage/\" %}\n\n If you want the first page to contain a different number of items than\n subsequent pages, you can separate the two values with a comma, e.g. if\n you want 3 items on the first page and 10 on other pages:\n\n .. code-block:: html+django\n\n {% paginate 3,10 entries %}\n\n You must use this tag before calling the {% show_more %} one." - }, - { - "code": "def namedb_get_all_revealed_namespace_ids( self, current_block ):\n query = \"SELECT namespace_id FROM namespaces WHERE op = ? 
AND reveal_block < ?;\"\n args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE )\n namespace_rows = namedb_query_execute( cur, query, args )\n ret = []\n for namespace_row in namespace_rows:\n ret.append( namespace_row['namespace_id'] )\n return ret", - "docstring": "Get all non-expired revealed namespaces." - }, - { - "code": "def click_event(self, event):\n self.prevent_refresh = False\n try:\n if self.error_messages:\n button = event[\"button\"]\n if button == 1:\n self.error_index = (self.error_index + 1) % len(self.error_messages)\n error = self.error_messages[self.error_index]\n self.error_output(error)\n if button == 3:\n self.hide_errors()\n if button != 2 or (self.terminated or self.disabled):\n self.prevent_refresh = True\n elif self.click_events:\n click_method = getattr(self.module_class, \"on_click\")\n if self.click_events == self.PARAMS_NEW:\n click_method(event)\n else:\n click_method(\n self.i3status_thread.json_list,\n self.config[\"py3_config\"][\"general\"],\n event,\n )\n self.set_updated()\n else:\n self.prevent_refresh = True\n except Exception:\n msg = \"on_click event in `{}` failed\".format(self.module_full_name)\n self._py3_wrapper.report_exception(msg)", - "docstring": "Execute the 'on_click' method of this module with the given event." - }, - { - "code": "def list(self, host_rec=None, service_rec=None, hostfilter=None):\n return self.send.vuln_list(host_rec, service_rec, hostfilter)", - "docstring": "Returns a list of vulnerabilities based on t_hosts.id or t_services.id.\n If neither are set then statistical results are added\n\n :param host_rec: db.t_hosts.id\n :param service_rec: db.t_services.id\n :param hostfilter: Valid hostfilter or None\n :return: [(vulndata) ...] if host_rec or service_rec set\n :return: [(vulndata, vuln_cnt, [vuln_ip, ...], [services ...]) ...] if nothing sent" - }, - { - "code": "def parse_output(self, line):\n try:\n key, value = line.split(\":\")\n self.update_value(key.strip(), value.strip())\n except ValueError:\n pass", - "docstring": "Convert output to key value pairs" - }, - { - "code": "def get_overlay(self):\n overlay = {}\n dcmlist = self.files_in_serie\n for i in range(len(dcmlist)):\n onefile = dcmlist[i]\n logger.info(\"reading '%s'\" % onefile)\n data = self._read_file(onefile)\n if len(overlay) == 0:\n for i_overlay in range(0, 50):\n try:\n data2d = decode_overlay_slice(data, i_overlay)\n shp2 = data2d.shape\n overlay[i_overlay] = np.zeros([len(dcmlist), shp2[0],\n shp2[1]], dtype=np.int8)\n overlay[i_overlay][-i - 1, :, :] = data2d\n except Exception:\n pass\n else:\n for i_overlay in overlay.keys():\n try:\n data2d = decode_overlay_slice(data, i_overlay)\n overlay[i_overlay][-i - 1, :, :] = data2d\n except Exception:\n logger.warning('Problem with overlay number ' +\n str(i_overlay))\n return overlay", - "docstring": "Function make 3D data from dicom file slices. There are usualy\n more overlays in the data." - }, - { - "code": "def get_refresh_token(self):\n try:\n credentials = OAuth2Credentials.from_json(\n self.credentials_store[g.oidc_id_token['sub']])\n return credentials.refresh_token\n except KeyError:\n logger.debug(\"Expired ID token, credentials missing\",\n exc_info=True)\n return None", - "docstring": "Method to return the current requests' refresh_token.\n\n :returns: Access token or None\n :rtype: str\n\n .. 
versionadded:: 1.2" - }, - { - "code": "def parse_args(self, args=None, namespace=None):\n assert self.initialized, '`init` must be called before `parse_args`.'\n namespace = self.parser.parse_args(args, namespace)\n handler = self._get_handler(namespace, remove_handler=True)\n if handler:\n return handler(**vars(namespace))", - "docstring": "Parse the command-line arguments and call the associated handler.\n\n The signature is the same as `argparse.ArgumentParser.parse_args\n `_.\n\n Args\n ----\n args : list\n A list of argument strings. If ``None`` the list is taken from\n ``sys.argv``.\n namespace : argparse.Namespace\n A Namespace instance. Defaults to a new empty Namespace.\n\n Returns\n -------\n The return value of the handler called with the populated Namespace as\n kwargs." - }, - { - "code": "def version(self):\n\t\tlines = iter(self._invoke('version').splitlines())\n\t\tversion = next(lines).strip()\n\t\treturn self._parse_version(version)", - "docstring": "Return the underlying version" - }, - { - "code": "def patch(self, nml_patch):\n for sec in nml_patch:\n if sec not in self:\n self[sec] = Namelist()\n self[sec].update(nml_patch[sec])", - "docstring": "Update the namelist from another partial or full namelist.\n\n This is different from the intrinsic `update()` method, which replaces\n a namelist section. Rather, it updates the values within a section." - }, - { - "code": "def utc(self, year, month=1, day=1, hour=0, minute=0, second=0.0):\n if isinstance(year, datetime):\n dt = year\n tai = _utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)\n elif isinstance(year, date):\n d = year\n tai = _utc_date_to_tai(self.leap_dates, self.leap_offsets, d)\n elif hasattr(year, '__len__') and isinstance(year[0], datetime):\n list_of_datetimes = year\n tai = array([\n _utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)\n for dt in list_of_datetimes])\n else:\n tai = _utc_to_tai(self.leap_dates, self.leap_offsets,\n _to_array(year), _to_array(month),\n _to_array(day), _to_array(hour),\n _to_array(minute), _to_array(second))\n t = Time(self, tai + tt_minus_tai)\n t.tai = tai\n return t", - "docstring": "Build a `Time` from a UTC calendar date.\n\n You can either specify the date as separate components, or\n provide a time zone aware Python datetime. The following two\n calls are equivalent (the ``utc`` time zone object can be\n imported from the ``skyfield.api`` module, or from ``pytz`` if\n you have it)::\n\n ts.utc(2014, 1, 18, 1, 35, 37.5)\n ts.utc(datetime(2014, 1, 18, 1, 35, 37, 500000, tzinfo=utc))\n\n Note that only by passing the components separately can you\n specify a leap second, because a Python datetime will not allow\n the value 60 in its seconds field." - }, - { - "code": "def fix_timedelta_repr(func):\n if version < (3, 7):\n return func\n def fix_timedelta(match):\n values = match.group(1).split(\", \")\n param_repr = \", \".join(\n \"{}={}\".format(param, value)\n for param, value in zip((\"days\", \"seconds\", \"microseconds\"), values)\n if value != \"0\"\n )\n if not param_repr:\n param_repr = \"0\"\n return \"timedelta({})\".format(param_repr)\n func.__doc__ = re.sub(r\"timedelta\\(([^)]+)\\)\", fix_timedelta, func.__doc__)\n return func", - "docstring": "Account repr change for timedelta in Python 3.7 and above in docstrings.\n\n This is needed to make some doctests pass on Python 3.7 and above. 
This\n change was introduced by `bpo-30302 `_" - }, - { - "code": "def updates(self, id, update_id=None):\n if update_id is None:\n update_id = -1\n schema = UpdateSchema()\n resp = self.service.get_id(self.base, id, params={'updates': update_id})\n return self.service.decode(schema, resp)", - "docstring": "Get updates of a running result via long-polling. If no updates are available, CDRouter waits up to 10 seconds before sending an empty response.\n\n :param id: Result ID as an int.\n :param update_id: (optional) Update ID as an int.\n :return: :class:`results.Update ` object\n :rtype: results.Update" - }, - { - "code": "def get_ffmpeg_exe():\n if 'FFMPEG_PATH' in os.environ:\n ffmpeg_exe = os.environ['FFMPEG_PATH']\n else:\n ffmpeg_exe = which('ffmpeg')\n if not ffmpeg_exe:\n if which('avconv'):\n raise FFmpegNormalizeError(\n \"avconv is not supported. \"\n \"Please install ffmpeg from http://ffmpeg.org instead.\"\n )\n else:\n raise FFmpegNormalizeError(\n \"Could not find ffmpeg in your $PATH or $FFMPEG_PATH. \"\n \"Please install ffmpeg from http://ffmpeg.org\"\n )\n return ffmpeg_exe", - "docstring": "Return path to ffmpeg executable" - }, - { - "code": "def extract_signature(docstring):\n root = publish_doctree(docstring, settings_overrides={\"report_level\": 5})\n fields = get_fields(root)\n return fields.get(SIG_FIELD)", - "docstring": "Extract the signature from a docstring.\n\n :sig: (str) -> Optional[str]\n :param docstring: Docstring to extract the signature from.\n :return: Extracted signature, or ``None`` if there's no signature." - }, - { - "code": "def _format_return_timestamps(self, return_timestamps=None):\n if return_timestamps is None:\n return_timestamps_array = np.arange(\n self.components.initial_time(),\n self.components.final_time() + self.components.saveper(),\n self.components.saveper(), dtype=np.float64\n )\n elif inspect.isclass(range) and isinstance(return_timestamps, range):\n return_timestamps_array = np.array(return_timestamps, ndmin=1)\n elif isinstance(return_timestamps, (list, int, float, np.ndarray)):\n return_timestamps_array = np.array(return_timestamps, ndmin=1)\n elif isinstance(return_timestamps, _pd.Series):\n return_timestamps_array = return_timestamps.as_matrix()\n else:\n raise TypeError('`return_timestamps` expects a list, array, pandas Series, '\n 'or numeric value')\n return return_timestamps_array", - "docstring": "Format the passed in return timestamps value as a numpy array.\n If no value is passed, build up array of timestamps based upon\n model start and end times, and the 'saveper' value." - }, - { - "code": "def create_html_from_fragment(tag):\n try:\n assert isinstance(tag, bs4.element.Tag)\n except AssertionError:\n raise TypeError\n try:\n assert tag.find_all('body') == []\n except AssertionError:\n raise ValueError\n soup = BeautifulSoup('', 'html.parser')\n soup.body.append(tag)\n return soup", - "docstring": "Creates full html tree from a fragment. 
Assumes that tag should be wrapped in a body and is currently not\n\n Args:\n tag: a bs4.element.Tag\n\n Returns:\"\n bs4.element.Tag: A bs4 tag representing a full html document" - }, - { - "code": "def export(self):\n data = {}\n for key in self._specification:\n data[key] = self[key]\n return data", - "docstring": "export dictionary with values" - }, - { - "code": "def extract_auth_vars(request):\n if request.META.get('HTTP_X_SENTRY_AUTH', '').startswith('Sentry'):\n return request.META['HTTP_X_SENTRY_AUTH']\n elif request.META.get('HTTP_AUTHORIZATION', '').startswith('Sentry'):\n return request.META['HTTP_AUTHORIZATION']\n else:\n args = [\n '%s=%s' % i\n for i in request.GET.items()\n if i[0].startswith('sentry_') and i[0] != 'sentry_data'\n ]\n if args:\n return 'Sentry %s' % ', '.join(args)\n return None", - "docstring": "raven-js will pass both Authorization and X-Sentry-Auth depending on the browser\n and server configurations." - }, - { - "code": "def add_variables(self, variables, cardinality, inhibitor_probability):\n if len(variables) == 1:\n if not isinstance(inhibitor_probability[0], (list, tuple)):\n inhibitor_probability = [inhibitor_probability]\n if len(variables) != len(cardinality):\n raise ValueError(\"Size of variables and cardinality should be same\")\n elif any(cardinal != len(prob_array) for prob_array, cardinal in zip(inhibitor_probability, cardinality)) or \\\n len(cardinality) != len(inhibitor_probability):\n raise ValueError(\"Size of variables and inhibitor_probability should be same\")\n elif not all(0 <= item <= 1 for item in chain.from_iterable(inhibitor_probability)):\n raise ValueError(\"Probability values should be between 0 and 1(both inclusive).\")\n else:\n self.variables = np.concatenate((self.variables, variables))\n self.cardinality = np.concatenate((self.cardinality, cardinality))\n self.inhibitor_probability.extend(inhibitor_probability)", - "docstring": "Adds variables to the NoisyOrModel.\n\n Parameters\n ----------\n variables: list, tuple, dict (array like)\n array containing names of the variables that are to be added.\n\n cardinality: list, tuple, dict (array like)\n array containing integers representing the cardinality\n of the variables.\n\n inhibitor_probability: list, tuple, dict (array_like)\n array containing the inhibitor probabilities corresponding to each variable.\n\n Examples\n --------\n >>> from pgmpy.models import NoisyOrModel\n >>> model = NoisyOrModel(['x1', 'x2', 'x3'], [2, 3, 2], [[0.6, 0.4],\n ... [0.2, 0.4, 0.7],\n ... [0.1, 0. 4]])\n >>> model.add_variables(['x4'], [3], [0.1, 0.4, 0.2])" - }, - { - "code": "def _ParseTokenType(self, file_object, file_offset):\n token_type_map = self._GetDataTypeMap('uint8')\n token_type, _ = self._ReadStructureFromFileObject(\n file_object, file_offset, token_type_map)\n return token_type", - "docstring": "Parses a token type.\n\n Args:\n file_object (dfvfs.FileIO): file-like object.\n file_offset (int): offset of the token relative to the start of\n the file-like object.\n\n Returns:\n int: token type" - }, - { - "code": "def derive_annotations(self, annotations):\n cls = type(self)\n return cls(\n self[0],\n self[1],\n self[2],\n self[3],\n annotations,\n self[5]\n )", - "docstring": "Derives a new event from this one setting the ``annotations`` attribute.\n\n Args:\n annotations: (Sequence[Union[amazon.ion.symbols.SymbolToken, unicode]]):\n The annotations associated with the derived event.\n\n Returns:\n IonEvent: The newly generated event." 
- }, - { - "code": "def read_all(self):\n result = self._cursor.execute(\"SELECT * FROM {}\".format(self.table_name)).fetchall()\n return (SqliteReader._assemble_message(frame) for frame in result)", - "docstring": "Fetches all messages in the database.\n\n :rtype: Generator[can.Message]" - }, - { - "code": "def use_comparative_proficiency_view(self):\n self._object_views['proficiency'] = COMPARATIVE\n for session in self._get_provider_sessions():\n try:\n session.use_comparative_proficiency_view()\n except AttributeError:\n pass", - "docstring": "Pass through to provider ProficiencyLookupSession.use_comparative_proficiency_view" - }, - { - "code": "def run_job(self, job_spec, wait_until_done=True):\n json_string = json.dumps({'lsid': job_spec.lsid, 'params': job_spec.params, 'tags': ['GenePattern Python Client']}, cls=GPJSONEncoder)\n if sys.version_info.major == 3:\n json_string = bytes(json_string, 'utf-8')\n request = urllib.request.Request(self.url + '/rest/v1/jobs')\n if self.authorization_header() is not None:\n request.add_header('Authorization', self.authorization_header())\n request.add_header('Content-Type', 'application/json')\n request.add_header('User-Agent', 'GenePatternRest')\n response = urllib.request.urlopen(request, json_string)\n if response.getcode() != 201:\n print(\" job POST failed, status code = %i\" % response.getcode())\n return None\n data = json.loads(response.read().decode('utf-8'))\n job = GPJob(self, data['jobId'])\n job.get_info()\n self.last_job = job\n if wait_until_done:\n job.wait_until_done()\n return job", - "docstring": "Runs a job defined by jobspec, optionally non-blocking.\n\n Takes a GPJobSpec object that defines a request to run a job, and makes the\n request to the server. By default blocks until the job is finished by\n polling the server, but can also run asynchronously.\n\n Args:\n :param job_spec: A GPJobSpec object that contains the data defining the job to be run.\n :param wait_until_done: Whether to wait until the job is finished before returning.\n :return:\n\n Returns:\n a GPJob object that refers to the running job on the server. If called\n synchronously, this object will contain the info associated with the\n completed job. Otherwise, it will just wrap the URI of the running job." 
- }, - { - "code": "def intersubject_scores_random_subjects(fm, category, filenumber, n_train,\n n_predict, controls=True,\n scale_factor = 1):\n subjects = np.unique(fm.SUBJECTINDEX)\n if len(subjects) < n_train + n_predict:\n raise ValueError()\n np.random.shuffle(subjects)\n predicted_subjects = subjects[0 : n_predict]\n predicting_subjects = subjects[n_predict : n_predict + n_train]\n assert len(predicting_subjects) == n_train\n assert len(predicted_subjects) == n_predict\n assert [x not in predicting_subjects for x in predicted_subjects]\n return intersubject_scores(fm, category, [filenumber], predicting_subjects,\n [filenumber], predicted_subjects,\n controls, scale_factor)", - "docstring": "Calculates how well the fixations of n random subjects on one image can\n be predicted with the fixations of m other random subjects.\n\n Notes\n Function that uses intersubject_auc for computing auc.\n\n Parameters\n fm : fixmat instance\n category : int\n Category from which the fixations are taken.\n filnumber : int\n Image from which fixations are taken.\n n_train : int\n The number of subjects which are used for prediction.\n n_predict : int\n The number of subjects to predict\n controls : bool, optional\n If True (default), n_predict subjects are chosen from the fixmat.\n If False, 1000 fixations are randomly generated and used for\n testing.\n scale_factor : int, optional\n specifies the scaling of the fdm. Default is 1.\n\n Returns\n tuple : prediction scores" - }, - { - "code": "def remove_rate_limit(self, limiter):\n if limiter in self.rate_limiters:\n self.unsubscribe('capacity', limiter.on_capacity)\n self.rate_limiters.remove(limiter)", - "docstring": "Remove a RateLimit from the connection" - }, - { - "code": "def push(self, yts):\n if not isinstance(yts, (YTStor, YTMetaStor, type(None))):\n raise TypeError(\"Expected YTStor object, YTMetaStor object or None.\")\n k = 0\n while k in self.keys():\n k += 1\n self[k] = yts\n return k", - "docstring": "Search for, add and return new file descriptor.\n\n Parameters\n ----------\n yts : YTStor-obj or None\n ``YTStor`` object for which we want to allocate a descriptor or ``None``, if we allocate descriptor for a\n control file.\n\n Returns\n -------\n k : int\n File descriptor." - }, - { - "code": "def _watch_refresh_source(self, event):\n self.logger.info(\"Sources changed...\")\n try:\n self.sources = self._get_sources()\n self._render_template(self.sources)\n except:\n pass", - "docstring": "Refresh sources then templates" - }, - { - "code": "def locale_escape(string, errors='replace'):\n encoding = locale.getpreferredencoding()\n string = string.encode(encoding, errors).decode('utf8')\n return string", - "docstring": "Mangle non-supported characters, for savages with ascii terminals." - }, - { - "code": "def Range(min=None, max=None, min_message=\"Must be at least {min}\", max_message=\"Must be at most {max}\"):\n @wraps(Range)\n def built(value):\n if not isinstance(value, numbers.Number) or isinstance(value, bool):\n raise Error(\"Not a number\")\n if min is not None and min > value:\n raise Error(min_message.format(min=min, max=max))\n if max is not None and value > max:\n raise Error(max_message.format(min=min, max=max))\n return value\n return built", - "docstring": "Creates a validator that checks if the given numeric value is in the\n specified range, inclusive.\n\n Accepts values specified by ``numbers.Number`` only, excluding booleans.\n\n The error messages raised can be customized with ``min_message`` and\n ``max_message``. 
The ``min`` and ``max`` arguments are formatted." - }, - { - "code": "def get_link_url(self, datum=None):\n if not self.url:\n raise NotImplementedError('A LinkAction class must have a '\n 'url attribute or define its own '\n 'get_link_url method.')\n if callable(self.url):\n return self.url(datum, **self.kwargs)\n try:\n if datum:\n obj_id = self.table.get_object_id(datum)\n return urls.reverse(self.url, args=(obj_id,))\n else:\n return urls.reverse(self.url)\n except urls.NoReverseMatch as ex:\n LOG.info('No reverse found for \"%(url)s\": %(exception)s',\n {'url': self.url, 'exception': ex})\n return self.url", - "docstring": "Returns the final URL based on the value of ``url``.\n\n If ``url`` is callable it will call the function.\n If not, it will then try to call ``reverse`` on ``url``.\n Failing that, it will simply return the value of ``url`` as-is.\n\n When called for a row action, the current row data object will be\n passed as the first parameter." - }, - { - "code": "def execute(self):\n self.print_info()\n if not self._config.provisioner.playbooks.cleanup:\n msg = 'Skipping, cleanup playbook not configured.'\n LOG.warn(msg)\n return\n self._config.provisioner.cleanup()", - "docstring": "Execute the actions necessary to cleanup the instances and returns\n None.\n\n :return: None" - }, - { - "code": "def import_plugin(package_name, include_available=False):\n available_plugins_dir = MICRODROP_CONDA_SHARE.joinpath('plugins',\n 'available')\n enabled_plugins_dir = MICRODROP_CONDA_ETC.joinpath('plugins', 'enabled')\n search_paths = [enabled_plugins_dir]\n if include_available:\n search_paths += [available_plugins_dir]\n for dir_i in search_paths:\n if dir_i not in sys.path:\n sys.path.insert(0, dir_i)\n module_name = package_name.split('.')[-1].replace('-', '_')\n return importlib.import_module(module_name)", - "docstring": "Import MicroDrop plugin.\n\n Parameters\n ----------\n package_name : str\n Name of MicroDrop plugin Conda package.\n include_available : bool, optional\n If ``True``, import from all available plugins (not just **enabled**\n ones).\n\n By default, only the ``/etc/microdrop/plugins/enabled``\n directory is added to the Python import paths (if necessary).\n\n If ``True``, also add the ``/share/microdrop/plugins/available``\n directory to the Python import paths.\n\n Returns\n -------\n module\n Imported plugin module." - }, - { - "code": "def _apply_rewrites(date_classes, rules):\n for rule in rules:\n date_classes = rule.execute(date_classes)\n return date_classes", - "docstring": "Return a list of date elements by applying rewrites to the initial date element list" - }, - { - "code": "def _make_txn(signer, setting_key, payload):\n serialized_payload = payload.SerializeToString()\n header = TransactionHeader(\n signer_public_key=signer.get_public_key().as_hex(),\n family_name='sawtooth_settings',\n family_version='1.0',\n inputs=_config_inputs(setting_key),\n outputs=_config_outputs(setting_key),\n dependencies=[],\n payload_sha512=hashlib.sha512(serialized_payload).hexdigest(),\n batcher_public_key=signer.get_public_key().as_hex()\n ).SerializeToString()\n return Transaction(\n header=header,\n header_signature=signer.sign(header),\n payload=serialized_payload)", - "docstring": "Creates and signs a sawtooth_settings transaction with with a payload." 
- }, - { - "code": "def extendedboldqc(auth, label, scan_ids=None, project=None, aid=None):\n if not aid:\n aid = accession(auth, label, project)\n path = '/data/experiments'\n params = {\n 'xsiType': 'neuroinfo:extendedboldqc',\n 'columns': ','.join(extendedboldqc.columns.keys())\n }\n if project:\n params['project'] = project\n params['xnat:mrSessionData/ID'] = aid\n _,result = _get(auth, path, 'json', autobox=True, params=params)\n for result in result['ResultSet']['Result']:\n if scan_ids == None or result['neuroinfo:extendedboldqc/scan/scan_id'] in scan_ids:\n data = dict()\n for k,v in iter(extendedboldqc.columns.items()):\n data[v] = result[k]\n yield data", - "docstring": "Get ExtendedBOLDQC data as a sequence of dictionaries.\n\n Example:\n >>> import yaxil\n >>> import json\n >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')\n >>> for eqc in yaxil.extendedboldqc2(auth, 'AB1234C')\n ... print(json.dumps(eqc, indent=2))\n\n :param auth: XNAT authentication object\n :type auth: :mod:`yaxil.XnatAuth`\n :param label: XNAT MR Session label\n :type label: str\n :param scan_ids: Scan numbers to return\n :type scan_ids: list\n :param project: XNAT MR Session project\n :type project: str\n :param aid: XNAT Accession ID\n :type aid: str\n :returns: Generator of scan data dictionaries\n :rtype: :mod:`dict`" - }, - { - "code": "def list_followers(self):\n evt = self._client._request_point_list_detailed(self._type, self.__lid, self.__pid)\n self._client._wait_and_except_if_failed(evt)\n return evt.payload['subs']", - "docstring": "list followers for this point, i.e. remote follows for feeds and remote attaches for controls.\n\n Returns QAPI subscription list function payload\n\n #!python\n {\n \"\": \"\",\n \"\": \"\"\n }\n\n Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)\n containing the error if the infrastructure detects a problem\n\n Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)\n if there is a communications problem between you and the infrastructure\n\n `limit` (optional) (integer) Return this many value details\n\n `offset` (optional) (integer) Return value details starting at this offset" - }, - { - "code": "def update_ddl(self, ddl_statements, operation_id=\"\"):\n client = self._instance._client\n api = client.database_admin_api\n metadata = _metadata_with_prefix(self.name)\n future = api.update_database_ddl(\n self.name, ddl_statements, operation_id=operation_id, metadata=metadata\n )\n return future", - "docstring": "Update DDL for this database.\n\n Apply any configured schema from :attr:`ddl_statements`.\n\n See\n https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase\n\n :type ddl_statements: Sequence[str]\n :param ddl_statements: a list of DDL statements to use on this database\n :type operation_id: str\n :param operation_id: (optional) a string ID for the long-running operation\n\n :rtype: :class:`google.api_core.operation.Operation`\n :returns: an operation instance\n :raises NotFound: if the database does not exist" - }, - { - "code": "def quantize_datetime(dt, resolution=None):\n resolution = int(resolution or 6)\n if hasattr(dt, 'timetuple'):\n dt = dt.timetuple()\n if isinstance(dt, time.struct_time):\n dt = list(dt)[:6]\n dt += [int((dt[5] - int(dt[5])) * 1000000)]\n dt[5] = int(dt[5])\n return datetime.datetime(*(dt[:resolution] + [1] * max(3 - resolution, 0)))\n if isinstance(dt, tuple) and 
len(dt) <= 9 and all(isinstance(val, (float, int)) for val in dt):\n dt = list(dt) + [0] * (max(6 - len(dt), 0))\n if len(dt) == 6 and isinstance(dt[5], float):\n dt = list(dt) + [1000000 * (dt[5] - int(dt[5]))]\n dt[5] = int(dt[5])\n dt = tuple(int(val) for val in dt)\n return datetime.datetime(*(dt[:resolution] + [1] * max(resolution - 3, 0)))\n return [quantize_datetime(value) for value in dt]", - "docstring": "Quantize a datetime to integer years, months, days, hours, minutes, seconds or microseconds\n\n Also works with a `datetime.timetuple` or `time.struct_time` or a 1to9-tuple of ints or floats.\n Also works with a sequenece of struct_times, tuples, or datetimes\n\n >>> quantize_datetime(datetime.datetime(1970,1,2,3,4,5,6), resolution=3)\n datetime.datetime(1970, 1, 2, 0, 0)\n\n Notice that 6 is the highest resolution value with any utility\n >>> quantize_datetime(datetime.datetime(1970,1,2,3,4,5,6), resolution=7)\n datetime.datetime(1970, 1, 2, 3, 4, 5)\n >>> quantize_datetime(datetime.datetime(1971,2,3,4,5,6,7), 1)\n datetime.datetime(1971, 1, 1, 0, 0)" - }, - { - "code": "def get_staged_signatures(vcs):\n staged_path = _get_staged_history_path(vcs)\n known_signatures = []\n if os.path.exists(staged_path):\n with open(staged_path, 'r') as f:\n known_signatures = f.read().split()\n return known_signatures", - "docstring": "Get the list of staged signatures\n\n Args:\n vcs (easyci.vcs.base.Vcs)\n\n Returns:\n list(basestring) - list of signatures" - }, - { - "code": "def get(self, uri):\n return self.send_request(\n \"{0}://{1}:{2}{3}{4}\".format(\n self.get_protocol(),\n self.host,\n self.port,\n uri,\n self.client_id\n )\n )", - "docstring": "Send a request to given uri." - }, - { - "code": "def move_file_to_directory(file_path, directory_path):\n file_name = os.path.basename(file_path)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)\n os.rename(file_path, os.path.join(directory_path,\n file_name))", - "docstring": "Moves file to given directory\n\n :param file_path: path to file to move\n :param directory_path: path to target directory where to move file" - }, - { - "code": "def get_file_mode(self):\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')\n if self.dr_entries.px_record is None:\n if self.ce_entries.px_record is None:\n raise pycdlibexception.PyCdlibInvalidInput('No Rock Ridge file mode')\n return self.ce_entries.px_record.posix_file_mode\n return self.dr_entries.px_record.posix_file_mode", - "docstring": "Get the POSIX file mode bits for this Rock Ridge entry.\n\n Parameters:\n None.\n Returns:\n The POSIX file mode bits for this Rock Ridge entry." 
- }, - { - "code": "def enable_process_breakpoints(self, dwProcessId):\n for bp in self.get_process_code_breakpoints(dwProcessId):\n if bp.is_disabled():\n self.enable_code_breakpoint(dwProcessId, bp.get_address())\n for bp in self.get_process_page_breakpoints(dwProcessId):\n if bp.is_disabled():\n self.enable_page_breakpoint(dwProcessId, bp.get_address())\n if self.system.has_process(dwProcessId):\n aProcess = self.system.get_process(dwProcessId)\n else:\n aProcess = Process(dwProcessId)\n aProcess.scan_threads()\n for aThread in aProcess.iter_threads():\n dwThreadId = aThread.get_tid()\n for bp in self.get_thread_hardware_breakpoints(dwThreadId):\n if bp.is_disabled():\n self.enable_hardware_breakpoint(dwThreadId, bp.get_address())", - "docstring": "Enables all disabled breakpoints for the given process.\n\n @type dwProcessId: int\n @param dwProcessId: Process global ID." - }, - { - "code": "def extract_meta(cls, serializer, resource):\n if hasattr(serializer, 'child'):\n meta = getattr(serializer.child, 'Meta', None)\n else:\n meta = getattr(serializer, 'Meta', None)\n meta_fields = getattr(meta, 'meta_fields', [])\n data = OrderedDict()\n for field_name in meta_fields:\n data.update({\n field_name: resource.get(field_name)\n })\n return data", - "docstring": "Gathers the data from serializer fields specified in meta_fields and adds it to\n the meta object." - }, - { - "code": "def _parse_status(self, output):\n parsed = self._parse_machine_readable_output(output)\n statuses = []\n for target, tuples in itertools.groupby(parsed, lambda tup: tup[1]):\n info = {kind: data for timestamp, _, kind, data in tuples}\n status = Status(name=target, state=info.get('state'),\n provider=info.get('provider-name'))\n statuses.append(status)\n return statuses", - "docstring": "Unit testing is so much easier when Vagrant is removed from the\n equation." - }, - { - "code": "def sign(self, msg, key):\n hasher = hashes.Hash(self.hash_algorithm(), backend=default_backend())\n hasher.update(msg)\n digest = hasher.finalize()\n sig = key.sign(\n digest,\n padding.PSS(\n mgf=padding.MGF1(self.hash_algorithm()),\n salt_length=padding.PSS.MAX_LENGTH),\n utils.Prehashed(self.hash_algorithm()))\n return sig", - "docstring": "Create a signature over a message\n\n :param msg: The message\n :param key: The key\n :return: A signature" - }, - { - "code": "def delete(self, key):\n self.logger.info('%s: delete %s' % (self, key))\n super(LoggingDatastore, self).delete(key)", - "docstring": "Removes the object named by `key`.\n LoggingDatastore logs the access." 
- }, - { - "code": "def push_state(self, new_file=''):\n 'Saves the current error state to parse subpackages'\n self.subpackages.append({'detected_type': self.detected_type,\n 'message_tree': self.message_tree,\n 'resources': self.pushable_resources,\n 'metadata': self.metadata})\n self.message_tree = {}\n self.pushable_resources = {}\n self.metadata = {'requires_chrome': False,\n 'listed': self.metadata.get('listed'),\n 'validator_version': validator.__version__}\n self.package_stack.append(new_file)", - "docstring": "Saves the current error state to parse subpackages" - }, - { - "code": "def formation_energy(self, chemical_potentials=None, fermi_level=0):\n chemical_potentials = chemical_potentials if chemical_potentials else {}\n chempot_correction = sum([\n chem_pot * (self.bulk_structure.composition[el] - self.defect.defect_composition[el])\n for el, chem_pot in chemical_potentials.items()\n ])\n formation_energy = self.energy + chempot_correction\n if \"vbm\" in self.parameters:\n formation_energy += self.charge * (self.parameters[\"vbm\"] + fermi_level)\n else:\n formation_energy += self.charge * fermi_level\n return formation_energy", - "docstring": "Computes the formation energy for a defect taking into account a given chemical potential and fermi_level" - }, - { - "code": "def cmd_move(db=None):\n if db is None:\n db = connect()\n pg_move_extended(db, args.src, args.dest)", - "docstring": "Rename a database within a server.\n\n When used with --force, an existing database with the same name as DEST is replaced, the original is renamed out of\n place in the form DEST_old_YYYYMMDD (unless --no-backup is specified)." - }, - { - "code": "def mute(returns_output=False):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n saved_stdout = sys.stdout\n sys.stdout = cStringIO.StringIO()\n try:\n out = func(*args, **kwargs)\n if returns_output:\n out = sys.stdout.getvalue().strip().split()\n finally:\n sys.stdout = saved_stdout\n return out\n return wrapper\n return decorator", - "docstring": "`returns_output` - Returns all print output in a list.\n\n Capture or ignore all print output generated by a function.\n\n Usage:\n\n output = mute(returns_output=True)(module.my_func)(args)" - }, - { - "code": "def clear_token(self):\n new_config = ConfigParser(allow_no_value=True)\n new_config.read(self.config_file)\n self.check_sections(new_config)\n new_config.remove_option('auth', 'user')\n new_config.remove_option('auth', 'token')\n filename = os.path.expanduser(self.config_file)\n with open(filename, 'w') as out_file:\n os.chmod(filename, 0o0600)\n new_config.write(out_file)", - "docstring": "Clear Token information on config file." 
- }, - { - "code": "def _convert_hex_str_to_int(val):\n if val is None:\n return None\n hex_num = int(val, 16)\n if hex_num > 0x7FFFFFFFFFFFFFFF:\n hex_num -= 0x10000000000000000\n assert -9223372036854775808 <= hex_num <= 9223372036854775807\n return hex_num", - "docstring": "Convert hexadecimal formatted ids to signed int64" - }, - { - "code": "def Akmaev_adjustment(theta, q, beta, n_k, theta_k, s_k, t_k):\n L = q.size\n k = 1\n n_k[k-1] = 1\n theta_k[k-1] = theta[k-1]\n l = 2\n while True:\n n = 1\n thistheta = theta[l-1]\n while True:\n if theta_k[k-1] <= thistheta:\n k += 1\n break\n else:\n if n <= 1:\n s = q[l-1]\n t = s*thistheta\n if n_k[k-1] <= 1:\n s_k[k-1] = q[l-n-1]\n t_k[k-1] = s_k[k-1] * theta_k[k-1]\n n += n_k[k-1]\n s += s_k[k-1]\n t += t_k[k-1]\n s_k[k-1] = s\n t_k[k-1] = t\n thistheta = t/s\n if k==1:\n break\n k -= 1\n if l == L:\n break\n l += 1\n n_k[k-1] = n\n theta_k[k-1] = thistheta\n while True:\n while True:\n if n==1:\n break\n while True:\n theta[l-1] = thistheta\n if n==1:\n break\n l -= 1\n n -= 1\n if k==1:\n break\n k -= 1\n l -= 1\n n = n_k[k-1]\n thistheta = theta_k[k-1]\n return theta", - "docstring": "Single column only." - }, - { - "code": "def copy(self):\n parser_copy = self.__class__(self.argument_class, self.namespace_class)\n parser_copy.args = deepcopy(self.args)\n parser_copy.trim = self.trim\n parser_copy.bundle_errors = self.bundle_errors\n return parser_copy", - "docstring": "Creates a copy of this RequestParser with the same set of arguments" - }, - { - "code": "def add_string_as_file(self, content, filename, pred=None):\n summary = content.splitlines()[0] if content else ''\n if not isinstance(summary, six.string_types):\n summary = content.decode('utf8', 'ignore')\n if not self.test_predicate(cmd=False, pred=pred):\n self._log_info(\"skipped string ...'%s' due to predicate (%s)\" %\n (summary, self.get_predicate(pred=pred)))\n return\n self.copy_strings.append((content, filename))\n self._log_debug(\"added string ...'%s' as '%s'\" % (summary, filename))", - "docstring": "Add a string to the archive as a file named `filename`" - }, - { - "code": "def get_account_policy(region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n try:\n info = conn.get_account_password_policy()\n return info.get_account_password_policy_response.get_account_password_policy_result.password_policy\n except boto.exception.BotoServerError as e:\n log.debug(e)\n msg = 'Failed to update the password policy.'\n log.error(msg)\n return False", - "docstring": "Get account policy for the AWS account.\n\n .. versionadded:: 2015.8.0\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt myminion boto_iam.get_account_policy" - }, - { - "code": "def overlay(ctx):\n args = ctx.obj.get('args')\n filename = args.get('config')\n debug = args.get('debug')\n _setup(filename)\n for c in config.config(filename):\n with fasteners.InterProcessLock(c.lock_file):\n util.print_info('{}:'.format(c.name))\n if not os.path.exists(c.src):\n git.clone(c.name, c.git, c.src, debug=debug)\n if c.dst:\n git.extract(c.src, c.dst, c.version, debug=debug)\n post_commands = {c.dst: c.post_commands}\n else:\n git.overlay(c.src, c.files, c.version, debug=debug)\n post_commands = {\n conf.dst: conf.post_commands\n for conf in c.files\n }\n for dst, commands in post_commands.items():\n for command in commands:\n msg = ' - running `{}` in {}'.format(command, dst)\n util.print_info(msg)\n cmd = util.build_sh_cmd(command, cwd=dst)\n util.run_command(cmd, debug=debug)", - "docstring": "Install gilt dependencies" - }, - { - "code": "def nbcompile(workdir, fileroot, html=True, basenb='', agdir=''):\n from shutil import copy\n from rtpipe import get_notebook\n from subprocess import call\n os.environ['fileroot'] = fileroot\n if agdir:\n os.environ['agdir'] = agdir\n if not basenb:\n basenb = get_notebook('baseinteract.ipynb')\n logger.info('Moving to {0} and building notebook for {1}'.format(workdir, fileroot))\n os.chdir(workdir)\n copy(basenb, '{0}/{1}.ipynb'.format(workdir, fileroot))\n cmd = 'jupyter nbconvert {0}.ipynb --inplace --execute --to notebook --allow-errors --ExecutePreprocessor.timeout=3600'.format(fileroot).split(' ')\n status = call(cmd)\n cmd = 'jupyter trust {0}.ipynb'.format(fileroot).split(' ')\n status = call(cmd)\n if html:\n cmd = 'jupyter nbconvert {0}.ipynb --to html --output {0}.html'.format(fileroot).split(' ')\n status = call(cmd)", - "docstring": "Run analysis pipeline from jupyter base notebook and save as notebook and html.\n\n html will also compile static html version\n basenb can be provided, else will get distributed version.\n agdir is the activegit repo (optional)" - }, - { - "code": "def image_coarsen(xlevel=0, ylevel=0, image=\"auto\", method='average'):\n if image == \"auto\": image = _pylab.gca().images[0]\n Z = _n.array(image.get_array())\n global image_undo_list\n image_undo_list.append([image, Z])\n if len(image_undo_list) > 10: image_undo_list.pop(0)\n image.set_array(_fun.coarsen_matrix(Z, ylevel, xlevel, method))\n _pylab.draw()", - "docstring": "This will coarsen the image data by binning each xlevel+1 along the x-axis\n and each ylevel+1 points along the y-axis\n\n type can be 'average', 'min', or 'max'" - }, - { - "code": "def get_complexes(self):\n qstr = \"$.events.frames[@.type is 'complex-assembly']\"\n res = self.tree.execute(qstr)\n if res is None:\n return\n for r in res:\n epistemics = self._get_epistemics(r)\n if epistemics.get('negated'):\n continue\n if epistemics.get('direct') is None:\n continue\n annotations, context = self._get_annot_context(r)\n args = r['arguments']\n sentence = r['verbose-text']\n members = []\n agent_coordinates = []\n for a in args:\n agent, coords = self._get_agent_from_entity(a['arg'])\n members.append(agent)\n agent_coordinates.append(coords)\n annotations['agents']['coords'] = agent_coordinates\n ev = Evidence(source_api='reach', text=sentence,\n annotations=annotations, pmid=self.citation,\n context=context, epistemics=epistemics)\n stmt = Complex(members, ev)\n self.statements.append(stmt)", - "docstring": "Extract INDRA Complex Statements." 
- }, - { - "code": "def combine_commands(*commands):\n class CombinedCommand(Command):\n def initialize_options(self):\n self.commands = []\n for C in commands:\n self.commands.append(C(self.distribution))\n for c in self.commands:\n c.initialize_options()\n def finalize_options(self):\n for c in self.commands:\n c.finalize_options()\n def run(self):\n for c in self.commands:\n c.run()\n return CombinedCommand", - "docstring": "Return a Command that combines several commands." - }, - { - "code": "def transmit_agnocomplete_context(self):\n if hasattr(self, AGNOCOMPLETE_USER_ATTRIBUTE):\n user = self.get_agnocomplete_context()\n if user:\n self.agnocomplete.user = user\n return user", - "docstring": "Assign the user context to the agnocomplete class, if any." - }, - { - "code": "def _update_video_volume_cell_attributes(self, key):\n try:\n video_cell_panel = self.grid_renderer.video_cells[key]\n except KeyError:\n return\n old_video_volume = self.code_array.cell_attributes[key][\"video_volume\"]\n new_video_volume = video_cell_panel.volume\n if old_video_volume == new_video_volume:\n return\n selection = Selection([], [], [], [], [key])\n self.actions.set_attr(\"video_volume\", new_video_volume, selection)", - "docstring": "Updates the panel cell attrutes of a panel cell" - }, - { - "code": "def main(args=None):\n if args is None:\n parser = get_argument_parser()\n args = parser.parse_args()\n fasta_file = args.fasta_file\n species = args.species\n chrom_pat = args.chromosome_pattern\n output_file = args.output_file\n log_file = args.log_file\n quiet = args.quiet\n verbose = args.verbose\n log_stream = sys.stdout\n if output_file == '-':\n log_stream = sys.stderr\n logger = misc.get_logger(log_stream=log_stream, log_file=log_file,\n quiet=quiet, verbose=verbose)\n if chrom_pat is None:\n chrom_pat = ensembl.SPECIES_CHROMPAT[species]\n chrom_re = re.compile(chrom_pat)\n with \\\n misc.smart_open_read(\n fasta_file, mode='r', encoding='ascii', try_gzip=True\n ) as fh, \\\n misc.smart_open_write(\n output_file, mode='w', encoding='ascii'\n ) as ofh:\n reader = FastaReader(fh)\n for seq in reader:\n chrom = seq.name.split(' ', 1)[0]\n if chrom_re.match(chrom) is None:\n logger.info('Ignoring chromosome \"%s\"...', chrom)\n continue\n seq.name = chrom\n seq.append_fasta(ofh)\n return 0", - "docstring": "Script body." 
- }, - { - "code": "def dropfile(cachedir, user=None):\n dfn = os.path.join(cachedir, '.dfn')\n with salt.utils.files.set_umask(0o277):\n log.info('Rotating AES key')\n if os.path.isfile(dfn):\n log.info('AES key rotation already requested')\n return\n if os.path.isfile(dfn) and not os.access(dfn, os.W_OK):\n os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)\n with salt.utils.files.fopen(dfn, 'wb+') as fp_:\n fp_.write(b'')\n os.chmod(dfn, stat.S_IRUSR)\n if user:\n try:\n import pwd\n uid = pwd.getpwnam(user).pw_uid\n os.chown(dfn, uid, -1)\n except (KeyError, ImportError, OSError, IOError):\n pass", - "docstring": "Set an AES dropfile to request the master update the publish session key" - }, - { - "code": "def wait(self):\n if self.is_master():\n raise RuntimeError(\"Master node told to await jobs.\")\n status = MPI.Status()\n while True:\n if self.debug:\n print(\"Worker {0} waiting for task.\".format(self.rank))\n task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)\n if self.debug:\n print(\"Worker {0} got task {1} with tag {2}.\"\n .format(self.rank, type(task), status.tag))\n if isinstance(task, _close_pool_message):\n if self.debug:\n print(\"Worker {0} told to quit.\".format(self.rank))\n break\n if isinstance(task, _function_wrapper):\n self.function = task.function\n if self.debug:\n print(\"Worker {0} replaced its task function: {1}.\"\n .format(self.rank, self.function))\n continue\n result = self.function(task)\n if self.debug:\n print(\"Worker {0} sending answer {1} with tag {2}.\"\n .format(self.rank, type(result), status.tag))\n self.comm.isend(result, dest=0, tag=status.tag)\n if self.exit_on_end:\n sys.exit()", - "docstring": "If this isn't the master process, wait for instructions." - }, - { - "code": "def _get_hangul_syllable_name(hangul_syllable):\n if not _is_hangul_syllable(hangul_syllable):\n raise ValueError(\"Value passed in does not represent a Hangul syllable!\")\n jamo = decompose_hangul_syllable(hangul_syllable, fully_decompose=True)\n result = ''\n for j in jamo:\n if j is not None:\n result += _get_jamo_short_name(j)\n return result", - "docstring": "Function for taking a Unicode scalar value representing a Hangul syllable and converting it to its syllable name as\n defined by the Unicode naming rule NR1. See the Unicode Standard, ch. 04, section 4.8, Names, for more information.\n\n :param hangul_syllable: Unicode scalar value representing the Hangul syllable to convert\n :return: String representing its syllable name as transformed according to naming rule NR1." - }, - { - "code": "def vae(x, z_size, name=None):\n with tf.variable_scope(name, default_name=\"vae\"):\n mu = tf.layers.dense(x, z_size, name=\"mu\")\n log_sigma = tf.layers.dense(x, z_size, name=\"log_sigma\")\n shape = common_layers.shape_list(x)\n epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])\n z = mu + tf.exp(log_sigma / 2) * epsilon\n kl = 0.5 * tf.reduce_mean(\n tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)\n free_bits = z_size // 4\n kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))\n return z, kl_loss, mu, log_sigma", - "docstring": "Simple variational autoencoder without discretization.\n\n Args:\n x: Input to the discretization bottleneck.\n z_size: Number of bits, where discrete codes range from 1 to 2**z_size.\n name: Name for the bottleneck scope.\n\n Returns:\n Embedding function, latent, loss, mu and log_simga." 
- }, - { - "code": "def from_version(cls, version, op=None):\n lower = None\n upper = None\n if op is None:\n lower = _LowerBound(version, True)\n upper = _UpperBound(version.next(), False)\n elif op in (\"eq\", \"==\"):\n lower = _LowerBound(version, True)\n upper = _UpperBound(version, True)\n elif op in (\"gt\", \">\"):\n lower = _LowerBound(version, False)\n elif op in (\"gte\", \">=\"):\n lower = _LowerBound(version, True)\n elif op in (\"lt\", \"<\"):\n upper = _UpperBound(version, False)\n elif op in (\"lte\", \"<=\"):\n upper = _UpperBound(version, True)\n else:\n raise VersionError(\"Unknown bound operation '%s'\" % op)\n bound = _Bound(lower, upper)\n range = cls(None)\n range.bounds = [bound]\n return range", - "docstring": "Create a range from a version.\n\n Args:\n version: Version object. This is used as the upper/lower bound of\n the range.\n op: Operation as a string. One of 'gt'/'>', 'gte'/'>=', lt'/'<',\n 'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created\n that contains the version superset.\n\n Returns:\n `VersionRange` object." - }, - { - "code": "def data_orientation(self):\n return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],\n [int(a.axis) for a in self.index_axes]))", - "docstring": "return a tuple of my permutated axes, non_indexable at the front" - }, - { - "code": "def get_cluster_config(\n cluster_type,\n cluster_name=None,\n kafka_topology_base_path=None,\n):\n if not kafka_topology_base_path:\n config_dirs = get_conf_dirs()\n else:\n config_dirs = [kafka_topology_base_path]\n topology = None\n for config_dir in config_dirs:\n try:\n topology = TopologyConfiguration(\n cluster_type,\n config_dir,\n )\n except MissingConfigurationError:\n pass\n if not topology:\n raise MissingConfigurationError(\n \"No available configuration for type {0}\".format(cluster_type),\n )\n if cluster_name:\n return topology.get_cluster_by_name(cluster_name)\n else:\n return topology.get_local_cluster()", - "docstring": "Return the cluster configuration.\n Use the local cluster if cluster_name is not specified.\n\n :param cluster_type: the type of the cluster\n :type cluster_type: string\n :param cluster_name: the name of the cluster\n :type cluster_name: string\n :param kafka_topology_base_path: base path to look for .yaml\n :type cluster_name: string\n :returns: the cluster\n :rtype: ClusterConfig" - }, - { - "code": "def create_free_space_request_content():\n root = etree.Element('propfind', xmlns='DAV:')\n prop = etree.SubElement(root, 'prop')\n etree.SubElement(prop, 'quota-available-bytes')\n etree.SubElement(prop, 'quota-used-bytes')\n tree = etree.ElementTree(root)\n return WebDavXmlUtils.etree_to_string(tree)", - "docstring": "Creates an XML for requesting of free space on remote WebDAV server.\n\n :return: the XML string of request content." - }, - { - "code": "def serve(opts):\n resources = _load(opts.resources, opts.output_dir)\n opts.output_dir = resources.output_dir\n if not os.path.exists(opts.output_dir):\n sys.stderr.write(\"Resources dir '{}' not found. 
Did you fetch?\\n\".format(opts.output_dir))\n return 1\n backend.PyPIResource.build_pypi_indexes(opts.output_dir)\n os.chdir(opts.output_dir)\n HTTPServer.allow_reuse_address = True\n httpd = HTTPServer((opts.host, opts.port), SimpleHTTPRequestHandler)\n if opts.ssl_cert:\n httpd.socket = ssl.wrap_socket(httpd.socket, certfile=opts.ssl_cert, server_side=True)\n print(\"Serving at: http{}://{}:{}/\".format(\n 's' if opts.ssl_cert else '', socket.gethostname(), opts.port))\n httpd.serve_forever()", - "docstring": "Run a light-weight HTTP server hosting previously mirrored resources" - }, - { - "code": "def remap_index_fn(ref_file):\n checks = [os.path.splitext(ref_file)[0].replace(\"/seq/\", \"/novoalign/\"),\n os.path.splitext(ref_file)[0] + \".ndx\",\n ref_file + \".bs.ndx\",\n ref_file + \".ndx\"]\n for check in checks:\n if os.path.exists(check):\n return check\n return checks[0]", - "docstring": "Map sequence references to equivalent novoalign indexes." - }, - { - "code": "def register_by_email(self, email, sender=None, request=None, **kwargs):\n try:\n user = self.user_model.objects.get(email=email)\n except self.user_model.DoesNotExist:\n user = self.user_model.objects.create(\n username=self.get_username(),\n email=email,\n password=self.user_model.objects.make_random_password(),\n )\n user.is_active = False\n user.save()\n self.send_activation(user, sender, **kwargs)\n return user", - "docstring": "Returns a User object filled with dummy data and not active, and sends\n an invitation email." - }, - { - "code": "def _damerau_levenshtein(a, b):\n memo = {}\n def distance(x, y):\n if (x, y) in memo:\n return memo[x, y]\n if not x:\n d = len(y)\n elif not y:\n d = len(x)\n else:\n d = min(\n distance(x[1:], y) + 1,\n distance(x, y[1:]) + 1,\n distance(x[1:], y[1:]) + (x[0] != y[0]))\n if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]:\n t = distance(x[2:], y[2:]) + 1\n if d > t:\n d = t\n memo[x, y] = d\n return d\n return distance(a, b)", - "docstring": "Returns Damerau-Levenshtein edit distance from a to b." - }, - { - "code": "def set_(dic, path, val, seps=PATH_SEPS):\n merge(dic, mk_nested_dic(path, val, seps), ac_merge=MS_DICTS)", - "docstring": "setter for nested dicts.\n\n :param dic: a dict[-like] object support recursive merge operations\n :param path: Path expression to point object wanted\n :param seps: Separator char candidates\n\n >>> d = dict(a=1, b=dict(c=2, ))\n >>> set_(d, 'a.b.d', 3)\n >>> d['a']['b']['d']\n 3" - }, - { - "code": "def from_cif_string(cif_string, transformations=None, primitive=True,\n occupancy_tolerance=1.):\n parser = CifParser.from_string(cif_string, occupancy_tolerance)\n raw_string = re.sub(r\"'\", \"\\\"\", cif_string)\n cif_dict = parser.as_dict()\n cif_keys = list(cif_dict.keys())\n s = parser.get_structures(primitive)[0]\n partial_cif = cif_dict[cif_keys[0]]\n if \"_database_code_ICSD\" in partial_cif:\n source = partial_cif[\"_database_code_ICSD\"] + \"-ICSD\"\n else:\n source = \"uploaded cif\"\n source_info = {\"source\": source,\n \"datetime\": str(datetime.datetime.now()),\n \"original_file\": raw_string,\n \"cif_data\": cif_dict[cif_keys[0]]}\n return TransformedStructure(s, transformations, history=[source_info])", - "docstring": "Generates TransformedStructure from a cif string.\n\n Args:\n cif_string (str): Input cif string. Should contain only one\n structure. 
For cifs containing multiple structures, please use\n CifTransmuter.\n transformations ([Transformations]): Sequence of transformations\n to be applied to the input structure.\n primitive (bool): Option to set if the primitive cell should be\n extracted. Defaults to True. However, there are certain\n instances where you might want to use a non-primitive cell,\n e.g., if you are trying to generate all possible orderings of\n partial removals or order a disordered structure.\n occupancy_tolerance (float): If total occupancy of a site is\n between 1 and occupancy_tolerance, the occupancies will be\n scaled down to 1.\n\n Returns:\n TransformedStructure" - }, - { - "code": "def sphinx_class(self):\n classdoc = ':class:`{cls} <{pref}.{cls}>`'\n if self.__module__.split('.')[0] == 'properties':\n pref = 'properties'\n else:\n pref = text_type(self.__module__)\n return classdoc.format(cls=self.__class__.__name__, pref=pref)", - "docstring": "Property class name formatted for Sphinx doc linking" - }, - { - "code": "def _bucket_events(self, event_iterable):\n current_bucket_time = None\n current_bucket_events = None\n for event in event_iterable:\n event_bucket_time = self._bucket_time(event[TIMESTAMP_FIELD])\n if current_bucket_time is None or current_bucket_time < event_bucket_time:\n if current_bucket_events is not None:\n yield current_bucket_events\n current_bucket_time = event_bucket_time\n current_bucket_events = []\n current_bucket_events.append(event)\n if current_bucket_events is not None and current_bucket_events != []:\n yield current_bucket_events", - "docstring": "Convert an iterable of events into an iterable of lists of events\n per bucket." - }, - { - "code": "def _read_by_weight(self, F, att_weights, value):\n output = F.batch_dot(att_weights, value)\n return output", - "docstring": "Read from the value matrix given the attention weights.\n\n Parameters\n ----------\n F : symbol or ndarray\n att_weights : Symbol or NDArray\n Attention weights.\n For single-head attention,\n Shape (batch_size, query_length, memory_length).\n For multi-head attention,\n Shape (batch_size, num_heads, query_length, memory_length).\n value : Symbol or NDArray\n Value of the memory. Shape (batch_size, memory_length, total_value_dim)\n\n Returns\n -------\n context_vec: Symbol or NDArray\n Shape (batch_size, query_length, context_vec_dim)" - }, - { - "code": "def calculate_clock_angle(inst):\n clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM']))\n clock_angle[clock_angle < 0.0] += 360.0\n inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index)\n inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2 +\n inst['BZ_GSM']**2),\n index=inst.data.index)\n return", - "docstring": "Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane\n\n Parameters\n -----------\n inst : pysat.Instrument\n Instrument with OMNI HRO data" - }, - { - "code": "def implicitly_wait(self, time_to_wait):\n if self.w3c:\n self.execute(Command.SET_TIMEOUTS, {\n 'implicit': int(float(time_to_wait) * 1000)})\n else:\n self.execute(Command.IMPLICIT_WAIT, {\n 'ms': float(time_to_wait) * 1000})", - "docstring": "Sets a sticky timeout to implicitly wait for an element to be found,\n or a command to complete. This method only needs to be called one\n time per session. 
To set the timeout for calls to\n execute_async_script, see set_script_timeout.\n\n :Args:\n - time_to_wait: Amount of time to wait (in seconds)\n\n :Usage:\n ::\n\n driver.implicitly_wait(30)" - }, - { - "code": "def destroy_sns_event(app_name, env, region):\n session = boto3.Session(profile_name=env, region_name=region)\n sns_client = session.client('sns')\n lambda_subscriptions = get_sns_subscriptions(app_name=app_name, env=env, region=region)\n for subscription_arn in lambda_subscriptions:\n sns_client.unsubscribe(SubscriptionArn=subscription_arn)\n LOG.debug(\"Lambda SNS event deleted\")\n return True", - "docstring": "Destroy all Lambda SNS subscriptions.\n\n Args:\n app_name (str): name of the lambda function\n env (str): Environment/Account for lambda function\n region (str): AWS region of the lambda function\n\n Returns:\n boolean: True if subscription destroyed successfully" - }, - { - "code": "def DiscriminatorLayerLoss():\n data = mx.sym.Variable('data')\n label = mx.sym.Variable('label')\n data = mx.sym.Flatten(data)\n label = mx.sym.Flatten(label)\n label = mx.sym.BlockGrad(label)\n zeros = mx.sym.zeros_like(data)\n output = -GaussianLogDensity(label, data, zeros)\n dloss = mx.symbol.MakeLoss(mx.symbol.mean(output),name='lloss')\n return dloss", - "docstring": "Calculate the discriminator layer loss" - }, - { - "code": "def standardize_images(x):\n with tf.name_scope(\"standardize_images\", values=[x]):\n x_shape = shape_list(x)\n x = to_float(tf.reshape(x, [-1] + x_shape[-3:]))\n x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n x_variance = tf.reduce_mean(\n tf.squared_difference(x, x_mean), axis=[1, 2], keepdims=True)\n num_pixels = to_float(x_shape[-2] * x_shape[-3])\n x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))\n return tf.reshape(x, x_shape)", - "docstring": "Image standardization on batches and videos." 
- }, - { - "code": "def _normalize_interval(start, end, value):\n if not isinstance(start, datetime):\n start = datetime.combine(start, START_OF_DAY)\n end = datetime.combine(end, START_OF_DAY)\n if start.tzinfo is None:\n start = pytz.UTC.localize(start)\n end = pytz.UTC.localize(end)\n else:\n start = start.astimezone(pytz.UTC)\n end = end.astimezone(pytz.UTC)\n return start, end", - "docstring": "Normalize datetime intervals.\n\n Given a pair of datetime.date or datetime.datetime objects,\n returns a 2-tuple of tz-aware UTC datetimes spanning the same interval.\n\n For datetime.date objects, the returned interval starts at 00:00:00.0\n on the first date and ends at 00:00:00.0 on the second.\n\n Naive datetimes are upgraded to UTC.\n\n Timezone-aware datetimes are normalized to the UTC tzdata.\n\n Params:\n - start: A date or datetime\n - end: A date or datetime" - }, - { - "code": "def __decode_dictionary(self, message_type, dictionary):\n message = message_type()\n for key, value in six.iteritems(dictionary):\n if value is None:\n try:\n message.reset(key)\n except AttributeError:\n pass\n continue\n try:\n field = message.field_by_name(key)\n except KeyError:\n variant = self.__find_variant(value)\n if variant:\n message.set_unrecognized_field(key, value, variant)\n continue\n if field.repeated:\n if not isinstance(value, list):\n value = [value]\n valid_value = [self.decode_field(field, item)\n for item in value]\n setattr(message, field.name, valid_value)\n continue\n if value == []:\n continue\n try:\n setattr(message, field.name, self.decode_field(field, value))\n except messages.DecodeError:\n if not isinstance(field, messages.EnumField):\n raise\n variant = self.__find_variant(value)\n if variant:\n message.set_unrecognized_field(key, value, variant)\n return message", - "docstring": "Merge dictionary in to message.\n\n Args:\n message: Message to merge dictionary in to.\n dictionary: Dictionary to extract information from. Dictionary\n is as parsed from JSON. Nested objects will also be dictionaries." - }, - { - "code": "def date_to_string(date):\n if isinstance(date, datetime.datetime):\n date_str = date.strftime('%Y-%m-%dT%H:%M:%S')\n tzstr = date.strftime('%z')\n if tzstr:\n date_str = '%s%s:%s' % (date_str, tzstr[0:3], tzstr[3:5])\n elif isinstance(date, datetime.date):\n date_str = date.strftime('%Y-%m-%d')\n else:\n raise TypeError('Argument is not a date or datetime. 
')\n return date_str", - "docstring": "Transform a date or datetime object into a string and return it.\n\n Examples:\n >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC))\n '2012-01-03T12:23:34+00:00'\n >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34))\n '2012-01-03T12:23:34'\n >>> date_to_string(datetime.date(2012, 1, 3))\n '2012-01-03'" - }, - { - "code": "def get_host_and_url(properties, cloud_token):\n host = properties.get(HazelcastCloudDiscovery.CLOUD_URL_BASE_PROPERTY.name,\n HazelcastCloudDiscovery.CLOUD_URL_BASE_PROPERTY.default_value)\n host = host.replace(\"https://\", \"\")\n host = host.replace(\"http://\", \"\")\n return host, HazelcastCloudDiscovery._CLOUD_URL_PATH + cloud_token", - "docstring": "Helper method to get host and url that can be used in HTTPSConnection.\n\n :param properties: Client config properties.\n :param cloud_token: Cloud discovery token.\n :return: Host and URL pair" - }, - { - "code": "def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions, sslContext):\n serviceVersionDescription = __GetServiceVersionDescription(protocol,\n server,\n port,\n path,\n sslContext)\n if serviceVersionDescription is None:\n return None\n if not isinstance(preferredApiVersions, list):\n preferredApiVersions = [ preferredApiVersions ]\n for desiredVersion in preferredApiVersions:\n if __VersionIsSupported(desiredVersion, serviceVersionDescription):\n return desiredVersion\n return None", - "docstring": "Private method that returns the most preferred API version supported by the\n specified server,\n\n @param protocol: What protocol to use for the connection (e.g. https or http).\n @type protocol: string\n @param server: Which server to connect to.\n @type server: string\n @param port: Port\n @type port: int\n @param path: Path\n @type path: string\n @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)\n If a list of versions is specified the versions should\n be ordered from most to least preferred.\n @type preferredApiVersions: string or string list\n @param sslContext: SSL Context describing the various SSL options. 
It is only\n supported in Python 2.7.9 or higher.\n @type sslContext: SSL.Context" - }, - { - "code": "def get_webconfiguration_settings(name, settings, location=''):\n r\n ret = {}\n ps_cmd = []\n ps_cmd_validate = []\n if not settings:\n log.warning('No settings provided')\n return ret\n settings = _prepare_settings(name, settings)\n ps_cmd.append(r'$Settings = New-Object System.Collections.ArrayList;')\n for setting in settings:\n ps_cmd_validate.extend(['Get-WebConfigurationProperty',\n '-PSPath', \"'{0}'\".format(name),\n '-Filter', \"'{0}'\".format(setting['filter']),\n '-Name', \"'{0}'\".format(setting['name']),\n '-Location', \"'{0}'\".format(location),\n '-ErrorAction', 'Stop',\n '|', 'Out-Null;'])\n ps_cmd.append(\"$Property = Get-WebConfigurationProperty -PSPath '{0}'\".format(name))\n ps_cmd.append(\"-Name '{0}' -Filter '{1}' -Location '{2}' -ErrorAction Stop;\".format(setting['name'], setting['filter'], location))\n if setting['name'].split('.')[-1] == 'Collection':\n if 'value' in setting:\n ps_cmd.append(\"$Property = $Property | select -Property {0} ;\"\n .format(\",\".join(list(setting['value'][0].keys()))))\n ps_cmd.append(\"$Settings.add(@{{filter='{0}';name='{1}';location='{2}';value=[System.Collections.ArrayList] @($Property)}})| Out-Null;\"\n .format(setting['filter'], setting['name'], location))\n else:\n ps_cmd.append(r'if (([String]::IsNullOrEmpty($Property) -eq $False) -and')\n ps_cmd.append(r\"($Property.GetType()).Name -eq 'ConfigurationAttribute') {\")\n ps_cmd.append(r'$Property = $Property | Select-Object')\n ps_cmd.append(r'-ExpandProperty Value };')\n ps_cmd.append(\"$Settings.add(@{{filter='{0}';name='{1}';location='{2}';value=[String] $Property}})| Out-Null;\"\n .format(setting['filter'], setting['name'], location))\n ps_cmd.append(r'$Property = $Null;')\n cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True)\n if cmd_ret['retcode'] != 0:\n message = 'One or more invalid property names were specified for the provided container.'\n raise SaltInvocationError(message)\n ps_cmd.append('$Settings')\n cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n try:\n ret = salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError('Unable to parse return data as Json.')\n return ret", - "docstring": "r'''\n Get the webconfiguration settings for the IIS PSPath.\n\n Args:\n name (str): The PSPath of the IIS webconfiguration settings.\n settings (list): A list of dictionaries containing setting name and filter.\n location (str): The location of the settings (optional)\n\n Returns:\n dict: A list of dictionaries containing setting name, filter and value.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' win_iis.get_webconfiguration_settings name='IIS:\\' settings=\"[{'name': 'enabled', 'filter': 'system.webServer/security/authentication/anonymousAuthentication'}]\"" - }, - { - "code": "def _combined_wildcards_iter(flatterm: Iterator[TermAtom]) -> Iterator[TermAtom]:\n last_wildcard = None\n for term in flatterm:\n if isinstance(term, Wildcard) and not isinstance(term, SymbolWildcard):\n if last_wildcard is not None:\n new_min_count = last_wildcard.min_count + term.min_count\n new_fixed_size = last_wildcard.fixed_size and term.fixed_size\n last_wildcard = Wildcard(new_min_count, new_fixed_size)\n else:\n last_wildcard = Wildcard(term.min_count, term.fixed_size)\n else:\n if last_wildcard is not None:\n yield last_wildcard\n last_wildcard = None\n yield term\n if last_wildcard is not None:\n yield last_wildcard", - "docstring": "Combine consecutive wildcards in a flatterm into a single one." - }, - { - "code": "def _make_actor_method_executor(self, method_name, method, actor_imported):\n def actor_method_executor(dummy_return_id, actor, *args):\n self._worker.actor_task_counter += 1\n try:\n if is_class_method(method):\n method_returns = method(*args)\n else:\n method_returns = method(actor, *args)\n except Exception as e:\n if (isinstance(actor, ray.actor.Checkpointable)\n and self._worker.actor_task_counter != 1):\n self._save_and_log_checkpoint(actor)\n raise e\n else:\n if isinstance(actor, ray.actor.Checkpointable):\n if self._worker.actor_task_counter == 1:\n if actor_imported:\n self._restore_and_log_checkpoint(actor)\n else:\n self._save_and_log_checkpoint(actor)\n return method_returns\n return actor_method_executor", - "docstring": "Make an executor that wraps a user-defined actor method.\n\n The wrapped method updates the worker's internal state and performs any\n necessary checkpointing operations.\n\n Args:\n method_name (str): The name of the actor method.\n method (instancemethod): The actor method to wrap. This should be a\n method defined on the actor class and should therefore take an\n instance of the actor as the first argument.\n actor_imported (bool): Whether the actor has been imported.\n Checkpointing operations will not be run if this is set to\n False.\n\n Returns:\n A function that executes the given actor method on the worker's\n stored instance of the actor. The function also updates the\n worker's internal state to record the executed method." - }, - { - "code": "def _write_gml(G, path):\n import networkx as nx\n return nx.write_gml(G, path, stringizer=str)", - "docstring": "Wrapper around nx.write_gml" - }, - { - "code": "def dynacRepresentation(self):\n if self.plane.val == 'H':\n p = 0\n elif self.plane.val == 'V':\n p = 1\n return ['STEER', [[self.field_strength.val], [p]]]", - "docstring": "Return the Dynac representation of this steerer instance." 
- }, - { - "code": "def write_key(self, name, value, comment=\"\"):\n if value is None:\n self._FITS.write_undefined_key(self._ext+1,\n str(name),\n str(comment))\n elif isinstance(value, bool):\n if value:\n v = 1\n else:\n v = 0\n self._FITS.write_logical_key(self._ext+1,\n str(name),\n v,\n str(comment))\n elif isinstance(value, _stypes):\n self._FITS.write_string_key(self._ext+1,\n str(name),\n str(value),\n str(comment))\n elif isinstance(value, _ftypes):\n self._FITS.write_double_key(self._ext+1,\n str(name),\n float(value),\n str(comment))\n elif isinstance(value, _itypes):\n self._FITS.write_long_key(self._ext+1,\n str(name),\n int(value),\n str(comment))\n elif isinstance(value, (tuple, list)):\n vl = [str(el) for el in value]\n sval = ','.join(vl)\n self._FITS.write_string_key(self._ext+1,\n str(name),\n sval,\n str(comment))\n else:\n sval = str(value)\n mess = (\n \"warning, keyword '%s' has non-standard \"\n \"value type %s, \"\n \"Converting to string: '%s'\")\n warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning)\n self._FITS.write_string_key(self._ext+1,\n str(name),\n sval,\n str(comment))", - "docstring": "Write the input value to the header\n\n parameters\n ----------\n name: string\n Name of keyword to write/update\n value: scalar\n Value to write, can be string float or integer type,\n including numpy scalar types.\n comment: string, optional\n An optional comment to write for this key\n\n Notes\n -----\n Write COMMENT and HISTORY using the write_comment and write_history\n methods" - }, - { - "code": "def create(cls, tx_signers, recipients, metadata=None, asset=None):\n (inputs, outputs) = cls.validate_create(tx_signers, recipients, asset, metadata)\n return cls(cls.CREATE, {'data': asset}, inputs, outputs, metadata)", - "docstring": "A simple way to generate a `CREATE` transaction.\n\n Note:\n This method currently supports the following Cryptoconditions\n use cases:\n - Ed25519\n - ThresholdSha256\n\n Additionally, it provides support for the following BigchainDB\n use cases:\n - Multiple inputs and outputs.\n\n Args:\n tx_signers (:obj:`list` of :obj:`str`): A list of keys that\n represent the signers of the CREATE Transaction.\n recipients (:obj:`list` of :obj:`tuple`): A list of\n ([keys],amount) that represent the recipients of this\n Transaction.\n metadata (dict): The metadata to be stored along with the\n Transaction.\n asset (dict): The metadata associated with the asset that will\n be created in this Transaction.\n\n Returns:\n :class:`~bigchaindb.common.transaction.Transaction`" - }, - { - "code": "def add_related(self, *objects):\n master = None\n slaves = set([])\n solitaire = set([])\n for new in objects:\n if isinstance(new, self.cls):\n if master is None:\n master = new\n else:\n slaves.add(new)\n for item in new.items:\n existing = self.lookup.get(item)\n if existing is not None:\n slaves.add(existing)\n else:\n cluster = self.lookup.get(new)\n if cluster is None:\n solitaire.add(new)\n elif master is None:\n master = cluster\n elif master != cluster:\n slaves.add(cluster)\n if master is None:\n master = self.cls([])\n for slave in slaves:\n master.update(slave)\n for item in solitaire:\n master.add_item(item)\n for item in master.items:\n self.lookup[item] = master", - "docstring": "Add related items\n\n The arguments can be individual items or cluster objects containing\n several items.\n\n When two groups of related items share one or more common members,\n they will be merged into one cluster." 
- }, - { - "code": "def buff_eval(params):\n specification, sequence, parsed_ind = params\n model = specification(*parsed_ind)\n model.build()\n model.pack_new_sequences(sequence)\n return model.buff_interaction_energy.total_energy", - "docstring": "Builds and evaluates BUFF energy of model in parallelization\n\n Parameters\n ----------\n params: list\n Tuple containing the specification to be built, the sequence,\n and the parameters for model building.\n\n Returns\n -------\n model.bude_score: float\n BUFF score for model to be assigned to particle fitness value." - }, - { - "code": "def _cast(self):\n for configName, configDict in self.configs.items():\n if configDict['cast'] is not None:\n configValue = getattr(self._config, configName)\n if configValue is not None:\n try:\n self._setConfig(configName, configDict['cast'](configValue))\n except:\n raise InvalidConfigurationException(\"%s: %r\" % (configName, configValue))", - "docstring": "Iterates through our parsed configuration options and cast any options with marked cast types." - }, - { - "code": "def max_heapify(arr, end, simulation, iteration):\n last_parent = (end - 1) // 2\n for parent in range(last_parent, -1, -1):\n current_parent = parent\n while current_parent <= last_parent:\n child = 2 * current_parent + 1\n if child + 1 <= end and arr[child] < arr[child + 1]:\n child = child + 1\n if arr[child] > arr[current_parent]:\n arr[current_parent], arr[child] = arr[child], arr[current_parent]\n current_parent = child\n if simulation:\n iteration = iteration + 1\n print(\"iteration\",iteration,\":\",*arr)\n else:\n break\n arr[0], arr[end] = arr[end], arr[0]\n return iteration", - "docstring": "Max heapify helper for max_heap_sort" - }, - { - "code": "def _match(self, x, op, y):\n if (op not in self.condition_mapper):\n raise ValueError('Invalid where condition given')\n func = getattr(self, self.condition_mapper.get(op))\n return func(x, y)", - "docstring": "Compare the given `x` and `y` based on `op`\n\n :@param x, y, op\n :@type x, y: mixed\n :@type op: string\n\n :@return bool\n :@throws ValueError" - }, - { - "code": "def cached(fun):\n _cache = {}\n @wraps(fun)\n def newfun(a, b, distance_function):\n frozen_a = frozenset(a)\n frozen_b = frozenset(b)\n if (frozen_a, frozen_b) not in _cache:\n result = fun(a, b, distance_function)\n _cache[(frozen_a, frozen_b)] = result\n return _cache[(frozen_a, frozen_b)]\n return newfun", - "docstring": "memoizing decorator for linkage functions.\n\n Parameters have been hardcoded (no ``*args``, ``**kwargs`` magic), because,\n the way this is coded (interchangingly using sets and frozensets) is true\n for this specific case. For other cases that is not necessarily guaranteed." - }, - { - "code": "def dispatch(self, request):\n def _wrapped():\n messages = self._get_request_messages(request)\n results = [self._dispatch_and_handle_errors(message) for message in messages]\n non_notification_results = [x for x in results if x is not None]\n if len(non_notification_results) == 0:\n return None\n elif len(messages) == 1:\n return non_notification_results[0]\n else:\n return non_notification_results\n result, _ = self._handle_exceptions(_wrapped)\n if result is not None:\n return self._encode_complete_result(result)", - "docstring": "Takes a request and dispatches its data to a jsonrpc method.\n\n :param request: a werkzeug request with json data\n :type request: werkzeug.wrappers.Request\n :return: json output of the corresponding method\n :rtype: str\n\n .. 
versionadded:: 0.1.0" - }, - { - "code": "def beeswarm(*args, **kwargs):\n ax, args, kwargs = maybe_get_ax(*args, **kwargs)\n xticklabels = kwargs.pop('xticklabels', None)\n colors = kwargs.pop('colors', None)\n fontsize = kwargs.pop('fontsize', 10)\n gray = _colors.set1[8]\n red = _colors.set1[0]\n blue = kwargs.pop('color', _colors.set1[1])\n kwargs.setdefault('widths', 0.25)\n kwargs.setdefault('sym', \"o\")\n bp = _beeswarm(ax, *args, **kwargs)\n kwargs.setdefault(\"median_color\", gray)\n kwargs.setdefault(\"median_linewidth\", 2)\n if xticklabels:\n ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize)\n show_caps = kwargs.pop('show_caps', True)\n show_ticks = kwargs.pop('show_ticks', False)\n remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks)\n linewidth = 0.75\n plt.setp(bp['boxes'], color=blue, linewidth=linewidth)\n plt.setp(bp['medians'], color=kwargs.pop(\"median_color\"), linewidth=kwargs.pop(\"median_linewidth\"))\n for color, flier in zip(colors, bp['fliers']):\n plt.setp(flier, color=color)\n ax.spines['left']._linewidth = 0.5\n return bp", - "docstring": "Create a R-like beeswarm plot showing the mean and datapoints. \n The difference from matplotlib is only the left axis line is\n shown, and ticklabels labeling each category of data can be added.\n\n @param ax:\n @param x:\n @param kwargs: Besides xticklabels, which is a prettyplotlib-specific\n argument which will label each individual beeswarm, many arguments for\n matplotlib.pyplot.boxplot will be accepted:\n http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot\n Additional arguments include:\n \n *median_color* : (default gray)\n The color of median lines \n \n *median_width* : (default 2)\n Median line width\n\n *colors* : (default None)\n Colors to use when painting a dataseries, for example\n \n list1 = [1,2,3]\n list2 = [5,6,7]\n ppl.beeswarm([list1, list2], colors=[\"red\", \"blue\"], xticklabels=[\"data1\", \"data2\"])\n\n @return:" - }, - { - "code": "def _get_serv(ret=None):\n _options = _get_options(ret)\n global REDIS_POOL\n if REDIS_POOL:\n return REDIS_POOL\n elif _options.get('cluster_mode'):\n REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),\n skip_full_coverage_check=_options.get('skip_full_coverage_check'),\n decode_responses=True)\n else:\n REDIS_POOL = redis.StrictRedis(host=_options.get('host'),\n port=_options.get('port'),\n unix_socket_path=_options.get('unix_socket_path', None),\n db=_options.get('db'),\n decode_responses=True,\n password=_options.get('password'))\n return REDIS_POOL", - "docstring": "Return a redis server object" - }, - { - "code": "def _convert_2_0_0_beta_2(topo, topo_path):\n topo_dir = os.path.dirname(topo_path)\n topo[\"revision\"] = 7\n for node in topo.get(\"topology\", {}).get(\"nodes\", []):\n if node[\"node_type\"] == \"dynamips\":\n node_id = node[\"node_id\"]\n dynamips_id = node[\"properties\"][\"dynamips_id\"]\n dynamips_dir = os.path.join(topo_dir, \"project-files\", \"dynamips\")\n node_dir = os.path.join(dynamips_dir, node_id)\n try:\n os.makedirs(os.path.join(node_dir, \"configs\"), exist_ok=True)\n for path in glob.glob(os.path.join(glob.escape(dynamips_dir), \"*_i{}_*\".format(dynamips_id))):\n shutil.move(path, os.path.join(node_dir, os.path.basename(path)))\n for path in glob.glob(os.path.join(glob.escape(dynamips_dir), \"configs\", \"i{}_*\".format(dynamips_id))):\n shutil.move(path, os.path.join(node_dir, \"configs\", os.path.basename(path)))\n except OSError as e:\n raise 
aiohttp.web.HTTPConflict(text=\"Can't convert project {}: {}\".format(topo_path, str(e)))\n return topo", - "docstring": "Convert topologies from GNS3 2.0.0 beta 2 to beta 3.\n\n Changes:\n * Node id folders for dynamips" - }, - { - "code": "def render_authenticateLinks(self, ctx, data):\n if self.username is not None:\n return ''\n from xmantissa.signup import _getPublicSignupInfo\n IQ = inevow.IQ(ctx.tag)\n signupPattern = IQ.patternGenerator('signup-link')\n signups = []\n for (prompt, url) in _getPublicSignupInfo(self.store):\n signups.append(signupPattern.fillSlots(\n 'prompt', prompt).fillSlots(\n 'url', url))\n return ctx.tag[signups]", - "docstring": "For unauthenticated users, add login and signup links to the given tag.\n For authenticated users, remove the given tag from the output.\n\n When necessary, the I{signup-link} pattern will be loaded from the tag.\n Each copy of it will have I{prompt} and I{url} slots filled. The list\n of copies will be added as children of the tag." - }, - { - "code": "def next_frame_sv2p_discrete():\n hparams = next_frame_sv2p()\n hparams.action_injection = \"multiplicative\"\n hparams.small_mode = True\n hparams.add_hparam(\"bottleneck_bits\", 128)\n hparams.add_hparam(\"bottleneck_noise\", 0.02)\n hparams.add_hparam(\"discrete_warmup_steps\", 40000)\n hparams.add_hparam(\"full_latent_tower\", False)\n hparams.add_hparam(\"latent_predictor_state_size\", 128)\n hparams.add_hparam(\"latent_predictor_temperature\", 0.5)\n hparams.add_hparam(\"discretize_warmup_steps\", 40000)\n return hparams", - "docstring": "SV2P discrete model hparams." - }, - { - "code": "def write_file(filename, string):\n import sys\n if sys.version_info[0] >= 3:\n with open(filename, 'w', encoding=\"utf-8\") as f:\n f.write(string)\n else:\n with open(filename, 'w') as f:\n f.write(string.encode(\"utf-8\"))", - "docstring": "dump the contents of string to a file called filename" - }, - { - "code": "def compile_file(source, globals_=None):\n if isinstance(source, gast.AST):\n source = quoting.to_source(source)\n tempdir = tempfile.mkdtemp()\n uuid = str(uuid4().hex[:4])\n tmpname = os.path.join(tempdir, 'tangent_%s.py' % uuid)\n with open(tmpname, 'w') as f:\n f.write(source)\n module_name = 'tangent_%s' % uuid\n if six.PY3:\n spec = util.spec_from_file_location(module_name, tmpname)\n m = util.module_from_spec(spec)\n spec.loader.exec_module(m)\n else:\n m = imp.load_source(module_name, tmpname)\n if globals_:\n m.__dict__.update(globals_)\n return m", - "docstring": "Compile by saving to file and importing that.\n\n Compiling the AST/source code this way ensures that the source code is\n readable by e.g. `pdb` or `inspect`.\n\n Args:\n source: The code to compile, either as a string or as an AST.\n globals_: A dictionary of variables that should be available as globals in\n the compiled module. They will be monkey patched after importing the\n module.\n\n Returns:\n A module object containing the compiled source code." 
- }, - { - "code": "def append(self, key, val, time=0, min_compress_len=0):\n return self._set(\"append\", key, val, time, min_compress_len)", - "docstring": "Append the value to the end of the existing key's value.\n\n Only stores in memcache if key already exists.\n Also see L{prepend}.\n\n @return: Nonzero on success.\n @rtype: int" - }, - { - "code": "def parse_body(self, text):\r\n re_raise = re.findall(r'[ \\t]raise ([a-zA-Z0-9_]*)', text)\r\n if len(re_raise) > 0:\r\n self.raise_list = [x.strip() for x in re_raise]\r\n self.raise_list = list(OrderedDict.fromkeys(self.raise_list))\r\n re_yield = re.search(r'[ \\t]yield ', text)\r\n if re_yield:\r\n self.has_yield = True\r\n pattern_return = r'return |yield '\r\n line_list = text.split('\\n')\r\n is_found_return = False\r\n line_return_tmp = ''\r\n for line in line_list:\r\n line = line.strip()\r\n if is_found_return is False:\r\n if re.match(pattern_return, line):\r\n is_found_return = True\r\n if is_found_return:\r\n line_return_tmp += line\r\n try:\r\n pos_quote = self._find_quote_position(line_return_tmp)\r\n if line_return_tmp[-1] == '\\\\':\r\n line_return_tmp = line_return_tmp[:-1]\r\n continue\r\n self._find_bracket_position(line_return_tmp, '(', ')',\r\n pos_quote)\r\n self._find_bracket_position(line_return_tmp, '{', '}',\r\n pos_quote)\r\n self._find_bracket_position(line_return_tmp, '[', ']',\r\n pos_quote)\r\n except IndexError:\r\n continue\r\n return_value = re.sub(pattern_return, '', line_return_tmp)\r\n self.return_value_in_body.append(return_value)\r\n is_found_return = False\r\n line_return_tmp = ''", - "docstring": "Parse the function body text." - }, - { - "code": "def find_file(self, filename, paths, verbose=None):\n memo_key = self._find_file_key(filename, paths)\n try:\n memo_dict = self._memo['find_file']\n except KeyError:\n memo_dict = {}\n self._memo['find_file'] = memo_dict\n else:\n try:\n return memo_dict[memo_key]\n except KeyError:\n pass\n if verbose and not callable(verbose):\n if not SCons.Util.is_String(verbose):\n verbose = \"find_file\"\n _verbose = u' %s: ' % verbose\n verbose = lambda s: sys.stdout.write(_verbose + s)\n filedir, filename = os.path.split(filename)\n if filedir:\n self.default_filedir = filedir\n paths = [_f for _f in map(self.filedir_lookup, paths) if _f]\n result = None\n for dir in paths:\n if verbose:\n verbose(\"looking for '%s' in '%s' ...\\n\" % (filename, dir))\n node, d = dir.srcdir_find_file(filename)\n if node:\n if verbose:\n verbose(\"... FOUND '%s' in '%s'\\n\" % (filename, d))\n result = node\n break\n memo_dict[memo_key] = result\n return result", - "docstring": "Find a node corresponding to either a derived file or a file that exists already.\n\n Only the first file found is returned, and none is returned if no file is found.\n\n filename: A filename to find\n paths: A list of directory path *nodes* to search in. Can be represented as a list, a tuple, or a callable that is called with no arguments and returns the list or tuple.\n\n returns The node created from the found file." 
- }, - { - "code": "def _generate_filter_ngrams(self, data, min_size):\n max_size = data[constants.SIZE_FIELDNAME].max()\n kept_ngrams = list(data[data[constants.SIZE_FIELDNAME] == min_size][\n constants.NGRAM_FIELDNAME])\n for size in range(min_size+1, max_size+1):\n pattern = FilteredWitnessText.get_filter_ngrams_pattern(\n kept_ngrams)\n potential_ngrams = list(data[data[constants.SIZE_FIELDNAME] ==\n size][constants.NGRAM_FIELDNAME])\n kept_ngrams.extend([ngram for ngram in potential_ngrams if\n pattern.search(ngram) is None])\n return kept_ngrams", - "docstring": "Returns the n-grams in `data` that do not contain any other n-gram\n in `data`.\n\n :param data: n-gram results data\n :type data: `pandas.DataFrame`\n :param min_size: minimum n-gram size in `data`\n :type min_size: `int`\n :rtype: `list` of `str`" - }, - { - "code": "def get_original_fn(fn):\n fn_type = type(fn)\n if fn_type is classmethod or fn_type is staticmethod:\n return get_original_fn(fn.__func__)\n if hasattr(fn, \"original_fn\"):\n return fn.original_fn\n if hasattr(fn, \"fn\"):\n fn.original_fn = get_original_fn(fn.fn)\n return fn.original_fn\n return fn", - "docstring": "Gets the very original function of a decorated one." - }, - { - "code": "def log_background_messages(self, name=None):\n with self.lock:\n if name:\n self._log_messages_by_thread(name)\n else:\n self._log_all_messages()", - "docstring": "Forwards messages logged on background to Robot Framework log.\n\n By default forwards all messages logged by all threads, but can be\n limited to a certain thread by passing thread's name as an argument.\n\n Logged messages are removed from the message storage." - }, - { - "code": "def _generate_arg_types(coordlist_length, shape_name):\n from .ds9_region_parser import ds9_shape_defs\n from .ds9_attr_parser import ds9_shape_in_comment_defs\n if shape_name in ds9_shape_defs:\n shape_def = ds9_shape_defs[shape_name]\n else:\n shape_def = ds9_shape_in_comment_defs[shape_name]\n initial_arg_types = shape_def.args_list\n arg_repeats = shape_def.args_repeat\n if arg_repeats is None:\n return initial_arg_types\n n1, n2 = arg_repeats\n arg_types = list(initial_arg_types[:n1])\n num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)\n arg_types.extend((num_of_repeats - n1) //\n (n2 - n1) * initial_arg_types[n1:n2])\n arg_types.extend(initial_arg_types[n2:])\n return arg_types", - "docstring": "Find coordinate types based on shape name and coordlist length\n\n This function returns a list of coordinate types based on which\n coordinates can be repeated for a given type of shap\n\n Parameters\n ----------\n coordlist_length : int\n The number of coordinates or arguments used to define the shape.\n\n shape_name : str\n One of the names in `pyregion.ds9_shape_defs`.\n\n Returns\n -------\n arg_types : list\n A list of objects from `pyregion.region_numbers` with a length equal to\n coordlist_length." - }, - { - "code": "def mutate(self):\n section = self.section\n project_name = self.project_name\n section.project_name = project_name\n self.contribute_runtime_dir()\n main = section.main_process\n main.set_naming_params(prefix='[%s] ' % project_name)\n main.set_pid_file(\n self.get_pid_filepath(),\n before_priv_drop=False,\n safe=True,\n )\n section.master_process.set_basic_params(\n fifo_file=self.get_fifo_filepath(),\n )\n apps = section.applications\n apps.set_basic_params(\n manage_script_name=True,\n )\n self.contribute_error_pages()\n self.contribute_static()", - "docstring": "Mutates current section." 
- }, - { - "code": "def attach(self, events):\n if events is not None:\n events.add_event_listener(\"print\", self._print)\n events.add_event_listener(\"backspace\", self._backspace)\n events.add_event_listener(\"tab\", self._tab)\n events.add_event_listener(\"linefeed\", self._linefeed)\n events.add_event_listener(\"reverse-linefeed\", \n self._reverse_linefeed)\n events.add_event_listener(\"carriage-return\", self._carriage_return)\n events.add_event_listener(\"index\", self._index)\n events.add_event_listener(\"reverse-index\", self._reverse_index)\n events.add_event_listener(\"store-cursor\", self._save_cursor)\n events.add_event_listener(\"restore-cursor\", self._restore_cursor)\n events.add_event_listener(\"cursor-up\", self._cursor_up)\n events.add_event_listener(\"cursor-down\", self._cursor_down)\n events.add_event_listener(\"cursor-right\", self._cursor_forward)\n events.add_event_listener(\"cursor-left\", self._cursor_back)\n events.add_event_listener(\"cursor-move\", self._cursor_position)\n events.add_event_listener(\"erase-in-line\", self._erase_in_line)\n events.add_event_listener(\"erase-in-display\", \n self._erase_in_display)\n events.add_event_listener(\"delete-characters\", \n self._delete_character)\n events.add_event_listener(\"insert-lines\", self._insert_line)\n events.add_event_listener(\"delete-lines\", self._delete_line)\n events.add_event_listener(\"select-graphic-rendition\",\n self._select_graphic_rendition)\n events.add_event_listener(\"charset-g0\", self._charset_g0)\n events.add_event_listener(\"charset-g1\", self._charset_g1)\n events.add_event_listener(\"shift-in\", self._shift_in)\n events.add_event_listener(\"shift-out\", self._shift_out)\n events.add_event_listener(\"bell\", self._bell)", - "docstring": "Attach this screen to a events that processes commands and dispatches \n events. Sets up the appropriate event handlers so that the screen will\n update itself automatically as the events processes data." - }, - { - "code": "def add_nodes(network_id, nodes,**kwargs):\n start_time = datetime.datetime.now()\n names=[]\n for n_i in nodes:\n if n_i.name in names:\n raise HydraError(\"Duplicate Node Name: %s\"%(n_i.name))\n names.append(n_i.name)\n user_id = kwargs.get('user_id')\n try:\n net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()\n net_i.check_write_permission(user_id)\n except NoResultFound:\n raise ResourceNotFoundError(\"Network %s not found\"%(network_id))\n _add_nodes_to_database(net_i, nodes)\n net_i.project_id=net_i.project_id\n db.DBSession.flush()\n node_s = db.DBSession.query(Node).filter(Node.network_id==network_id).all()\n node_id_map = dict()\n iface_nodes = dict()\n for n_i in node_s:\n iface_nodes[n_i.name] = n_i\n for node in nodes:\n node_id_map[node.id] = iface_nodes[node.name]\n _bulk_add_resource_attrs(network_id, 'NODE', nodes, iface_nodes)\n log.info(\"Nodes added in %s\", get_timing(start_time))\n return node_s", - "docstring": "Add nodes to network" - }, - { - "code": "def tag(ctx, input, output):\n log.info('chemdataextractor.pos.tag')\n log.info('Reading %s' % input.name)\n doc = Document.from_file(input)\n for element in doc.elements:\n if isinstance(element, Text):\n for sentence in element.sentences:\n output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))\n output.write(u'\\n')", - "docstring": "Output POS-tagged tokens." 
- }, - { - "code": "def initialize_environment(app):\n env = app.builder.env\n if not hasattr(env, 'traceability_all_items'):\n env.traceability_all_items = {}\n update_available_item_relationships(app)", - "docstring": "Perform initializations needed before the build process starts." - }, - { - "code": "def smooth_gaussian(image, sigma=1):\n return scipy.ndimage.filters.gaussian_filter(image, sigma=sigma, mode=\"nearest\")", - "docstring": "Returns Gaussian smoothed image.\n\n :param image: numpy array or :class:`jicimagelib.image.Image`\n :param sigma: standard deviation\n :returns: :class:`jicimagelib.image.Image`" - }, - { - "code": "def ln_comment(self, ln):\n if self.keep_lines:\n if not 1 <= ln <= len(self.original_lines) + 1:\n raise CoconutInternalException(\n \"out of bounds line number\", ln,\n \"not in range [1, \" + str(len(self.original_lines) + 1) + \"]\",\n )\n elif ln == len(self.original_lines) + 1:\n lni = -1\n else:\n lni = ln - 1\n if self.line_numbers and self.keep_lines:\n if self.minify:\n comment = str(ln) + \" \" + self.original_lines[lni]\n else:\n comment = \" line \" + str(ln) + \": \" + self.original_lines[lni]\n elif self.keep_lines:\n if self.minify:\n comment = self.original_lines[lni]\n else:\n comment = \" \" + self.original_lines[lni]\n elif self.line_numbers:\n if self.minify:\n comment = str(ln)\n else:\n comment = \" line \" + str(ln)\n else:\n return \"\"\n return self.wrap_comment(comment, reformat=False)", - "docstring": "Get an end line comment. CoconutInternalExceptions should always be caught and complained." - }, - { - "code": "def __execute_bsh(self, instr):\n op0_val = self.read_operand(instr.operands[0])\n op1_val = self.read_operand(instr.operands[1])\n op1_size = instr.operands[1].size\n if extract_sign_bit(op1_val, op1_size) == 0:\n op2_val = op0_val << op1_val\n else:\n op2_val = op0_val >> twos_complement(op1_val, op1_size)\n self.write_operand(instr.operands[2], op2_val)\n return None", - "docstring": "Execute BSH instruction." - }, - { - "code": "def create_entity(self):\n self._highest_id_seen += 1\n entity = Entity(self._highest_id_seen, self)\n self._entities.append(entity)\n return entity", - "docstring": "Create a new entity.\n\n The entity will have a higher UID than any previously associated\n with this world.\n\n :return: the new entity\n :rtype: :class:`essence.Entity`" - }, - { - "code": "def run_ops(state, serial=False, no_wait=False):\n state.deploying = True\n if serial:\n _run_serial_ops(state)\n elif no_wait:\n _run_no_wait_ops(state)\n for op_hash in state.get_op_order():\n _run_single_op(state, op_hash)", - "docstring": "Runs all operations across all servers in a configurable manner.\n\n Args:\n state (``pyinfra.api.State`` obj): the deploy state to execute\n serial (boolean): whether to run operations host by host\n no_wait (boolean): whether to wait for all hosts between operations" - }, - { - "code": "def markdown(value, extensions=MARKDOWN_EXTENSIONS):\n try:\n import markdown\n except ImportError:\n warnings.warn(\"The Python markdown library isn't installed.\",\n RuntimeWarning)\n return value\n return markdown.markdown(force_text(value), extensions=extensions)", - "docstring": "Markdown processing with optionally using various extensions\n that python-markdown supports.\n `extensions` is an iterable of either markdown.Extension instances\n or extension paths." 
- }, - { - "code": "def _validate_names(self, name=None, names=None, deep=False):\n from copy import deepcopy\n if names is not None and name is not None:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif names is None and name is None:\n return deepcopy(self.names) if deep else self.names\n elif names is not None:\n if not is_list_like(names):\n raise TypeError(\"Must pass list-like as `names`.\")\n return names\n else:\n if not is_list_like(name):\n return [name]\n return name", - "docstring": "Handles the quirks of having a singular 'name' parameter for general\n Index and plural 'names' parameter for MultiIndex." - }, - { - "code": "def load_stream(self, stream):\n for batch in self.serializer.load_stream(stream):\n yield batch\n num = read_int(stream)\n batch_order = []\n for i in xrange(num):\n index = read_int(stream)\n batch_order.append(index)\n yield batch_order", - "docstring": "Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields\n a list of indices that can be used to put the RecordBatches in the correct order." - }, - { - "code": "def _translate_add(self, oprnd1, oprnd2, oprnd3):\n assert oprnd1.size and oprnd2.size and oprnd3.size\n assert oprnd1.size == oprnd2.size\n op1_var = self._translate_src_oprnd(oprnd1)\n op2_var = self._translate_src_oprnd(oprnd2)\n op3_var, op3_var_constrs = self._translate_dst_oprnd(oprnd3)\n if oprnd3.size > oprnd1.size:\n result = smtfunction.zero_extend(op1_var, oprnd3.size) + smtfunction.zero_extend(op2_var, oprnd3.size)\n elif oprnd3.size < oprnd1.size:\n result = smtfunction.extract(op1_var + op2_var, 0, oprnd3.size)\n else:\n result = op1_var + op2_var\n return [op3_var == result] + op3_var_constrs", - "docstring": "Return a formula representation of an ADD instruction." - }, - { - "code": "def validate(self):\n super(OutputContextVertex, self).validate()\n if self.location.field is not None:\n raise ValueError(u'Expected location at a vertex, but got: {}'.format(self.location))", - "docstring": "Validate that the OutputContextVertex is correctly representable." 
- }, - { - "code": "def find_dimension_by_id(self, dim_id):\r\n for dim in self.dimensions:\r\n if is_equal_strings_ignore_case(dim.id, dim_id):\r\n return dim\r\n return None", - "docstring": "the method searching dimension with a given id" - }, - { - "code": "def _generate_namespace_types(self, namespace, jazzy_cfg):\n ns_name = fmt_public_name(namespace.name)\n output_path = os.path.join('ApiObjects', ns_name)\n output_path_headers = os.path.join(output_path, 'Headers')\n for data_type in namespace.linearize_data_types():\n class_name = fmt_class_prefix(data_type)\n if self.args.documentation:\n append_to_jazzy_category_dict(jazzy_cfg, ns_name, class_name)\n append_to_jazzy_category_dict(\n jazzy_cfg, 'Serializers', '{}Serializer'.format(class_name))\n if is_struct_type(data_type):\n file_path = os.path.join(output_path_headers,\n class_name + '.h')\n with self.output_to_relative_path(file_path):\n self.emit_raw(base_file_comment)\n self._generate_struct_class_h(data_type)\n elif is_union_type(data_type):\n if self.args.documentation:\n append_to_jazzy_category_dict(\n jazzy_cfg, 'Tags', '{}Tag'.format(fmt_class_prefix(data_type)))\n file_path = os.path.join(output_path_headers,\n class_name + '.h')\n with self.output_to_relative_path(file_path):\n self.emit_raw(base_file_comment)\n self._generate_union_class_h(data_type)\n else:\n raise TypeError('Can\\'t handle type %r' % type(data_type))\n file_path = os.path.join(\n output_path,\n 'DB{}Objects.m'.format(fmt_camel_upper(namespace.name)))\n with self.output_to_relative_path(file_path):\n self.emit_raw(base_file_comment)\n description = '/// Arguments, results, and errors for the `{}` namespace.'.format(\n fmt_camel_upper(namespace.name))\n self.emit(description)\n if self.args.exclude_from_analysis:\n self.emit()\n self.emit('\n for data_type in namespace.linearize_data_types():\n if is_struct_type(data_type):\n self._generate_struct_class_m(data_type)\n elif is_union_type(data_type):\n self._generate_union_class_m(data_type)\n if self.args.exclude_from_analysis:\n self.emit('", - "docstring": "Creates Obj C argument, error, serializer and deserializer types\n for the given namespace." - }, - { - "code": "def text(self):\n return self.template.format(name=self.name, type=self.type)", - "docstring": "Formatted param definition\n\n Equivalent to ``self.template.format(name=self.name, type=self.type)``." - }, - { - "code": "def detect_circle(nodes):\n if not isinstance(nodes, dict):\n raise TypeError('\"nodes\" must be a dictionary')\n dependencies = set(nodes.keys())\n traveled = []\n heads = _detect_circle(nodes, dependencies, traveled)\n return DependencyTree(heads)", - "docstring": "Wrapper for recursive _detect_circle function" - }, - { - "code": "def from_dict(cls, d):\n return Dos(d[\"efermi\"], d[\"energies\"],\n {Spin(int(k)): v\n for k, v in d[\"densities\"].items()})", - "docstring": "Returns Dos object from dict representation of Dos." - }, - { - "code": "def files(self):\n if not self.args.files and self.recursive:\n return ['.']\n else:\n return self.args.files", - "docstring": "A list of input sources. Each item can be a file path, a glob path or URL." 
- }, - { - "code": "def save_as_pdf(self, dest_path):\n dest_path = self._add_extension('pdf', dest_path)\n build_dir = tempfile.mkdtemp()\n build_path = os.path.join(build_dir, 'document.tex')\n self.save_assets(build_path)\n with open(build_path, 'w') as f:\n f.write(self.render())\n pdf_path = self._build_document(build_path)\n shutil.copyfile(pdf_path, dest_path)\n shutil.rmtree(build_dir)", - "docstring": "Save the plot as a PDF file.\n\n Save and render the plot using LaTeX to create a PDF file.\n\n :param dest_path: path of the file." - }, - { - "code": "def clean_up_network(network_id, **kwargs):\n user_id = kwargs.get('user_id')\n try:\n log.debug(\"Querying Network %s\", network_id)\n net_i = db.DBSession.query(Network).filter(Network.id == network_id).\\\n options(noload('scenarios')).options(noload('nodes')).options(noload('links')).options(noload('resourcegroups')).options(joinedload_all('types.templatetype.template')).one()\n net_i.attributes\n node_qry = db.DBSession.query(Node).filter(Node.network_id==network_id).filter(Node.status=='X').all()\n link_qry = db.DBSession.query(Link).filter(Link.network_id==network_id).filter(Link.status=='X').all()\n group_qry = db.DBSession.query(ResourceGroup).filter(ResourceGroup.network_id==network_id).filter(ResourceGroup.status=='X').all()\n scenario_qry = db.DBSession.query(Scenario).filter(Scenario.network_id==network_id).filter(Scenario.status=='X').all()\n for n in node_qry:\n db.DBSession.delete(n)\n for l in link_qry:\n db.DBSession.delete(l)\n for g in group_qry:\n db.DBSession.delete(g)\n for s in scenario_qry:\n db.DBSession.delete(s)\n except NoResultFound:\n raise ResourceNotFoundError(\"Network %s not found\"%(network_id))\n db.DBSession.flush()\n return 'OK'", - "docstring": "Purge any deleted nodes, links, resourcegroups and scenarios in a given network" - }, - { - "code": "def sort(self, key_or_list, direction=None):\n self.__check_okay_to_chain()\n keys = helpers._index_list(key_or_list, direction)\n self.__ordering = helpers._index_document(keys)\n return self", - "docstring": "Sorts this cursor's results.\n\n Pass a field name and a direction, either\n :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::\n\n for doc in collection.find().sort('field', pymongo.ASCENDING):\n print(doc)\n\n To sort by multiple fields, pass a list of (key, direction) pairs::\n\n for doc in collection.find().sort([\n ('field1', pymongo.ASCENDING),\n ('field2', pymongo.DESCENDING)]):\n print(doc)\n\n Beginning with MongoDB version 2.6, text search results can be\n sorted by relevance::\n\n cursor = db.test.find(\n {'$text': {'$search': 'some words'}},\n {'score': {'$meta': 'textScore'}})\n\n # Sort by 'score' field.\n cursor.sort([('score', {'$meta': 'textScore'})])\n\n for doc in cursor:\n print(doc)\n\n Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has\n already been used. 
Only the last :meth:`sort` applied to this\n cursor has any effect.\n\n :Parameters:\n - `key_or_list`: a single key or a list of (key, direction)\n pairs specifying the keys to sort on\n - `direction` (optional): only used if `key_or_list` is a single\n key, if not given :data:`~pymongo.ASCENDING` is assumed" - }, - { - "code": "def get_roaster_state(self):\n value = self._current_state.value\n if(value == b'\\x02\\x01'):\n return 'idle'\n elif(value == b'\\x04\\x04'):\n return 'cooling'\n elif(value == b'\\x08\\x01'):\n return 'sleeping'\n elif(value == b'\\x00\\x00' or value == b''):\n return 'connecting'\n elif(value == b'\\x04\\x02'):\n return 'roasting'\n else:\n return 'unknown'", - "docstring": "Returns a string based upon the current state of the roaster. Will\n raise an exception if the state is unknown.\n\n Returns:\n 'idle' if idle,\n 'sleeping' if sleeping,\n 'cooling' if cooling,\n 'roasting' if roasting,\n 'connecting' if in hardware connection phase,\n 'unknown' otherwise" - }, - { - "code": "def get_all_load_balancers(self, load_balancer_names=None):\n params = {}\n if load_balancer_names:\n self.build_list_params(params, load_balancer_names,\n 'LoadBalancerNames.member.%d')\n return self.get_list('DescribeLoadBalancers', params,\n [('member', LoadBalancer)])", - "docstring": "Retrieve all load balancers associated with your account.\n\n :type load_balancer_names: list\n :keyword load_balancer_names: An optional list of load balancer names.\n\n :rtype: :py:class:`boto.resultset.ResultSet`\n :return: A ResultSet containing instances of\n :class:`boto.ec2.elb.loadbalancer.LoadBalancer`" - }, - { - "code": "def xmoe2_v1_l4k_compressed_c4():\n hparams = xmoe2_v1_l4k()\n hparams.decoder_layers = [\n \"compressed_att\" if l == \"att\" else l for l in hparams.decoder_layers]\n hparams.compression_factor = 4\n return hparams", - "docstring": "With compressed attention." - }, - { - "code": "def reread(self):\n logger.debug(\"Loading settings from %s\",\n os.path.abspath(self.filename))\n conf = self.read_conf()\n changed = self.creds.reread()\n checks = self.parser.parse_checks(conf)\n if self.checks != checks:\n self.checks = checks\n return True\n else:\n return changed", - "docstring": "Read configuration file and substitute references into checks conf" - }, - { - "code": "def delta(self, signature):\n \"Generates delta for remote file via API using local file's signature.\"\n return self.api.post('path/sync/delta', self.path, signature=signature)", - "docstring": "Generates delta for remote file via API using local file's signature." - }, - { - "code": "def _try_resolve_sam_resource_id_refs(self, input, supported_resource_id_refs):\n if not self._is_intrinsic_dict(input):\n return input\n function_type = list(input.keys())[0]\n return self.supported_intrinsics[function_type].resolve_resource_id_refs(input, supported_resource_id_refs)", - "docstring": "Try to resolve SAM resource id references on the given template. If the given object looks like one of the\n supported intrinsics, it calls the appropriate resolution on it. 
If not, this method returns the original input\n unmodified.\n\n :param dict input: Dictionary that may represent an intrinsic function\n :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones.\n :return: Modified input dictionary with id references resolved" - }, - { - "code": "def _RunAndWaitForVFSFileUpdate(self, path):\n client_id = rdf_client.GetClientURNFromPath(path)\n if client_id is None:\n return\n flow_utils.UpdateVFSFileAndWait(\n client_id,\n token=self.token,\n vfs_file_urn=self.root.Add(path),\n timeout=self.timeout)", - "docstring": "Runs a flow on the client, and waits for it to finish." - }, - { - "code": "def EndOfEventAction(self, event):\n self.log.debug('Processing simulated event %d', event.GetEventID())\n docs = self.sd.getDocs()\n self.sd.clearDocs()\n for processor in self.processors:\n docs = processor.process(docs)\n if not docs:\n self.log.warning('%s did not return documents in process()!',\n processor.__class__.__name__)", - "docstring": "At the end of an event, grab sensitive detector hits then run processor loop" - }, - { - "code": "def check_data_flow_id(self, data_flow_id):\n if data_flow_id is not None:\n if data_flow_id in self._data_flows.keys():\n raise AttributeError(\"The data_flow id %s already exists. Cannot add data_flow!\", data_flow_id)\n else:\n data_flow_id = generate_data_flow_id()\n while data_flow_id in self._data_flows.keys():\n data_flow_id = generate_data_flow_id()\n return data_flow_id", - "docstring": "Check the data flow id and calculate a new one if it is None\n\n :param data_flow_id: The data flow id to check\n :return: The new data flow id\n :raises exceptions.AttributeError: if data_flow.data_flow_id already exists" - }, - { - "code": "def timeseries(datasets):\n expanded_ds = []\n for ds in datasets:\n tmp = ds.expand_dims(\"time\")\n tmp.coords[\"time\"] = pd.DatetimeIndex([ds.attrs[\"start_time\"]])\n expanded_ds.append(tmp)\n res = xr.concat(expanded_ds, dim=\"time\")\n res.attrs = combine_metadata(*[x.attrs for x in expanded_ds])\n return res", - "docstring": "Expands each dataset with a time dimension and concatenates them along it" - }, - { - "code": "def add_http_endpoint(self, url, request_handler):\n self.app.router.add_route('*', url, request_handler)", - "docstring": "This method provides a programmatic way of adding individual routes\n to the http server.\n\n Args:\n url (str): the url to be handled by the request_handler\n request_handler (nautilus.network.RequestHandler): The request handler" - }, - { - "code": "def copyFile(src, dest):\n try:\n if os.path.isfile(src):\n dpath, dfile = os.path.split(dest)\n if not os.path.isdir(dpath):\n os.makedirs(dpath)\n if not os.path.exists(dest):\n touch(dest)\n try:\n shutil.copy2(src, dest)\n except shutil.Error as e:\n logging.exception('Error: %s' % e)\n except IOError as e:\n logging.exception('Error: %s' % e.strerror)\n except:\n logging.exception('Error: src to copy does not exist.')", - "docstring": "Copies a source file to a destination whose path may not yet exist.\n\n Keyword arguments:\n src -- Source path to a file (string)\n dest -- Path for destination file (also a string)" - }, - { - "code": "def verify(self, type_):\n raw_missing, mistyped, mismatched = self._diff_signatures(type_)\n missing = []\n defaults_to_use = {}\n for name in raw_missing:\n try:\n defaults_to_use[name] = self._defaults[name].implementation\n except KeyError:\n missing.append(name)\n if not any((missing, mistyped, mismatched)):\n return defaults_to_use\n raise 
self._invalid_implementation(type_, missing, mistyped, mismatched)", - "docstring": "Check whether a type implements ``self``.\n\n Parameters\n ----------\n type_ : type\n The type to check.\n\n Raises\n ------\n TypeError\n If ``type_`` doesn't conform to our interface.\n\n Returns\n -------\n None" - }, - { - "code": "def create_address(kwargs=None, call=None):\n if call != 'function':\n raise SaltCloudSystemExit(\n 'The create_address function must be called with -f or --function.'\n )\n if not kwargs or 'name' not in kwargs:\n log.error(\n 'A name must be specified when creating an address.'\n )\n return False\n if 'region' not in kwargs:\n log.error(\n 'A region must be specified for the address.'\n )\n return False\n name = kwargs['name']\n ex_region = kwargs['region']\n ex_address = kwargs.get(\"address\", None)\n kwargs['region'] = _expand_region(kwargs['region'])\n conn = get_conn()\n __utils__['cloud.fire_event'](\n 'event',\n 'create address',\n 'salt/cloud/address/creating',\n args=salt.utils.data.simple_types_filter(kwargs),\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n addy = conn.ex_create_address(name, ex_region, ex_address)\n __utils__['cloud.fire_event'](\n 'event',\n 'created address',\n 'salt/cloud/address/created',\n args=salt.utils.data.simple_types_filter(kwargs),\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n log.info('Created GCE Address %s', name)\n return _expand_address(addy)", - "docstring": "Create a static address in a region.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f create_address gce name=my-ip region=us-central1 address=IP" - }, - { - "code": "def div(self, y):\n r\n y = np.asanyarray(y)\n if y.shape[0] != self.Ne:\n raise ValueError('First dimension must be the number of edges '\n 'G.Ne = {}, got {}.'.format(self.Ne, y.shape))\n return self.D.dot(y)", - "docstring": "r\"\"\"Compute the divergence of a signal defined on the edges.\n\n The divergence :math:`z` of a signal :math:`y` is defined as\n\n .. math:: z = \\operatorname{div}_\\mathcal{G} y = D y,\n\n where :math:`D` is the differential operator :attr:`D`.\n\n The value of the divergence on the vertex :math:`v_i` is\n\n .. math:: z[i] = \\sum_k D[i, k] y[k]\n = \\sum_{\\{k,j | e_k=(v_j, v_i) \\in \\mathcal{E}\\}}\n \\sqrt{\\frac{W[j, i]}{2}} y[k]\n - \\sum_{\\{k,j | e_k=(v_i, v_j) \\in \\mathcal{E}\\}}\n \\sqrt{\\frac{W[i, j]}{2}} y[k]\n\n for the combinatorial Laplacian, and\n\n .. math:: z[i] = \\sum_k D[i, k] y[k]\n = \\sum_{\\{k,j | e_k=(v_j, v_i) \\in \\mathcal{E}\\}}\n \\sqrt{\\frac{W[j, i]}{2 d[i]}} y[k]\n - \\sum_{\\{k,j | e_k=(v_i, v_j) \\in \\mathcal{E}\\}}\n \\sqrt{\\frac{W[i, j]}{2 d[i]}} y[k]\n\n for the normalized Laplacian.\n\n For undirected graphs, only half the edges are kept and the\n :math:`1/\\sqrt{2}` factor disappears from the above equations. 
See\n :meth:`compute_differential_operator` for details.\n\n Parameters\n ----------\n y : array_like\n Signal of length :attr:`n_edges` living on the edges.\n\n Returns\n -------\n z : ndarray\n Divergence signal of length :attr:`n_vertices` living on the\n vertices.\n\n See Also\n --------\n compute_differential_operator\n grad : compute the gradient of a vertex signal\n\n Examples\n --------\n\n Non-directed graph and combinatorial Laplacian:\n\n >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial')\n >>> graph.compute_differential_operator()\n >>> graph.div([2, -2, 0])\n array([-2., 4., -2., 0.])\n\n Directed graph and combinatorial Laplacian:\n\n >>> graph = graphs.Path(4, directed=True, lap_type='combinatorial')\n >>> graph.compute_differential_operator()\n >>> graph.div([2, -2, 0])\n array([-1.41421356, 2.82842712, -1.41421356, 0. ])\n\n Non-directed graph and normalized Laplacian:\n\n >>> graph = graphs.Path(4, directed=False, lap_type='normalized')\n >>> graph.compute_differential_operator()\n >>> graph.div([2, -2, 0])\n array([-2. , 2.82842712, -1.41421356, 0. ])\n\n Directed graph and normalized Laplacian:\n\n >>> graph = graphs.Path(4, directed=True, lap_type='normalized')\n >>> graph.compute_differential_operator()\n >>> graph.div([2, -2, 0])\n array([-2. , 2.82842712, -1.41421356, 0. ])" - }, - { - "code": "def columnCount(self, qindex=QModelIndex()):\r\n if self.total_cols <= self.cols_loaded:\r\n return self.total_cols\r\n else:\r\n return self.cols_loaded", - "docstring": "Array column number" - }, - { - "code": "def each(self, callback):\n items = self.items\n for item in items:\n if callback(item) is False:\n break\n return self", - "docstring": "Execute a callback over each item.\n\n .. code::\n\n collection = Collection([1, 2, 3])\n collection.each(lambda x: x + 3)\n\n .. warning::\n\n It only applies the callback but does not modify the collection's items.\n Use the `transform() <#backpack.Collection.transform>`_ method to\n modify the collection.\n\n :param callback: The callback to execute\n :type callback: callable\n\n :rtype: Collection" - }, - { - "code": "def do_run(self, line):\n self._split_args(line, 0, 0)\n self._command_processor.get_operation_queue().execute()\n self._print_info_if_verbose(\n \"All operations in the write queue were successfully executed\"\n )", - "docstring": "run Perform each operation in the queue of write operations." - }, - { - "code": "def Flow(self, flow_id):\n return flow.FlowRef(\n client_id=self.client_id, flow_id=flow_id, context=self._context)", - "docstring": "Return a reference to a flow with a given id on this client." - }, - { - "code": "def stop(self, *args, **kwargs):\n if self.status in (Status.stopping, Status.stopped):\n logger.debug(\"{} is already {}\".format(self, self.status.name))\n else:\n self.status = Status.stopping\n self.onStopping(*args, **kwargs)\n self.status = Status.stopped", - "docstring": "Set the status to Status.stopping and also call `onStopping`\n with the provided args and kwargs." - }, - { - "code": "def pathparts(self):\n try:\n parts = self.parent.pathparts()\n parts.append(self.name)\n return parts\n except AttributeError:\n return []", - "docstring": "A list of the parts of the path, with the root node returning\n an empty list." 
- }, - { - "code": "def help_members(obj, use_other=False):\n r\n import utool as ut\n attrnames = dir(obj)\n attr_list = [getattr(obj, attrname) for attrname in attrnames]\n attr_types = ut.lmap(ut.type_str, map(type, attr_list))\n unique_types, groupxs = ut.group_indices(attr_types)\n type_to_items = ut.dzip(unique_types, ut.apply_grouping(attr_list, groupxs))\n type_to_itemname = ut.dzip(unique_types, ut.apply_grouping(attrnames, groupxs))\n memtypes = ['instancemethod']\n func_mems = ut.dict_subset(type_to_items, memtypes, [])\n func_list = ut.flatten(func_mems.values())\n defsig_list = []\n num_unbound_args_list = []\n num_args_list = []\n for func in func_list:\n argspec = ut.get_func_argspec(func)\n args = argspec.args\n unbound_args = get_unbound_args(argspec)\n defsig = ut.func_defsig(func)\n defsig_list.append(defsig)\n num_unbound_args_list.append(len(unbound_args))\n num_args_list.append(len(args))\n group = ut.hierarchical_group_items(defsig_list, [num_unbound_args_list, num_args_list])\n print(repr(obj))\n print(ut.repr3(group, strvals=True))\n if use_other:\n other_mems = ut.delete_keys(type_to_items.copy(), memtypes)\n other_mems_attrnames = ut.dict_subset(type_to_itemname, other_mems.keys())\n named_other_attrs = ut.dict_union_combine(other_mems_attrnames, other_mems, lambda x, y: list(zip(x, y)))\n print(ut.repr4(named_other_attrs, nl=2, strvals=True))", - "docstring": "r\"\"\"\n Inspects members of a class\n\n Args:\n obj (class or module):\n\n CommandLine:\n python -m utool.util_inspect help_members\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_inspect import * # NOQA\n >>> import utool as ut\n >>> obj = ut.DynStruct\n >>> result = help_members(obj)\n >>> print(result)" - }, - { - "code": "def bdd(*keywords):\n settings = _personal_settings().data\n _storybook().with_params(\n **{\"python version\": settings[\"params\"][\"python version\"]}\n ).only_uninherited().shortcut(*keywords).play()", - "docstring": "Run tests matching keywords." - }, - { - "code": "def to_nullable_boolean(value):\n if value == None:\n return None\n if type(value) == type(True):\n return value\n str_value = str(value).lower()\n if str_value in ['1', 'true', 't', 'yes', 'y']:\n return True\n if str_value in ['0', 'frue', 'f', 'no', 'n']:\n return False\n return None", - "docstring": "Converts value into boolean or returns None when conversion is not possible.\n\n :param value: the value to convert.\n\n :return: boolean value or None when convertion is not supported." - }, - { - "code": "def createElement(self, token):\n name = token[\"name\"]\n namespace = token.get(\"namespace\", self.defaultNamespace)\n element = self.elementClass(name, namespace)\n element.attributes = token[\"data\"]\n return element", - "docstring": "Create an element but don't insert it anywhere" - }, - { - "code": "def sections(self):\n sections = []\n for match in texutils.section_pattern.finditer(self.text):\n textbefore = self.text[0:match.start()]\n wordsbefore = nlputils.wordify(textbefore)\n numwordsbefore = len(wordsbefore)\n sections.append((numwordsbefore, match.group(1)))\n self._sections = sections\n return sections", - "docstring": "List with tuples of section names and positions.\n Positions of section names are measured by cumulative word count." 
- }, - { - "code": "def relabel(self, catalogue):\n for work, label in catalogue.items():\n self._matches.loc[self._matches[constants.WORK_FIELDNAME] == work,\n constants.LABEL_FIELDNAME] = label", - "docstring": "Relabels results rows according to `catalogue`.\n\n A row whose work is labelled in the catalogue will have its\n label set to the label in the catalogue. Rows whose works are\n not labelled in the catalogue will be unchanged.\n\n :param catalogue: mapping of work names to labels\n :type catalogue: `Catalogue`" - }, - { - "code": "def _find_usage_dynamodb(self):\n table_count = 0\n region_read_capacity = 0\n region_write_capacity = 0\n logger.debug(\"Getting usage for DynamoDB tables\")\n for table in self.resource_conn.tables.all():\n table_count += 1\n gsi_write = 0\n gsi_read = 0\n gsi_count = 0\n if table.global_secondary_indexes is not None:\n for gsi in table.global_secondary_indexes:\n gsi_count += 1\n gsi_read += gsi['ProvisionedThroughput'][\n 'ReadCapacityUnits']\n gsi_write += gsi['ProvisionedThroughput'][\n 'WriteCapacityUnits']\n table_write_capacity = table.provisioned_throughput[\n 'WriteCapacityUnits'] + gsi_write\n table_read_capacity = table.provisioned_throughput[\n 'ReadCapacityUnits'] + gsi_read\n region_write_capacity += table_write_capacity\n region_read_capacity += table_read_capacity\n self.limits['Global Secondary Indexes']._add_current_usage(\n gsi_count,\n resource_id=table.name,\n aws_type='AWS::DynamoDB::Table'\n )\n self.limits['Local Secondary Indexes']._add_current_usage(\n len(table.local_secondary_indexes)\n if table.local_secondary_indexes is not None else 0,\n resource_id=table.name,\n aws_type='AWS::DynamoDB::Table'\n )\n self.limits['Table Max Write Capacity Units']._add_current_usage(\n table_write_capacity,\n resource_id=table.name,\n aws_type='AWS::DynamoDB::Table'\n )\n self.limits['Table Max Read Capacity Units']._add_current_usage(\n table_read_capacity,\n resource_id=table.name,\n aws_type='AWS::DynamoDB::Table'\n )\n self.limits['Tables Per Region']._add_current_usage(\n table_count,\n aws_type='AWS::DynamoDB::Table'\n )\n self.limits['Account Max Write Capacity Units']._add_current_usage(\n region_write_capacity,\n aws_type='AWS::DynamoDB::Table'\n )\n self.limits['Account Max Read Capacity Units']._add_current_usage(\n region_read_capacity,\n aws_type='AWS::DynamoDB::Table'\n )", - "docstring": "calculates current usage for all DynamoDB limits" - }, - { - "code": "def load_precision(filename):\n path = _find_file(filename)\n r = dict()\n with open(path, 'r') as f:\n exec(f.read(), {}, r)\n return r", - "docstring": "Load a CLASS precision file into a dictionary.\n\n Parameters\n ----------\n filename : str\n the name of an existing file to load, or one in the files included\n as part of the CLASS source\n\n Returns\n -------\n dict :\n the precision parameters loaded from file" - }, - { - "code": "def Read(self, meta_only=False, allowed=None, cast=False):\n if allowed is not None and not isinstance(allowed, (list, tuple)):\n raise RuntimeError('`allowed` must be a list of str names.')\n meta = xmltodict.parse(\n open(self.filename, 'r').read()\n ).get('espa_metadata')\n bands = meta.get('bands').get('band')\n del(meta['bands'])\n if not isinstance(bands, (list)):\n bands = [bands]\n meta = self.CleanDict(meta)\n ras = SetProperties(RasterSet, meta)\n if allowed is not None:\n for k in list(self.bdict.keys()):\n if k not in allowed:\n del(self.bdict[k])\n for i in range(len(bands)):\n info = self.GenerateBand(bands[i], 
meta_only=True, cast=cast)\n if allowed is not None and info.name not in allowed:\n continue\n if info.name not in self.bdict.keys() or self.bdict[info.name].data is None:\n b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast)\n self.bdict[b.name] = b\n elif cast and self.bdict[info.name].data.dtype != np.float32:\n b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast)\n self.bdict[b.name] = b\n elif not cast and self.bdict[info.name].data.dtype == np.float32:\n b = self.GenerateBand(bands[i], meta_only=meta_only, cast=cast)\n self.bdict[b.name] = b\n ras.bands = self.bdict\n if not meta_only:\n ras.validate()\n return ras", - "docstring": "Read the ESPA XML metadata file" - }, - { - "code": "def parse_masked_phone_number(html, parser=None):\n if parser is None:\n parser = bs4.BeautifulSoup(html, 'html.parser')\n fields = parser.find_all('span', {'class': 'field_prefix'})\n if not fields:\n raise VkParseError(\n 'No ... in the \\n%s' % html)\n result = []\n for f in fields:\n value = f.get_text().replace(six.u('\\xa0'), '')\n result.append(value)\n return tuple(result)", - "docstring": "Get masked phone number from security check html\n\n :param html: str: raw html text\n :param parser: bs4.BeautifulSoup: html parser\n :return: tuple of phone prefix and suffix, for example: ('+1234', '89')\n :rtype : tuple" - }, - { - "code": "def register_components(self):\n unregistered_components = []\n for path in self.paths:\n for file in foundations.walkers.files_walker(path, (\"\\.{0}$\".format(self.__extension),), (\"\\._\",)):\n if not self.register_component(file):\n unregistered_components.append(file)\n if not unregistered_components:\n return True\n else:\n raise manager.exceptions.ComponentRegistrationError(\n \"{0} | '{1}' Components failed to register!\".format(self.__class__.__name__,\n \", \".join(unregistered_components)))", - "docstring": "Registers the Components.\n\n Usage::\n\n >>> manager = Manager((\"./manager/tests/tests_manager/resources/components/core\",))\n >>> manager.register_components()\n True\n >>> manager.components.keys()\n [u'core.tests_component_a', u'core.tests_component_b']\n\n :return: Method success.\n :rtype: bool" - }, - { - "code": "def add_group(self, group_attribs=None, parent=None):\n if parent is None:\n parent = self.tree.getroot()\n elif not self.contains_group(parent):\n warnings.warn('The requested group {0} does not belong to '\n 'this Document'.format(parent))\n if group_attribs is None:\n group_attribs = {}\n else:\n group_attribs = group_attribs.copy()\n return SubElement(parent, '{{{0}}}g'.format(\n SVG_NAMESPACE['svg']), group_attribs)", - "docstring": "Add an empty group element to the SVG." - }, - { - "code": "def validate(self):\n for context_name in self.context_names:\n context = self.context(context_name)\n try:\n context.validate()\n except ResolvedContextError as e:\n raise SuiteError(\"Error in context %r: %s\"\n % (context_name, str(e)))", - "docstring": "Validate the suite." - }, - { - "code": "def get_configdir(name):\n configdir = os.environ.get('%sCONFIGDIR' % name.upper())\n if configdir is not None:\n return os.path.abspath(configdir)\n p = None\n h = _get_home()\n if ((sys.platform.startswith('linux') or\n sys.platform.startswith('darwin')) and h is not None):\n p = os.path.join(h, '.config/' + name)\n elif h is not None:\n p = os.path.join(h, '.' 
+ name)\n if not os.path.exists(p):\n os.makedirs(p)\n return p", - "docstring": "Return the string representing the configuration directory.\n\n The directory is chosen as follows:\n\n 1. If the ``name.upper() + CONFIGDIR`` environment variable is supplied,\n choose that.\n\n 2a. On Linux, choose `$HOME/.config`.\n\n 2b. On other platforms, choose `$HOME/.matplotlib`.\n\n 3. If the chosen directory exists, use that as the\n configuration directory.\n 4. A directory: return None.\n\n Notes\n -----\n This function is taken from the matplotlib [1] module\n\n References\n ----------\n [1]: http://matplotlib.org/api/" - }, - { - "code": "def _set_commands(package_names):\n commands = {}\n for pkg_name in package_names:\n cmd_name = pkg_name.split('.')[-1]\n commands[cmd_name] = pkg_name\n return commands", - "docstring": "Extract the command name from package name. Last part of the module path is the command\n ie. if path is foo.bar.baz, then \"baz\" is the command name.\n\n :param package_names: List of package names\n :return: Dictionary with command name as key and the package name as value." - }, - { - "code": "def _create_record(self, rtype, name, content):\n existing_records = self._list_records(rtype, name, content)\n if len(existing_records) >= 1:\n return True\n record = {\n \"record_type\": rtype,\n \"name\": self._relative_name(name),\n \"content\": content,\n }\n if self._get_lexicon_option(\"ttl\"):\n record[\"ttl\"] = self._get_lexicon_option(\"ttl\")\n if self._get_lexicon_option(\"priority\"):\n record[\"prio\"] = self._get_lexicon_option(\"priority\")\n payload = self._post(\n \"/v1/domains/{0}/records\".format(self.domain),\n {\"record\": record},\n )\n status = \"id\" in payload.get(\"record\", {})\n LOGGER.debug(\"create_record: %s\", status)\n return status", - "docstring": "Create record if doesnt already exist with same content" - }, - { - "code": "def run_task(func):\n def _wrapped(*a, **k):\n gen = func(*a, **k)\n return _consume_task(gen)\n return _wrapped", - "docstring": "Decorator to collect and return generator results, returning a list\n if there are multiple results" - }, - { - "code": "def element_for_value(cls, attrname, value):\n if isinstance(value, Resource):\n if attrname in cls._classes_for_nodename:\n return value.to_element(attrname)\n return value.to_element()\n el = ElementTreeBuilder.Element(attrname)\n if value is None:\n el.attrib['nil'] = 'nil'\n elif isinstance(value, bool):\n el.attrib['type'] = 'boolean'\n el.text = 'true' if value else 'false'\n elif isinstance(value, int):\n el.attrib['type'] = 'integer'\n el.text = str(value)\n elif isinstance(value, datetime):\n el.attrib['type'] = 'datetime'\n el.text = value.strftime('%Y-%m-%dT%H:%M:%SZ')\n elif isinstance(value, list) or isinstance(value, tuple):\n for sub_resource in value:\n if hasattr(sub_resource, 'to_element'):\n el.append(sub_resource.to_element())\n else:\n el.append(cls.element_for_value(re.sub(r\"s$\", \"\", attrname), sub_resource))\n elif isinstance(value, Money):\n value.add_to_element(el)\n else:\n el.text = six.text_type(value)\n return el", - "docstring": "Serialize the given value into an XML `Element` with the\n given tag name, returning it.\n\n The value argument may be:\n * a `Resource` instance\n * a `Money` instance\n * a `datetime.datetime` instance\n * a string, integer, or boolean value\n * ``None``\n * a list or tuple of these values" - }, - { - "code": "def _check_obj_properties(self, pub, name=\"pub\"):\n if not hasattr(pub, \"indexes\"):\n raise 
InvalidType(\"`%s` doesn't have .indexes property!\" % name)\n if not pub.indexes:\n raise InvalidType(\"`%s.indexes` is not set!\" % name)\n if not hasattr(pub, \"project_key\"):\n raise InvalidType(\n \"`%s` doesn't have .project_key property!\" % name\n )\n if not pub.project_key:\n raise InvalidType(\"`%s.project_key` is not set!\" % name)", - "docstring": "Make sure, that `pub` has the right interface.\n\n Args:\n pub (obj): Instance which will be checked.\n name (str): Name of the instance. Used in exception. Default `pub`.\n\n Raises:\n InvalidType: When the `pub` is not instance of `obj_type`." - }, - { - "code": "def update_pipe_channel(self, uid, channel_name, label):\n pipe_group_name = _form_pipe_channel_name(channel_name)\n if self.channel_layer:\n current = self.channel_maps.get(uid, None)\n if current != pipe_group_name:\n if current:\n async_to_sync(self.channel_layer.group_discard)(current, self.channel_name)\n self.channel_maps[uid] = pipe_group_name\n async_to_sync(self.channel_layer.group_add)(pipe_group_name, self.channel_name)", - "docstring": "Update this consumer to listen on channel_name for the js widget associated with uid" - }, - { - "code": "def _find_current_phase(self, global_step):\n epoch_size = sum(phase.steps for phase in self._phases)\n epoch = int(global_step // epoch_size)\n steps_in = global_step % epoch_size\n for phase in self._phases:\n if steps_in < phase.steps:\n return phase, epoch, steps_in\n steps_in -= phase.steps", - "docstring": "Determine the current phase based on the global step.\n\n This ensures continuing the correct phase after restoring checkoints.\n\n Args:\n global_step: The global number of steps performed across all phases.\n\n Returns:\n Tuple of phase object, epoch number, and phase steps within the epoch." - }, - { - "code": "def from_file(f):\n if sys.hexversion >= 0x02030000:\n str_type = basestring\n opts = 'rU'\n else:\n str_type = str\n opts = 'r'\n if isinstance(f, str_type):\n f = file(f, opts)\n want_close = True\n else:\n want_close = False\n try:\n m = from_text(f)\n finally:\n if want_close:\n f.close()\n return m", - "docstring": "Read the next text format message from the specified file.\n\n @param f: file or string. 
If I{f} is a string, it is treated\n as the name of a file to open.\n @raises UnknownHeaderField:\n @raises dns.exception.SyntaxError:\n @rtype: dns.message.Message object" - }, - { - "code": "def last_address(self, skip_broadcast_address=True):\n\t\tbin_address = self.__address.bin_address()\n\t\tbin_address_length = len(bin_address)\n\t\tif self.__mask > (bin_address_length - 2):\n\t\t\tskip_broadcast_address = False\n\t\tfor i in range(bin_address_length - self.__mask):\n\t\t\tbin_address[self.__mask + i] = 1\n\t\tif skip_broadcast_address:\n\t\t\tbin_address[bin_address_length - 1] = 0\n\t\treturn WIPV4Address(bin_address)", - "docstring": "Return the last IP address of this network\n\n\t\t:param skip_broadcast_address: this flag specifies whether to skip the very last address (that is \\\n\t\tusually used as broadcast address) or not.\n\t\t:return: WIPV4Address" - }, - { - "code": "def all_replica_set_links(rs_id, rel_to=None):\n return [\n replica_set_link(rel, rs_id, self_rel=(rel == rel_to))\n for rel in (\n 'get-replica-set-info',\n 'delete-replica-set', 'replica-set-command',\n 'get-replica-set-members', 'add-replica-set-member',\n 'get-replica-set-secondaries', 'get-replica-set-primary',\n 'get-replica-set-arbiters', 'get-replica-set-hidden-members',\n 'get-replica-set-passive-members', 'get-replica-set-servers'\n )\n ]", - "docstring": "Get a list of all links to be included with replica sets." - }, - { - "code": "def remove_experiment(self, id):\n if id in self.experiments:\n self.experiments.pop(id)\n self.write_file()", - "docstring": "remove an experiment by id" - }, - { - "code": "def delete(self, key):\n super(DirectoryTreeDatastore, self).delete(key)\n str_key = str(key)\n if str_key == '/':\n return\n dir_key = key.parent.instance('directory')\n directory = self.directory(dir_key)\n if directory and str_key in directory:\n directory.remove(str_key)\n if len(directory) > 0:\n super(DirectoryTreeDatastore, self).put(dir_key, directory)\n else:\n super(DirectoryTreeDatastore, self).delete(dir_key)", - "docstring": "Removes the object named by `key`.\n DirectoryTreeDatastore removes the directory entry." - }, - { - "code": "def get_irradiance_value(self, month, day, hour):\n dt = DateTime(month, day, hour, leap_year=self.is_leap_year)\n count = int(dt.hoy * self.timestep)\n return self.direct_normal_irradiance[count], \\\n self.diffuse_horizontal_irradiance[count]", - "docstring": "Get direct and diffuse irradiance values for a point in time." 
- }, - { - "code": "def list_sizes(self, provider=None):\n mapper = salt.cloud.Map(self._opts_defaults())\n return salt.utils.data.simple_types_filter(\n mapper.size_list(provider)\n )", - "docstring": "List all available sizes in configured cloud systems" - }, - { - "code": "def absent(name, acl_type, acl_name='', perms='', recurse=False):\n ret = {'name': name,\n 'result': True,\n 'changes': {},\n 'comment': ''}\n if not os.path.exists(name):\n ret['comment'] = '{0} does not exist'.format(name)\n ret['result'] = False\n return ret\n __current_perms = __salt__['acl.getfacl'](name, recursive=recurse)\n if acl_type.startswith(('d:', 'default:')):\n _acl_type = ':'.join(acl_type.split(':')[1:])\n _current_perms = __current_perms[name].get('defaults', {})\n _default = True\n else:\n _acl_type = acl_type\n _current_perms = __current_perms[name]\n _default = False\n if acl_name == '':\n _search_name = __current_perms[name].get('comment').get(_acl_type, '')\n else:\n _search_name = acl_name\n if _current_perms.get(_acl_type, None) or _default:\n try:\n user = [i for i in _current_perms[_acl_type] if next(six.iterkeys(i)) == _search_name].pop()\n except (AttributeError, IndexError, StopIteration, KeyError):\n user = None\n need_refresh = False\n for path in __current_perms:\n acl_found = False\n for user_acl in __current_perms[path].get(_acl_type, []):\n if _search_name in user_acl:\n acl_found = True\n break\n if acl_found:\n need_refresh = True\n break\n if user or need_refresh:\n ret['comment'] = 'Removing permissions'\n if __opts__['test']:\n ret['result'] = None\n return ret\n __salt__['acl.delfacl'](acl_type, acl_name, perms, name, recursive=recurse)\n else:\n ret['comment'] = 'Permissions are in the desired state'\n else:\n ret['comment'] = 'ACL Type does not exist'\n ret['result'] = False\n return ret", - "docstring": "Ensure a Linux ACL does not exist\n\n name\n The acl path\n\n acl_type\n The type of the acl is used for, it can be 'user' or 'group'\n\n acl_names\n The user or group\n\n perms\n Remove the permissions eg.: rwx\n\n recurse\n Set the permissions recursive in the path" - }, - { - "code": "def conf():\n stanza = ''\n stanzas = []\n in_stanza = False\n ret = {}\n pos = 0\n try:\n with salt.utils.files.fopen(_detect_conf(), 'r') as _fp:\n for line in _fp:\n line = salt.utils.stringutils.to_unicode(line)\n if line.startswith('\n continue\n if line.startswith('\\n'):\n in_stanza = False\n if 'title' in stanza:\n stanza += 'order {0}'.format(pos)\n pos += 1\n stanzas.append(stanza)\n stanza = ''\n continue\n if line.strip().startswith('title'):\n if in_stanza:\n stanza += 'order {0}'.format(pos)\n pos += 1\n stanzas.append(stanza)\n stanza = ''\n else:\n in_stanza = True\n if in_stanza:\n stanza += line\n if not in_stanza:\n key, value = _parse_line(line)\n ret[key] = value\n if in_stanza:\n if not line.endswith('\\n'):\n line += '\\n'\n stanza += line\n stanza += 'order {0}'.format(pos)\n pos += 1\n stanzas.append(stanza)\n except (IOError, OSError) as exc:\n msg = \"Could not read grub config: {0}\"\n raise CommandExecutionError(msg.format(exc))\n ret['stanzas'] = []\n for stanza in stanzas:\n mydict = {}\n for line in stanza.strip().splitlines():\n key, value = _parse_line(line)\n mydict[key] = value\n ret['stanzas'].append(mydict)\n return ret", - "docstring": "Parse GRUB conf file\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' grub.conf" - }, - { - "code": "def _state_run(self):\n if self.opts['startup_states']:\n if self.opts.get('master_type', 'str') == 'disable' and \\\n self.opts.get('file_client', 'remote') == 'remote':\n log.warning(\n 'Cannot run startup_states when \\'master_type\\' is set '\n 'to \\'disable\\' and \\'file_client\\' is set to '\n '\\'remote\\'. Skipping.'\n )\n else:\n data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}\n if self.opts['startup_states'] == 'sls':\n data['fun'] = 'state.sls'\n data['arg'] = [self.opts['sls_list']]\n elif self.opts['startup_states'] == 'top':\n data['fun'] = 'state.top'\n data['arg'] = [self.opts['top_file']]\n else:\n data['fun'] = 'state.highstate'\n data['arg'] = []\n self._handle_decoded_payload(data)", - "docstring": "Execute a state run based on information set in the minion config file" - }, - { - "code": "def lifecycle_rules(self):\n info = self._properties.get(\"lifecycle\", {})\n for rule in info.get(\"rule\", ()):\n action_type = rule[\"action\"][\"type\"]\n if action_type == \"Delete\":\n yield LifecycleRuleDelete.from_api_repr(rule)\n elif action_type == \"SetStorageClass\":\n yield LifecycleRuleSetStorageClass.from_api_repr(rule)\n else:\n raise ValueError(\"Unknown lifecycle rule: {}\".format(rule))", - "docstring": "Retrieve or set lifecycle rules configured for this bucket.\n\n See https://cloud.google.com/storage/docs/lifecycle and\n https://cloud.google.com/storage/docs/json_api/v1/buckets\n\n .. note::\n\n The getter for this property returns a list which contains\n *copies* of the bucket's lifecycle rules mappings. Mutating the\n list or one of its dicts has no effect unless you then re-assign\n the dict via the setter. E.g.:\n\n >>> rules = bucket.lifecycle_rules\n >>> rules.append({'origin': '/foo', ...})\n >>> rules[1]['rule']['action']['type'] = 'Delete'\n >>> del rules[0]\n >>> bucket.lifecycle_rules = rules\n >>> bucket.update()\n\n :setter: Set lifestyle rules for this bucket.\n :getter: Gets the lifestyle rules for this bucket.\n\n :rtype: generator(dict)\n :returns: A sequence of mappings describing each lifecycle rule." 
- }, - { - "code": "def sanitize(url, config):\n if url.scheme != \"mysql\":\n raise ValueError(\"Unsupported database type: '%s'\" % (url.scheme,))\n args, env = get_mysqldump_args_and_env_from_url(url=url)\n process = subprocess.Popen(\n args=[\"mysqldump\"] + args,\n env=env,\n stdout=subprocess.PIPE,\n )\n return sanitize_from_stream(stream=process.stdout, config=config)", - "docstring": "Obtains dump of MySQL database by executing `mysqldump` command and\n sanitizes it output.\n\n :param url: URL to the database which is going to be sanitized, parsed by\n Python's URL parser.\n :type url: urllib.urlparse.ParseResult\n\n :param config: Optional sanitizer configuration to be used for sanitation\n of the values stored in the database.\n :type config: database_sanitizer.config.Configuration|None" - }, - { - "code": "def _parse_table_list_response(self, list_response):\n tables = defaultdict(dict)\n for table in list_response.get('tables', []):\n table_ref = table.get('tableReference')\n if not table_ref:\n continue\n table_id = table_ref.get('tableId', '')\n year_month, app_id = self._parse_table_name(table_id)\n if not year_month:\n continue\n table_date = datetime.strptime(year_month, '%Y-%m')\n unix_seconds = calendar.timegm(table_date.timetuple())\n tables[app_id].update({table_id: unix_seconds})\n tables.default_factory = None\n return tables", - "docstring": "Parse the response received from calling list on tables.\n\n Parameters\n ----------\n list_response\n The response found by calling list on a BigQuery table object.\n\n Returns\n -------\n dict\n Dates referenced by table names" - }, - { - "code": "def with_args(self, **kwargs):\n new_info = mutablerecords.CopyRecord(self)\n new_info.options = new_info.options.format_strings(**kwargs)\n new_info.extra_kwargs.update(kwargs)\n new_info.measurements = [m.with_args(**kwargs) for m in self.measurements]\n return new_info", - "docstring": "Send these keyword-arguments to the phase when called." - }, - { - "code": "def register(self, subscriber):\r\n assert isinstance(subscriber, RequestHandler)\r\n logger.debug('New subscriber')\r\n self.subscribers.add(subscriber)", - "docstring": "Register a new subscriber. This method should be invoked by\r\n listeners to start receiving messages." - }, - { - "code": "def get_command_output(self, shell_id, command_id):\n stdout_buffer, stderr_buffer = [], []\n command_done = False\n while not command_done:\n try:\n stdout, stderr, return_code, command_done = \\\n self._raw_get_command_output(shell_id, command_id)\n stdout_buffer.append(stdout)\n stderr_buffer.append(stderr)\n except WinRMOperationTimeoutError as e:\n pass\n return b''.join(stdout_buffer), b''.join(stderr_buffer), return_code", - "docstring": "Get the Output of the given shell and command\n @param string shell_id: The shell id on the remote machine.\n See #open_shell\n @param string command_id: The command id on the remote machine.\n See #run_command\n #@return [Hash] Returns a Hash with a key :exitcode and :data.\n Data is an Array of Hashes where the cooresponding key\n # is either :stdout or :stderr. The reason it is in an Array so so\n we can get the output in the order it ocurrs on\n # the console." 
- }, - { - "code": "def to_dict(self, converter=None):\n if converter is None:\n converter = self.stringify\n out = dict()\n for k, v in self.iteritems():\n out[k] = converter(v)\n return out", - "docstring": "Returns a copy dict of the current object\n\n If a converter function is given, pass each value to it.\n Per default the values are converted by `self.stringify`." - }, - { - "code": "def iobject_import(self,\n id_and_rev_info,\n elt_name,\n obj_dict,\n markings=None,\n cybox_id=None):\n iobject_type_ns = None\n if ('@xsi:type' in obj_dict or\n '@@embedded_type_info' in obj_dict or\n '@xsi:type' in obj_dict.get('Properties', {}) or\n '@xsi:type' in obj_dict.get('Defined_Object', {}) ):\n if '@xsi:type' in obj_dict:\n iobject_type_ns = obj_dict['@xsi:type'].split(':')[0]\n elif '@xsi:type' in obj_dict.get('Properties', {}):\n iobject_type_ns = obj_dict['Properties']['@xsi:type'].split(':')[0]\n elif '@xsi:type' in obj_dict.get('Defined_Object', {}):\n iobject_type_ns = obj_dict['Defined_Object']['@xsi:type'].split(':')[0]\n else:\n iobject_type_ns = obj_dict['@@embedded_type_info']\n type_info = self.derive_iobject_type(obj_dict['@@ns'], iobject_type_ns, elt_name)\n if (not 'id' in id_and_rev_info or not id_and_rev_info['id']) and (not 'id_ns' in id_and_rev_info):\n logger.info(\"Object of type %s without id information encountered, skipping\" % elt_name)\n return\n if 'id_ns' in id_and_rev_info:\n namespace_uri = id_and_rev_info['id_ns']\n uid = id_and_rev_info['id_uid']\n else:\n (namespace, namespace_uri, uid) = self.split_qname(id_and_rev_info['id'])\n object_timestamp = id_and_rev_info.get('timestamp',None)\n if not object_timestamp:\n object_timestamp = self.default_timestamp\n (info_obj, existed) = MantisImporter.create_iobject(iobject_family_name=type_info['iobject_family_name'],\n iobject_family_revision_name=type_info[\n 'iobject_family_revision_name'],\n iobject_type_name=type_info['iobject_type_name'],\n iobject_type_namespace_uri=type_info[\n 'iobject_type_namespace_uri'],\n iobject_type_revision_name=type_info[\n 'iobject_type_revision_name'],\n iobject_data=obj_dict,\n uid=uid,\n identifier_ns_uri=namespace_uri,\n timestamp= object_timestamp,\n create_timestamp=self.create_timestamp,\n markings=markings,\n config_hooks={\n 'special_ft_handler': self.fact_handler_list(),\n 'datatype_extractor': self.cybox_datatype_extractor,\n 'attr_ignore_predicate': self.attr_ignore_predicate,\n 'force_nonleaf_fact_predicate': self.force_nonleaf_fact_predicate},\n namespace_dict=self.namespace_dict,\n )\n return (info_obj, existed)", - "docstring": "Derives InfoObjectType and import InfoObjectType" - }, - { - "code": "def clean_translated_locales(configuration, langs=None):\n if not langs:\n langs = configuration.translated_locales\n for locale in langs:\n clean_locale(configuration, locale)", - "docstring": "Strips out the warning from all translated po files\n about being an English source file." 
- }, - { - "code": "def ensureFulltextIndex(self, fields, minLength = None) :\n data = {\n \"type\" : \"fulltext\",\n \"fields\" : fields,\n }\n if minLength is not None :\n data[\"minLength\"] = minLength\n ind = Index(self, creationData = data)\n self.indexes[\"fulltext\"][ind.infos[\"id\"]] = ind\n return ind", - "docstring": "Creates a fulltext index if it does not already exist, and returns it" - }, - { - "code": "def binder_spec_from_github_url(github_url):\n tokens = re.split(r'/|:', github_url.replace('.git', ''))\n return '{}/{}/master'.format(tokens[-2], tokens[-1])", - "docstring": "Converts GitHub origin into a Binder spec.\n\n For example:\n git@github.com:SamLau95/nbinteract.git -> SamLau95/nbinteract/master\n https://github.com/Calebs97/riemann_book -> Calebs97/riemann_book/master" - }, - { - "code": "def _read_and_exec_opcode(self, ident=0, expect=None):\n position = self.object_stream.tell()\n (opid,) = self._readStruct(\">B\")\n log_debug(\n \"OpCode: 0x{0:X} -- {1} (at offset 0x{2:X})\".format(\n opid, OpCodeDebug.op_id(opid), position\n ),\n ident,\n )\n if expect and opid not in expect:\n raise IOError(\n \"Unexpected opcode 0x{0:X} -- {1} (at offset 0x{2:X})\".format(\n opid, OpCodeDebug.op_id(opid), position\n )\n )\n try:\n handler = self.opmap[opid]\n except KeyError:\n raise RuntimeError(\n \"Unknown OpCode in the stream: 0x{0:X} (at offset 0x{1:X})\".format(\n opid, position\n )\n )\n else:\n return opid, handler(ident=ident)", - "docstring": "Reads the next opcode, and executes its handler\n\n :param ident: Log identation level\n :param expect: A list of expected opcodes\n :return: A tuple: (opcode, result of the handler)\n :raise IOError: Read opcode is not one of the expected ones\n :raise RuntimeError: Unknown opcode" - }, - { - "code": "def round_linestring_coords(ls, precision):\n return LineString([[round(x, precision) for x in c] for c in ls.coords])", - "docstring": "Round the coordinates of a shapely LineString to some decimal precision.\n\n Parameters\n ----------\n ls : shapely LineString\n the LineString to round the coordinates of\n precision : int\n decimal precision to round coordinates to\n\n Returns\n -------\n LineString" - }, - { - "code": "def get_db_instance_info(self, dbid):\n if not self.connect_to_aws_rds():\n return False\n try:\n instances = self.rdsc.describe_db_instances(dbid).get('DBInstances')\n except:\n return False\n else:\n myinstance = instances[0]\n return myinstance", - "docstring": "Get DB instance info" - }, - { - "code": "def getdict(locale):\n global zhcdicts, dict_zhcn, dict_zhsg, dict_zhtw, dict_zhhk, pfsdict\n if zhcdicts is None:\n loaddict(DICTIONARY)\n if locale == 'zh-cn':\n if dict_zhcn:\n got = dict_zhcn\n else:\n dict_zhcn = zhcdicts['zh2Hans'].copy()\n dict_zhcn.update(zhcdicts['zh2CN'])\n got = dict_zhcn\n elif locale == 'zh-tw':\n if dict_zhtw:\n got = dict_zhtw\n else:\n dict_zhtw = zhcdicts['zh2Hant'].copy()\n dict_zhtw.update(zhcdicts['zh2TW'])\n got = dict_zhtw\n elif locale == 'zh-hk' or locale == 'zh-mo':\n if dict_zhhk:\n got = dict_zhhk\n else:\n dict_zhhk = zhcdicts['zh2Hant'].copy()\n dict_zhhk.update(zhcdicts['zh2HK'])\n got = dict_zhhk\n elif locale == 'zh-sg' or locale == 'zh-my':\n if dict_zhsg:\n got = dict_zhsg\n else:\n dict_zhsg = zhcdicts['zh2Hans'].copy()\n dict_zhsg.update(zhcdicts['zh2SG'])\n got = dict_zhsg\n elif locale == 'zh-hans':\n got = zhcdicts['zh2Hans']\n elif locale == 'zh-hant':\n got = zhcdicts['zh2Hant']\n else:\n got = {}\n if locale not in pfsdict:\n pfsdict[locale] = 
getpfset(got)\n return got", - "docstring": "Generate or get convertion dict cache for certain locale.\n Dictionaries are loaded on demand." - }, - { - "code": "def parse_xml_node(self, node):\n self.group_id = node.getAttributeNS(RTS_NS, 'groupId')\n self._members = []\n for c in node.getElementsByTagNameNS(RTS_NS, 'Members'):\n self._members.append(TargetComponent().parse_xml_node(c))\n return self", - "docstring": "Parse an xml.dom Node object representing a component group into\n this object." - }, - { - "code": "def exec_script(scriptfilename, *args):\n if scriptfilename != os.path.basename(scriptfilename):\n raise SystemError(\"To prevent missuse, the script passed to \"\n \"hookutils.exec-script must be located in \"\n \"the `hooks` directory.\")\n cmd = [os.path.join(os.path.dirname(__file__), scriptfilename)]\n cmd.extend(args)\n return __exec_python_cmd(cmd)", - "docstring": "Executes a Python script in an externally spawned interpreter, and\n returns anything that was emitted in the standard output as a\n single string.\n\n To prevent missuse, the script passed to hookutils.exec-script\n must be located in the `hooks` directory." - }, - { - "code": "def create_design_matrix_2(Z, data, Y_len, lag_no):\n row_count = 1\n for lag in range(1, lag_no+1):\n for reg in range(Y_len):\n Z[row_count, :] = data[reg][(lag_no-lag):-lag]\n row_count += 1\n return Z", - "docstring": "For Python 2.7 - cythonized version only works for 3.5" - }, - { - "code": "def _apply_style(self):\n ttk.Frame.configure(self, style=self._style_name + \".TFrame\")\n self.label.configure(style=self._style_name + \".TLabel\")\n bg = self.style.lookup('TFrame', 'background', default='light grey')\n for label in self.ticklabels:\n label.configure(style=self._style_name + \".TLabel\")\n self.style.configure(self._style_name + \".TFrame\",\n background=self.style.lookup(self._style_name,\n 'background',\n default=bg))\n self.style.map(self._style_name + \".TFrame\",\n background=self.style.map(self._style_name, 'background'))\n self.style.configure(self._style_name + \".TLabel\",\n font=self.style.lookup(self._style_name, 'font', default='TkDefaultFont'),\n background=self.style.lookup(self._style_name, 'background', default=bg),\n foreground=self.style.lookup(self._style_name, 'foreground', default='black'))\n self.style.map(self._style_name + \".TLabel\",\n font=self.style.map(self._style_name, 'font'),\n background=self.style.map(self._style_name, 'background'),\n foreground=self.style.map(self._style_name, 'foreground'))", - "docstring": "Apply the scale style to the frame and labels." - }, - { - "code": "def lock(self, block=True):\n\t\tself._locked = True\n\t\treturn self._lock.acquire(block)", - "docstring": "Lock connection from being used else where" - }, - { - "code": "def parse_headers(cls, msg):\n return list(email.parser.Parser().parsestr(msg).items())", - "docstring": "Parse HTTP headers.\n\n Args:\n msg (str): HTTP message.\n\n Returns:\n (List[Tuple[str, str]): List of header tuples." - }, - { - "code": "def newResponseEvent(self):\n respEvt = ResponseEvent()\n self.respLock.acquire()\n eid = id(respEvt)\n self.respEvents[eid] = respEvt\n self.respLock.release()\n return (respEvt,eid)", - "docstring": "creates a response event and adds it to a waiting list\n When the reponse arrives it will be removed from the list." 
- }, - { - "code": "def CheckBreakpointsExpiration(self):\n with self._lock:\n current_time = BreakpointsManager.GetCurrentTime()\n if self._next_expiration > current_time:\n return\n expired_breakpoints = []\n self._next_expiration = datetime.max\n for breakpoint in six.itervalues(self._active):\n expiration_time = breakpoint.GetExpirationTime()\n if expiration_time <= current_time:\n expired_breakpoints.append(breakpoint)\n else:\n self._next_expiration = min(self._next_expiration, expiration_time)\n for breakpoint in expired_breakpoints:\n breakpoint.ExpireBreakpoint()", - "docstring": "Completes all breakpoints that have been active for too long." - }, - { - "code": "def _value_from_label(self, label):\n unser_val = (label.rel_strength.value, label.meta)\n return cbor.dumps(unser_val)", - "docstring": "Convert a label into a kvl value." - }, - { - "code": "def show_abierrors(self, nids=None, stream=sys.stdout):\n lines = []\n app = lines.append\n for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):\n header = \"=== \" + task.qout_file.path + \"===\"\n app(header)\n report = task.get_event_report()\n if report is not None:\n app(\"num_errors: %s, num_warnings: %s, num_comments: %s\" % (\n report.num_errors, report.num_warnings, report.num_comments))\n app(\"*** ERRORS ***\")\n app(\"\\n\".join(str(e) for e in report.errors))\n app(\"*** BUGS ***\")\n app(\"\\n\".join(str(b) for b in report.bugs))\n else:\n app(\"get_envent_report returned None!\")\n app(\"=\" * len(header) + 2*\"\\n\")\n return stream.writelines(lines)", - "docstring": "Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.\n\n Args:\n nids: optional list of node identifiers used to filter the tasks.\n stream: File-like object. Default: sys.stdout" - }, - { - "code": "def _download_urls(url_list, storage_folder, overwrite_existing,\n meta_handler, access_cookie=None):\n for url in url_list:\n filename = os.path.basename(url)\n if not overwrite_existing and filename in os.listdir(storage_folder):\n continue\n storage_file = os.path.join(storage_folder, filename)\n req = requests.post(url, stream=True, cookies=access_cookie)\n with open(storage_file, 'wb') as lf:\n for chunk in req.iter_content(1024*5):\n lf.write(chunk)\n meta_handler._add_fileio('Downloaded {} to {}'.format(url, filename))\n meta_handler.save()\n return meta_handler", - "docstring": "Save url from url_list to storage_folder\n\n Parameters\n ----------\n url_list: list of str\n Valid url to download\n\n storage_folder: str, valid path\n Location to store the download, folder will be created if\n not existing. If the file is already present in the folder,\n the download depends on the setting in 'overwrite_existing'.\n\n overwrite_existing: boolean, optional\n If False, skip download of file already existing in\n the storage folder (default). 
Set to True to replace\n files.\n\n meta_handler: instance of MRIOMetaData\n\n Returns\n -------\n\n The meta_handler is passed back" - }, - { - "code": "def set_itunes_complete(self):\n try:\n self.itunes_complete = self.soup.find('itunes:complete').string\n self.itunes_complete = self.itunes_complete.lower()\n except AttributeError:\n self.itunes_complete = None", - "docstring": "Parses complete from itunes tags and sets value" - }, - { - "code": "def __pathToTuple(self, path):\n if not path or path.count('/') > 2:\n raise YTFS.PathConvertError(\"Bad path given\")\n try:\n split = path.split('/')\n except (AttributeError, TypeError):\n raise TypeError(\"Path has to be string\")\n if split[0]:\n raise YTFS.PathConvertError(\"Path needs to start with '/'\")\n del split[0]\n try:\n if not split[-1]: split.pop()\n except IndexError:\n raise YTFS.PathConvertError(\"Bad path given\")\n if len(split) > 2:\n raise YTFS.PathConvertError(\"Path is too deep. Max allowed level is 2\")\n try:\n d = split[0]\n except IndexError:\n d = None\n try:\n f = split[1]\n except IndexError:\n f = None\n if not d and f:\n raise YTFS.PathConvertError(\"Bad path given\")\n return (d, f)", - "docstring": "Convert directory or file path to its tuple identifier.\n\n Parameters\n ----------\n path : str\n Path to convert. It can look like /, /directory, /directory/ or /directory/filename.\n\n Returns\n -------\n tup_id : tuple\n Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main\n directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`\n will be ``None``.\n\n Raises\n ------\n YTFS.PathConvertError\n When invalid path is given." - }, - { - "code": "def face_normals(self, values):\n if values is not None:\n values = np.asanyarray(values,\n order='C',\n dtype=np.float64)\n nonzero = np.logical_or(values > tol.merge,\n values < -tol.merge)\n if not nonzero.any():\n log.warning('face_normals all zero, ignoring!')\n return\n check, valid = triangles.normals(\n self.vertices.view(np.ndarray)[self.faces[:20]])\n compare = np.zeros((len(valid), 3))\n compare[valid] = check\n if not np.allclose(compare, values[:20]):\n log.warning('face_normals didn\\'t match triangles, ignoring!')\n return\n self._cache['face_normals'] = values", - "docstring": "Assign values to face normals.\n\n Parameters\n -------------\n values : (len(self.faces), 3) float\n Unit face normals" - }, - { - "code": "def remove_entities(self, entity_ids):\n if len(entity_ids) == 0:\n return\n keep = np.ones(len(self.entities))\n keep[entity_ids] = False\n self.entities = self.entities[keep]", - "docstring": "Remove entities by index.\n\n Parameters\n -----------\n entity_ids : (n,) int\n Indexes of self.entities to remove" - }, - { - "code": "def compare_view(self, request, object_id, version_id, extra_context=None):\n opts = self.model._meta\n object_id = unquote(object_id)\n current = Version.objects.get_for_object_reference(self.model, object_id)[0]\n revision = Version.objects.get_for_object_reference(self.model, object_id).filter(id=version_id)[0]\n the_diff = make_diff(current, revision)\n context = {\n \"title\": _(\"Comparing current %(model)s with revision created %(date)s\") % {\n 'model': current,\n 'date' : get_date(revision),\n },\n \"opts\": opts,\n \"compare_list_url\": reverse(\"%s:%s_%s_comparelist\" % (self.admin_site.name, opts.app_label, opts.model_name),\n args=(quote(object_id),)),\n \"diff_list\": the_diff,\n }\n extra_context = 
extra_context or {}\n context.update(extra_context)\n return render(request, self.compare_template or self._get_template_list(\"compare.html\"),\n context)", - "docstring": "Actually compare two versions." - }, - { - "code": "def get_scanner_param_mandatory(self, param):\n assert isinstance(param, str)\n entry = self.scanner_params.get(param)\n if not entry:\n return False\n return entry.get('mandatory')", - "docstring": "Returns if a scanner parameter is mandatory." - }, - { - "code": "def buggy_div(request):\n a = float(request.GET.get('a', '0'))\n b = float(request.GET.get('b', '0'))\n return JsonResponse({'result': a / b})", - "docstring": "A buggy endpoint to perform division between query parameters a and b. It will fail if b is equal to 0 or\n either a or b are not float.\n\n :param request: request object\n :return:" - }, - { - "code": "def exit(self, message):\n self.output(message, normal=True, color=\"green\")\n sys.exit()", - "docstring": "outputs an exit message and exits\n\n :param message:\n The message to be outputed\n :type message:\n String\n\n :returns:\n void" - }, - { - "code": "def cleanup_dead_jobs():\n from .models import WooeyJob\n inspect = celery_app.control.inspect()\n active_tasks = {task['id'] for worker, tasks in six.iteritems(inspect.active()) for task in tasks}\n active_jobs = WooeyJob.objects.filter(status=WooeyJob.RUNNING)\n to_disable = set()\n for job in active_jobs:\n if job.celery_id not in active_tasks:\n to_disable.add(job.pk)\n WooeyJob.objects.filter(pk__in=to_disable).update(status=WooeyJob.FAILED)", - "docstring": "This cleans up jobs that have been marked as ran, but are not queue'd in celery. It is meant\n to cleanup jobs that have been lost due to a server crash or some other reason a job is\n in limbo." - }, - { - "code": "def _get_file(self, path, prepend=False):\n if prepend:\n path = os.path.join(self._dirname(), path)\n extracted = self._tar.extractfile(path)\n if extracted:\n return extracted\n raise DapFileError(('Could not read %s from %s, maybe it\\'s a directory,' +\n 'bad link or the dap file is corrupted') % (path, self.basename))", - "docstring": "Extracts a file from dap to a file-like object" - }, - { - "code": "def deprecated(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n warnings.warn(fn.__doc__.split('\\n')[0],\n category=DeprecationWarning, stacklevel=2)\n return fn(*args, **kwargs)\n return wrapper", - "docstring": "Mark a function as deprecated and warn the user on use." - }, - { - "code": "def toProtocolElement(self):\n gaContinuousSet = protocol.ContinuousSet()\n gaContinuousSet.id = self.getId()\n gaContinuousSet.dataset_id = self.getParentContainer().getId()\n gaContinuousSet.reference_set_id = pb.string(\n self._referenceSet.getId())\n gaContinuousSet.name = self._name\n gaContinuousSet.source_uri = self._sourceUri\n attributes = self.getAttributes()\n for key in attributes:\n gaContinuousSet.attributes.attr[key] \\\n .values.extend(protocol.encodeValue(attributes[key]))\n return gaContinuousSet", - "docstring": "Returns the representation of this ContinuousSet as the corresponding\n ProtocolElement." 
- }, - { - "code": "def match(self, **kwargs):\n if kwargs:\n if self.definition.get('model') is None:\n raise ValueError(\"match() with filter only available on relationships with a model\")\n output = process_filter_args(self.definition['model'], kwargs)\n if output:\n self.filters.append(output)\n return self", - "docstring": "Traverse relationships with properties matching the given parameters.\n\n e.g: `.match(price__lt=10)`\n\n :param kwargs: see `NodeSet.filter()` for syntax\n :return: self" - }, - { - "code": "def set_gamma_ramp(monitor, ramp):\n gammaramp = _GLFWgammaramp()\n gammaramp.wrap(ramp)\n _glfw.glfwSetGammaRamp(monitor, ctypes.pointer(gammaramp))", - "docstring": "Sets the current gamma ramp for the specified monitor.\n\n Wrapper for:\n void glfwSetGammaRamp(GLFWmonitor* monitor, const GLFWgammaramp* ramp);" - }, - { - "code": "def geom_wh(geom):\n e = geom.GetEnvelope()\n h = e[1] - e[0]\n w = e[3] - e[2]\n return w, h", - "docstring": "Compute width and height of geometry in projected units" - }, - { - "code": "def zoom(self, factor, order=1, verbose=True):\n raise NotImplementedError\n import scipy.ndimage\n for axis in self._axes:\n axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)\n for channel in self.channels:\n channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)\n if verbose:\n print(\"data zoomed to new shape:\", self.shape)", - "docstring": "Zoom the data array using spline interpolation of the requested order.\n\n The number of points along each axis is increased by factor.\n See `scipy ndimage`__ for more info.\n\n __ http://docs.scipy.org/doc/scipy/reference/\n generated/scipy.ndimage.interpolation.zoom.html\n\n Parameters\n ----------\n factor : float\n The number of points along each axis will increase by this factor.\n order : int (optional)\n The order of the spline used to interpolate onto new points.\n verbose : bool (optional)\n Toggle talkback. Default is True." - }, - { - "code": "def to_obj(self, wd=False, pack=False, relpath=None):\n obj = CommentedMap()\n if pack:\n obj['run'] = self.orig\n elif relpath is not None:\n if self.from_url:\n obj['run'] = self.run\n else:\n obj['run'] = os.path.relpath(self.run, relpath)\n elif wd:\n if self.from_url:\n obj['run'] = self.run\n else:\n obj['run'] = os.path.basename(self.run)\n else:\n obj['run'] = self.run\n obj['in'] = self.step_inputs\n obj['out'] = self.output_names\n if self.is_scattered:\n obj['scatter'] = self.scattered_inputs\n if self.scatter_method is not None:\n obj['scatterMethod'] = self.scatter_method\n return obj", - "docstring": "Return the step as an dict that can be written to a yaml file.\n\n Returns:\n dict: yaml representation of the step." 
- }, - { - "code": "def alignment(job, ids, input_args, sample):\n uuid, urls = sample\n work_dir = job.fileStore.getLocalTempDir()\n output_dir = input_args['output_dir']\n key_path = input_args['ssec']\n cores = multiprocessing.cpu_count()\n return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',\n 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')\n for url in urls:\n download_encrypted_file(work_dir, url, key_path, os.path.basename(url))\n docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]\n bwa_command = [\"jvivian/bwa\",\n \"mem\",\n \"-R\", \"@RG\\tID:{0}\\tPL:Illumina\\tSM:{0}\\tLB:KapaHyper\".format(uuid),\n \"-T\", str(0),\n \"-t\", str(cores),\n \"/data/ref.fa\"] + [os.path.join('/data/', os.path.basename(x)) for x in urls]\n bamsort_command = [\"jeltje/biobambam\",\n \"/usr/local/bin/bamsort\",\n \"inputformat=sam\",\n \"level=1\",\n \"inputthreads={}\".format(cores),\n \"outputthreads={}\".format(cores),\n \"calmdnm=1\",\n \"calmdnmrecompindetonly=1\",\n \"calmdnmreference=/data/ref.fa\",\n \"I=/data/{}\".format(uuid + '.sam')]\n with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:\n subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)\n with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:\n subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)\n ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))\n if input_args['s3_dir']:\n job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')\n if input_args['output_dir']:\n move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam'])", - "docstring": "Runs BWA and then Bamsort on the supplied fastqs for this sample\n\n Input1: Toil Job instance\n Input2: jobstore id dictionary\n Input3: Input arguments dictionary\n Input4: Sample tuple -- contains uuid and urls for the sample" - }, - { - "code": "def update_slugs(apps, schema_editor):\n Representative = apps.get_model(\"representatives\", \"Representative\")\n for rep in Representative.objects.all():\n rep.slug = '%s-%s' % (rep.slug, rep.birth_date)\n rep.save()", - "docstring": "Include birthdate in slugs" - }, - { - "code": "def register(self, cls, instance):\n if not issubclass(cls, DropletInterface):\n raise TypeError('Given class is not a NAZInterface subclass: %s'\n % cls)\n if not isinstance(instance, cls):\n raise TypeError('Given instance does not implement the class: %s'\n % instance)\n if instance.name in self.INSTANCES_BY_NAME:\n if self.INSTANCES_BY_NAME[instance.name] != instance:\n raise ValueError('Given name is registered '\n 'by other instance: %s' % instance.name)\n self.INSTANCES_BY_INTERFACE[cls].add(instance)\n self.INSTANCES_BY_NAME[instance.name] = instance", - "docstring": "Register the given instance as implementation for a class interface" - }, - { - "code": "def selected_fields(self):\n items = self.lstFields.selectedItems()\n if items and self.mode == MULTI_MODE:\n return [item.text() for item in items]\n elif items and self.mode == SINGLE_MODE:\n return items[0].text()\n else:\n return []", - "docstring": "Obtain the fields selected by user.\n\n :returns: Keyword of the selected field.\n :rtype: list, str" - }, - { - "code": "def set_preferred_names(self):\n if len(self.entries) == 0:\n self.log.error(\"WARNING: `entries` is empty, loading stubs\")\n self.load_stubs()\n task_str = self.get_current_task_str()\n for ni, oname in enumerate(pbar(self.entries, task_str)):\n name = 
self.add_entry(oname)\n self.entries[name].set_preferred_name()\n if self.args.travis and ni > self.TRAVIS_QUERY_LIMIT:\n break\n return", - "docstring": "Choose between each entries given name and its possible aliases for\n the best one." - }, - { - "code": "def read(self, entity=None, attrs=None, ignore=None, params=None):\n if attrs is None:\n attrs = self.read_json()\n if ignore is None:\n ignore = set()\n if 'parameters' in attrs:\n attrs['host_parameters_attributes'] = attrs.pop('parameters')\n else:\n ignore.add('host_parameters_attributes')\n if 'content_facet_attributes' not in attrs:\n ignore.add('content_facet_attributes')\n ignore.add('compute_attributes')\n ignore.add('interfaces_attributes')\n ignore.add('root_pass')\n ignore.add('image')\n ignore.add('interface')\n ignore.add('build_status_label')\n result = super(Host, self).read(entity, attrs, ignore, params)\n if attrs.get('image_id'):\n result.image = Image(\n server_config=self._server_config,\n id=attrs.get('image_id'),\n compute_resource=attrs.get('compute_resource_id'),\n )\n else:\n result.image = None\n if 'interfaces' in attrs and attrs['interfaces']:\n result.interface = [\n Interface(\n self._server_config,\n host=result.id,\n id=interface['id'],\n )\n for interface in attrs['interfaces']\n ]\n if 'build_status_label' in attrs:\n result.build_status_label = attrs['build_status_label']\n return result", - "docstring": "Deal with oddly named and structured data returned by the server.\n\n For more information, see `Bugzilla #1235019\n `_\n and `Bugzilla #1449749\n `_.\n\n `content_facet_attributes` are returned only in case any of facet\n attributes were actually set.\n\n Also add image to the response if needed, as\n :meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize\n image." - }, - { - "code": "def _getUniqueDev(self, devpath):\n realpath = os.path.realpath(devpath)\n mobj = re.match('\\/dev\\/(.*)$', realpath)\n if mobj:\n dev = mobj.group(1)\n if dev in self._diskStats:\n return dev\n else:\n try:\n (major, minor) = self._getDevMajorMinor(realpath)\n except:\n return None\n return self._mapMajorMinor2dev.get((major, minor))\n return None", - "docstring": "Return unique device for any block device path.\n \n @param devpath: Full path for block device.\n @return: Unique device string without the /dev prefix." 
- }, - { - "code": "def nanopub_stats(ctx, input_fn):\n counts = {\n \"nanopubs\": 0,\n \"assertions\": {\"total\": 0, \"subject_only\": 0, \"nested\": 0, \"relations\": {}},\n }\n for np in bnf.read_nanopubs(input_fn):\n if \"nanopub\" in np:\n counts[\"nanopubs\"] += 1\n counts[\"assertions\"][\"total\"] += len(np[\"nanopub\"][\"assertions\"])\n for assertion in np[\"nanopub\"][\"assertions\"]:\n if assertion[\"relation\"] is None:\n counts[\"assertions\"][\"subject_only\"] += 1\n else:\n if re.match(\"\\s*\\(\", assertion[\"object\"]):\n counts[\"assertions\"][\"nested\"] += 1\n if (\n not assertion.get(\"relation\")\n in counts[\"assertions\"][\"relations\"]\n ):\n counts[\"assertions\"][\"relations\"][assertion.get(\"relation\")] = 1\n else:\n counts[\"assertions\"][\"relations\"][\n assertion.get(\"relation\")\n ] += 1\n counts[\"assertions\"][\"relations\"] = sorted(counts[\"assertions\"][\"relations\"])\n print(\"DumpVar:\\n\", json.dumps(counts, indent=4))", - "docstring": "Collect statistics on nanopub file\n\n input_fn can be json, jsonl or yaml and additionally gzipped" - }, - { - "code": "def isclose(a, b, rtol=1e-5, atol=1e-8):\n return abs(a - b) < (atol + rtol * abs(b))", - "docstring": "This is essentially np.isclose, but slightly faster." - }, - { - "code": "def set_brightness(self, brightness):\n brightness = min([1.0, max([brightness, 0.0])])\n self.state.brightness = brightness\n self._repeat_last_frame()\n sequence_number = self.zmq_publisher.publish_brightness(brightness)\n logging.debug(\"Set brightness to {brightPercent:05.1f}%\".format(brightPercent=brightness*100))\n return (True, sequence_number, \"OK\")", - "docstring": "set general brightness in range 0...1" - }, - { - "code": "def makekey(self, *args):\r\n if len(args) > 1:\r\n args = args[:1] + (args[1].encode('utf-8'),) + args[2:]\r\n if len(args) == 3 and type(args[-1]) == str:\r\n return struct.pack(self.keyfmt[:1 + len(args)], b'.', *args[:-1]) + args[-1].encode('utf-8')\r\n elif len(args) == 3 and type(args[-1]) == type(-1) and args[-1] < 0:\r\n return struct.pack(self.keyfmt[:1 + len(args)] + self.fmt.lower(), b'.', *args)\r\n else:\r\n return struct.pack(self.keyfmt[:2 + len(args)], b'.', *args)", - "docstring": "return a binary key for the nodeid, tag and optional value" - }, - { - "code": "def arr_to_vector(arr):\n dim = array_dim(arr)\n tmp_arr = []\n for n in range(len(dim) - 1):\n for inner in arr:\n for i in inner:\n tmp_arr.append(i)\n arr = tmp_arr\n tmp_arr = []\n return arr", - "docstring": "Reshape a multidimensional array to a vector." - }, - { - "code": "def restart(self):\n restart_file, irdvars = None, None\n wf_files = self.outdir.find_1wf_files()\n if wf_files is not None:\n restart_file = wf_files[0].path\n irdvars = irdvars_for_ext(\"1WF\")\n if len(wf_files) != 1:\n restart_file = None\n self.history.critical(\"Found more than one 1WF file in outdir. Restart is ambiguous!\")\n if restart_file is None:\n den_files = self.outdir.find_1den_files()\n if den_files is not None:\n restart_file = den_files[0].path\n irdvars = {\"ird1den\": 1}\n if len(den_files) != 1:\n restart_file = None\n self.history.critical(\"Found more than one 1DEN file in outdir. 
Restart is ambiguous!\")\n if restart_file is None:\n raise self.RestartError(\"%s: Cannot find the 1WF|1DEN file to restart from.\" % self)\n self.history.info(\"Will restart from %s\", restart_file)\n restart_file = self.out_to_in(restart_file)\n self.set_vars(irdvars)\n return self._restart()", - "docstring": "DFPT calculations can be restarted only if we have the 1WF file or the 1DEN file.\n from which we can read the first-order wavefunctions or the first order density.\n Prefer 1WF over 1DEN since we can reuse the wavefunctions." - }, - { - "code": "def ensure_async(function_original):\n def wrapper(*args, **kwargs):\n try:\n result = function_original(*args, **kwargs)\n if isinstance(result, Deferred):\n return result\n d = Deferred()\n d.callback(result)\n return d\n except:\n return fail()\n return wrapper", - "docstring": "A function decorated with this will always return a defer.Deferred\n even when returning synchronous result or raise an exception." - }, - { - "code": "def check_job(cls, job_details):\n return check_log(job_details.logfile, cls.string_exited, cls.string_successful)", - "docstring": "Check the status of a specfic job" - }, - { - "code": "def get_assignable_repository_ids(self, repository_id):\n mgr = self._get_provider_manager('REPOSITORY', local=True)\n lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)\n repositories = lookup_session.get_repositories()\n id_list = []\n for repository in repositories:\n id_list.append(repository.get_id())\n return IdList(id_list)", - "docstring": "Gets a list of repositories including and under the given repository node in which any asset can be assigned.\n\n arg: repository_id (osid.id.Id): the ``Id`` of the\n ``Repository``\n return: (osid.id.IdList) - list of assignable repository ``Ids``\n raise: NullArgument - ``repository_id`` is ``null``\n raise: OperationFailed - unable to complete request\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def from_wif_or_ewif_file(path: str, password: Optional[str] = None) -> SigningKeyType:\n with open(path, 'r') as fh:\n wif_content = fh.read()\n regex = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)\n match = search(regex, wif_content)\n if not match:\n raise Exception('Error: Bad format WIF or EWIF v1 file')\n wif_hex = match.groups()[0]\n return SigningKey.from_wif_or_ewif_hex(wif_hex, password)", - "docstring": "Return SigningKey instance from Duniter WIF or EWIF file\n\n :param path: Path to WIF of EWIF file\n :param password: Password needed for EWIF file" - }, - { - "code": "def get_coordinate_system(self):\n coordinate_list = ['specimen']\n initial_coordinate = 'specimen'\n for specimen in self.specimens:\n if 'geographic' not in coordinate_list and self.Data[specimen]['zijdblock_geo']:\n coordinate_list.append('geographic')\n initial_coordinate = 'geographic'\n if 'tilt-corrected' not in coordinate_list and self.Data[specimen]['zijdblock_tilt']:\n coordinate_list.append('tilt-corrected')\n return initial_coordinate, coordinate_list", - "docstring": "Check self.Data for available coordinate systems.\n\n Returns\n ---------\n initial_coordinate, coordinate_list : str, list\n i.e., 'geographic', ['specimen', 'geographic']" - }, - { - "code": "def set_parse_attributes(self, string, location, tokens):\n \"Fluent API for setting parsed location\"\n self.string = string\n self.location = location\n self.tokens = tokens\n return self", - "docstring": "Fluent API for setting parsed location" - }, - { - "code": "def 
print_config(self, _):\n for section in self.config.sections():\n print '[%s]' % section\n items = dict(self.config.items(section))\n for k in items:\n print \"%(a)s=%(b)s\" % {'a': k, 'b': items[k]}\n print ''", - "docstring": "Print configuration." - }, - { - "code": "def from_val(val_schema):\n definition = getattr(val_schema, \"definition\", val_schema) if isinstance(\n val_schema, BaseSchema) else val_schema\n if isinstance(definition, dict):\n return _dict_to_teleport(definition)\n if isinstance(definition, list):\n if len(definition) == 1:\n return {\"Array\": from_val(definition[0])}\n if definition in VAL_PRIMITIVES:\n return VAL_PRIMITIVES[definition]\n raise SerializationError(\n \"Serializing %r not (yet) supported.\" % definition)", - "docstring": "Serialize a val schema to teleport." - }, - { - "code": "def is_good_age_ratios(self):\n ratios = self.age_ratios()\n if None in ratios:\n clean_ratios = [x for x in ratios if x is not None]\n ratios.remove(None)\n if sum(clean_ratios) > 1:\n return False\n else:\n if sum(ratios) != 1:\n return False\n return True", - "docstring": "Method to check the sum of age ratio is 1.\n\n :returns: True if the sum is 1 or the sum less than 1 but there is\n None.\n :rtype: bool" - }, - { - "code": "def get_configs(self):\n self.check_configs_ready()\n result = []\n for bot in self.bots.values():\n result.extend(list(bot.get_models()))\n return result", - "docstring": "Return a list of all installed configs." - }, - { - "code": "def _make_temp_directory(prefix):\n temp_dir = _make_temp_filename(prefix=str(prefix))\n _os.makedirs(temp_dir)\n return temp_dir", - "docstring": "Generate a temporary directory that would not live beyond the lifetime of\n unity_server.\n\n Caller is expected to clean up the temp file as soon as the directory is no\n longer needed. But the directory will be cleaned as unity_server restarts" - }, - { - "code": "def dict2obj(d):\n if isinstance(d, (Mapping, list, tuple)):\n try:\n d = dict(d)\n except (ValueError, TypeError):\n return d\n else:\n return d\n obj = Object()\n for k, v in viewitems(d):\n obj.__dict__[k] = dict2obj(v)\n return obj", - "docstring": "Convert a dict to an object or namespace\n\n\n >>> d = {'a': 1, 'b': {'c': 2}, 'd': [\"hi\", {'foo': \"bar\"}]}\n >>> obj = dict2obj(d)\n >>> obj.b.c\n 2\n >>> obj.d\n ['hi', {'foo': 'bar'}]\n >>> d = {'a': 1, 'b': {'c': 2}, 'd': [(\"hi\", {'foo': \"bar\"})]}\n >>> obj = dict2obj(d)\n >>> obj.d.hi.foo\n 'bar'" - }, - { - "code": "def todo(request):\n eartag_list = Animal.objects.filter(Born__lt=(datetime.date.today() - datetime.timedelta(days=settings.WEAN_AGE))).filter(MouseID__isnull=True, Alive=True)\n genotype_list = Animal.objects.filter(Q(Genotype='N.D.')|Q(Genotype__icontains='?')).filter(Alive=True, Born__lt=(datetime.date.today() - datetime.timedelta(days=settings.GENOTYPE_AGE)))\n wean = datetime.date.today() - datetime.timedelta(days=settings.WEAN_AGE)\n wean_list = Animal.objects.filter(Born__lt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage')\n return render(request, 'todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list})", - "docstring": "This view generates a summary of the todo lists.\n \n The login restricted view passes lists for ear tagging, genotyping and weaning and passes them to the template todo.html." 
- }, - { - "code": "def get_data_type_signature(self):\n if self._data_type_signature is None:\n sig = Signature(self.get_dsdl_signature())\n fields = self.request_fields + self.response_fields if self.kind == CompoundType.KIND_SERVICE else self.fields\n for field in fields:\n field_sig = field.type.get_data_type_signature()\n if field_sig is not None:\n sig_value = sig.get_value()\n sig.add(bytes_from_crc64(field_sig))\n sig.add(bytes_from_crc64(sig_value))\n self._data_type_signature = sig.get_value()\n return self._data_type_signature", - "docstring": "Computes data type signature of this type. The data type signature is\n guaranteed to match only if all nested data structures are compatible.\n Please refer to the specification for details about signatures." - }, - { - "code": "def transformer_cifar10_memory_v0():\n hparams = transformer_wikitext103_l4k_memory_v0()\n hparams.num_hidden_layers = 6\n hparams.max_length = 32 * 32 * 3\n hparams.split_targets_chunk_length = 64 * 3\n hparams.split_targets_max_chunks = int(\n hparams.max_length / hparams.split_targets_chunk_length)\n hparams.num_memory_items = 128 * 3\n target_images_per_batch = 4\n hparams.batch_size = int(target_images_per_batch * (\n hparams.max_length / hparams.split_targets_chunk_length))\n hparams.recurrent_memory_batch_size = hparams.batch_size\n hparams.max_relative_position = (\n hparams.num_memory_items + hparams.split_targets_chunk_length)\n return hparams", - "docstring": "HParams for training image_cifar10_plain_gen_flat_rev with memory." - }, - { - "code": "def exportCertificate(self, certificate, folder):\n url = self._url + \"/sslcertificates/%s/export\" % certificate\n params = {\n \"f\" : \"json\",\n }\n return self._get(url=url,\n param_dict=params,\n out_folder=folder)", - "docstring": "gets the SSL Certificates for a given machine" - }, - { - "code": "def evaluate(self, values):\n ret = self.mba.evaluate(self.vec, values)\n if isinstance(ret, six.integer_types):\n return ret\n return self.from_vec(self.mba, ret)", - "docstring": "Evaluates the expression to an integer\n\n values is a dictionnary that associates n-bit variables to integer\n values. Every symbolic variables used in the expression must be\n represented. \n\n For instance, let x and y 4-bit variables, and e = x+y:\n\n >>> mba = MBA(4)\n >>> x = mba.var('x')\n >>> y = mba.var('y')\n >>> e = x+y\n\n To evaluate e with x=4 and y=5, we can do:\n\n >>> e.eval({x: 4, y: 5})\n 9\n\n If a variable is missing from values, an exception will occur. 
(x\n or y in the example above)" - }, - { - "code": "def fabs(x):\n if isinstance(x, UncertainFunction):\n mcpts = np.fabs(x._mcpts)\n return UncertainFunction(mcpts)\n else:\n return np.fabs(x)", - "docstring": "Absolute value function" - }, - { - "code": "def prj_view_seq(self, *args, **kwargs):\n if not self.cur_prj:\n return\n i = self.prj_seq_tablev.currentIndex()\n item = i.internalPointer()\n if item:\n seq = item.internal_data()\n self.view_seq(seq)", - "docstring": "View the, in the prj_seq_tablev selected, sequence.\n\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def remote_root(self):\n return os.path.relpath(self.address.spec_path, self.target_base)", - "docstring": "The remote package root prefix portion of the the full `import_path`" - }, - { - "code": "def generate_slug(self, model_instance):\n queryset = model_instance.__class__._default_manager.all()\n lookup = {'%s__regex' % self.attname: r'^.{%s}$' % self.length}\n if queryset.filter(**lookup).count() >= len(self.chars)**self.length:\n raise FieldError(\"No available slugs remaining.\")\n slug = get_random_string(self.length, self.chars)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n while queryset.filter(**kwargs):\n slug = get_random_string(self.length, self.chars)\n kwargs[self.attname] = slug\n return slug", - "docstring": "Returns a unique slug." - }, - { - "code": "def validate(self, **kwargs):\n self.check_crossrefs()\n for value in self.values():\n value.validate(**kwargs)", - "docstring": "Validates each entry (passing the provided arguments down to them and\n also tries to resolve all cross-references between the entries." - }, - { - "code": "def rotation_matrix(self, angles):\n squeeze_out = (np.broadcast(*angles).shape == ())\n angles_in = angles\n angles = tuple(np.array(angle, dtype=float, copy=False, ndmin=1)\n for angle in angles)\n if (self.check_bounds and\n not is_inside_bounds(angles, self.motion_params)):\n raise ValueError('`angles` {} not in the valid range '\n '{}'.format(angles_in, self.motion_params))\n matrix = euler_matrix(*angles)\n if squeeze_out:\n matrix = matrix.squeeze()\n return matrix", - "docstring": "Return the rotation matrix to the system state at ``angles``.\n\n Parameters\n ----------\n angles : `array-like` or sequence\n Euler angles in radians describing the rotation of the detector.\n The length of the provided argument (along the first axis in\n case of an array) must be equal to the number of Euler angles\n in this geometry.\n\n Returns\n -------\n rot : `numpy.ndarray`\n Rotation matrix (or matrices) mapping vectors at the\n initial state to the ones in the state defined by ``angles``.\n The rotation is extrinsic, i.e., defined in the \"world\"\n coordinate system.\n If ``angles`` is a single pair (or triplet) of Euler angles,\n an array of shape ``(3, 3)`` representing a single matrix is\n returned. Otherwise, the shape of the returned array is\n ``broadcast(*angles).shape + (3, 3)``." - }, - { - "code": "def resetToPreviousLoc(self):\r\n self.rect.left = self.startDraggingX\r\n self.rect.top = self.startDraggingY", - "docstring": "Resets the loc of the dragger to place where dragging started.\r\n\r\n This could be used in a test situation if the dragger was dragged to an incorrect location." 
- }, - { - "code": "def _get_current_deployment_label(self):\n deploymentId = self._get_current_deployment_id()\n deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,\n deploymentId=deploymentId,\n **self._common_aws_args).get('deployment')\n if deployment:\n return deployment.get('description')\n return None", - "docstring": "Helper method to find the deployment label that the stage_name is currently associated with." - }, - { - "code": "def approximate_cholesky(self, epsilon=1e-6):\n r\n Wk = self._C_k[self._columns, :]\n L0 = spd_inv_split(Wk, epsilon=epsilon)\n L = np.dot(self._C_k, L0)\n return L", - "docstring": "r\"\"\" Compute low-rank approximation to the Cholesky decomposition of target matrix.\n\n The decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive.\n\n Parameters\n ----------\n epsilon : float, optional, default 1e-6\n Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest\n negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues\n will be cut off.\n\n Returns\n -------\n L : ndarray((n,m), dtype=float)\n Cholesky matrix such that `A \\approx L L^{\\top}`. Number of columns :math:`m` is most at the number of columns\n used in the Nystroem approximation, but may be smaller depending on epsilon." - }, - { - "code": "def residmap(self, prefix='', **kwargs):\n timer = Timer.create(start=True)\n self.logger.info('Generating residual maps')\n schema = ConfigSchema(self.defaults['residmap'])\n config = schema.create_config(self.config['residmap'], **kwargs)\n config['model'].setdefault('Index', 2.0)\n config['model'].setdefault('SpectrumType', 'PowerLaw')\n config['model'].setdefault('SpatialModel', 'PointSource')\n config['model'].setdefault('Prefactor', 1E-13)\n o = self._make_residual_map(prefix, **config)\n if config['make_plots']:\n plotter = plotting.AnalysisPlotter(self.config['plotting'],\n fileio=self.config['fileio'],\n logging=self.config['logging'])\n plotter.make_residmap_plots(o, self.roi)\n self.logger.info('Finished residual maps')\n outfile = utils.format_filename(self.workdir, 'residmap',\n prefix=[o['name']])\n if config['write_fits']:\n o['file'] = os.path.basename(outfile) + '.fits'\n self._make_residmap_fits(o, outfile + '.fits')\n if config['write_npy']:\n np.save(outfile + '.npy', o)\n self.logger.info('Execution time: %.2f s', timer.elapsed_time)\n return o", - "docstring": "Generate 2-D spatial residual maps using the current ROI\n model and the convolution kernel defined with the `model`\n argument.\n\n Parameters\n ----------\n prefix : str\n String that will be prefixed to the output residual map files.\n\n {options}\n\n Returns\n -------\n maps : dict\n A dictionary containing the `~fermipy.utils.Map` objects\n for the residual significance and amplitude." 
- }, - { - "code": "def plot_wave(fps=30, frequency=1, wavetime=3, interactive=False,\n off_screen=False, notebook=None):\n cpos = [(6.879481857604187, -32.143727535933195, 23.05622921691103),\n (-0.2336056403734026, -0.6960083534590372, -0.7226721553894022),\n (-0.008900669873416645, 0.6018246347860926, 0.7985786667826725)]\n X = np.arange(-10, 10, 0.25)\n Y = np.arange(-10, 10, 0.25)\n X, Y = np.meshgrid(X, Y)\n R = np.sqrt(X**2 + Y**2)\n Z = np.sin(R)\n sgrid = vtki.StructuredGrid(X, Y, Z)\n points = sgrid.points.copy()\n plotter = vtki.Plotter(off_screen=off_screen, notebook=notebook)\n plotter.add_mesh(sgrid, scalars=Z.ravel())\n plotter.camera_position = cpos\n plotter.plot(title='Wave Example', window_size=[800, 600],\n auto_close=False, interactive_update=True)\n tdelay = 1. / fps\n tlast = time.time()\n tstart = time.time()\n while time.time() - tstart < wavetime:\n telap = time.time() - tstart\n phase = telap * 2 * np.pi * frequency\n Z = np.sin(R + phase)\n points[:, -1] = Z.ravel()\n plotter.update_coordinates(points, render=False)\n plotter.update_scalars(Z.ravel(), render=False)\n rstart = time.time()\n plotter.update()\n rstop = time.time()\n tpast = time.time() - tlast\n if tpast < tdelay and tpast >= 0:\n time.sleep(tdelay - tpast)\n tlast = time.time()\n plotter.close()\n return points", - "docstring": "Plot a 3D moving wave in a render window.\n\n Parameters\n ----------\n fps : int, optional\n Maximum frames per second to display. Defaults to 30.\n\n frequency: float, optional\n Wave cycles per second. Defaults to 1\n\n wavetime : float, optional\n The desired total display time in seconds. Defaults to 3 seconds.\n\n interactive: bool, optional\n Allows the user to set the camera position before the start of the\n wave movement. Default False.\n\n off_screen : bool, optional\n Enables off screen rendering when True. Used for automated testing.\n Disabled by default.\n\n Returns\n -------\n points : np.ndarray\n Position of points at last frame." - }, - { - "code": "def block(self):\n self._nosig = True\n yield\n self._nosig = False\n if self._interrupted:\n raise SystemExit(\"Aborted...\")", - "docstring": "While this context manager is active any signals for aborting\n the process will be queued and exit the program once the context\n is left." - }, - { - "code": "def runtime_import(object_path):\n obj_module, obj_element = object_path.rsplit(\".\", 1)\n loader = __import__(obj_module, globals(), locals(), [str(obj_element)])\n return getattr(loader, obj_element)", - "docstring": "Import at runtime." - }, - { - "code": "def _ReceiveOp(self):\n try:\n fs_msg, received_bytes = self._fs.Recv()\n except (IOError, struct.error):\n logging.critical(\"Broken local Fleetspeak connection (read end).\")\n raise\n received_type = fs_msg.data.TypeName()\n if not received_type.endswith(\"grr.GrrMessage\"):\n raise ValueError(\n \"Unexpected proto type received through Fleetspeak: %r; expected \"\n \"grr.GrrMessage.\" % received_type)\n stats_collector_instance.Get().IncrementCounter(\"grr_client_received_bytes\",\n received_bytes)\n grr_msg = rdf_flows.GrrMessage.FromSerializedString(fs_msg.data.value)\n grr_msg.auth_state = jobs_pb2.GrrMessage.AUTHENTICATED\n self._threads[\"Worker\"].QueueMessages([grr_msg])", - "docstring": "Receives a single message through Fleetspeak." 
- }, - { - "code": "def get_instance_route53_names(self, instance):\n instance_attributes = [ 'public_dns_name', 'private_dns_name',\n 'ip_address', 'private_ip_address' ]\n name_list = set()\n for attrib in instance_attributes:\n try:\n value = getattr(instance, attrib)\n except AttributeError:\n continue\n if value in self.route53_records:\n name_list.update(self.route53_records[value])\n return list(name_list)", - "docstring": "Check if an instance is referenced in the records we have from\n Route53. If it is, return the list of domain names pointing to said\n instance. If nothing points to it, return an empty list." - }, - { - "code": "def get_full_history(self, force=None, last_update=None, flush=False):\n return self._run_object_import(force=force, last_update=last_update,\n flush=flush, full_history=True)", - "docstring": "Fields change depending on when you run activity_import,\n such as \"last_updated\" type fields which don't have activity\n being tracked, which means we'll always end up with different\n hash values, so we need to always remove all existing object\n states and import fresh" - }, - { - "code": "def _register_notification_callback(self, connection_handle, attribute_handle, callback, once=False):\n notification_id = (connection_handle, attribute_handle)\n with self.notification_callbacks_lock:\n self.notification_callbacks[notification_id] = (callback, once)", - "docstring": "Register a callback as a notification callback. It will be called if a notification with the matching\n connection_handle and attribute_handle is received.\n\n Args:\n connection_handle (int): The connection handle to watch\n attribute_handle (int): The attribute handle to watch\n callback (func): The callback function to call once the notification has been received\n once (bool): Should the callback only be called once (and then removed from the notification callbacks)" - }, - { - "code": "def update_browse_tabs_menu(self):\r\n self.browse_tabs_menu.clear()\r\n names = []\r\n dirnames = []\r\n for index in range(self.count()):\r\n if self.menu_use_tooltips:\r\n text = to_text_string(self.tabToolTip(index))\r\n else:\r\n text = to_text_string(self.tabText(index))\r\n names.append(text)\r\n if osp.isfile(text):\r\n dirnames.append(osp.dirname(text))\r\n offset = None\r\n if len(names) == len(dirnames):\r\n common = get_common_path(dirnames)\r\n if common is None:\r\n offset = None\r\n else:\r\n offset = len(common)+1\r\n if offset <= 3:\r\n offset = None\r\n for index, text in enumerate(names):\r\n tab_action = create_action(self, text[offset:],\r\n icon=self.tabIcon(index),\r\n toggled=lambda state, index=index:\r\n self.setCurrentIndex(index),\r\n tip=self.tabToolTip(index))\r\n tab_action.setChecked(index == self.currentIndex())\r\n self.browse_tabs_menu.addAction(tab_action)", - "docstring": "Update browse tabs menu" - }, - { - "code": "def build_response(headers: Headers, key: str) -> None:\n headers[\"Upgrade\"] = \"websocket\"\n headers[\"Connection\"] = \"Upgrade\"\n headers[\"Sec-WebSocket-Accept\"] = accept(key)", - "docstring": "Build a handshake response to send to the client.\n\n ``key`` comes from :func:`check_request`." 
- }, - { - "code": "def send(self, sender, **named):\n responses = []\n if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:\n return responses\n for receiver in self._live_receivers(sender):\n response = receiver(signal=self, sender=sender, **named)\n responses.append((receiver, response))\n return responses", - "docstring": "Send signal from sender to all connected receivers.\n\n If any receiver raises an error, the error propagates back through send,\n terminating the dispatch loop. So it's possible that all receivers\n won't be called if an error is raised.\n\n Arguments:\n\n sender\n The sender of the signal. Either a specific object or None.\n\n named\n Named arguments which will be passed to receivers.\n\n Returns a list of tuple pairs [(receiver, response), ... ]." - }, - { - "code": "def cast_values_csvs(d, idx, x):\n try:\n d[idx].append(float(x))\n except ValueError:\n d[idx].append(x)\n except KeyError as e:\n logger_misc.warn(\"cast_values_csv: KeyError: col: {}, {}\".format(x, e))\n return d", - "docstring": "Attempt to cast string to float. If error, keep as a string.\n\n :param dict d: Data\n :param int idx: Index number\n :param str x: Data\n :return any:" - }, - { - "code": "def rset(self):\n self._send('RSET\\r\\n')\n resp = self._read()\n if not resp.startswith('250'):\n logger.warn('Unexpected server response at RSET: ' + resp)\n self._recipients = []\n self.results = {}", - "docstring": "Send LMTP RSET command and process the server response." - }, - { - "code": "def wait_for(func):\n def wrapped(*args, **kwargs):\n timeout = kwargs.pop('timeout', 15)\n start = time()\n result = None\n while time() - start < timeout:\n result = func(*args, **kwargs)\n if result:\n break\n sleep(0.2)\n return result\n return wrapped", - "docstring": "A decorator to invoke a function periodically until it returns a truthy\n value." - }, - { - "code": "def _ParseRecurseKeys(self, parser_mediator, root_key):\n for registry_key in root_key.RecurseKeys():\n if parser_mediator.abort:\n break\n self._ParseKey(parser_mediator, registry_key)", - "docstring": "Parses the Registry keys recursively.\n\n Args:\n parser_mediator (ParserMediator): parser mediator.\n root_key (dfwinreg.WinRegistryKey): root Windows Registry key." - }, - { - "code": "def parent(self):\n if self.parent_id:\n return self._client.part(pk=self.parent_id, category=self.category)\n else:\n return None", - "docstring": "Retrieve the parent of this `Part`.\n\n :return: the parent :class:`Part` of this part\n :raises APIError: if an Error occurs\n\n Example\n -------\n\n >>> part = project.part('Frame')\n >>> bike = part.parent()" - }, - { - "code": "def download(url):\n session = requests.Session()\n session.mount('file://', FileAdapter())\n try:\n res = session.get(url)\n except requests.exceptions.ConnectionError as e:\n raise e\n res.raise_for_status()\n return res", - "docstring": "Uses requests to download an URL, maybe from a file" - }, - { - "code": "def create_track_token(request):\n from tracked_model.models import RequestInfo\n request_pk = RequestInfo.create_or_get_from_request(request).pk\n user_pk = None\n if request.user.is_authenticated():\n user_pk = request.user.pk\n return TrackToken(request_pk=request_pk, user_pk=user_pk)", - "docstring": "Returns ``TrackToken``.\n ``TrackToken' contains request and user making changes.\n\n It can be passed to ``TrackedModel.save`` instead of ``request``.\n It is intended to be used when passing ``request`` is not possible\n e.g. 
when ``TrackedModel.save`` will be called from celery task." - }, - { - "code": "def parse(s, subs):\n if len(subs) == 0:\n return []\n points = []\n requests = _tokenize_request(s)\n if len(requests) == 1 and requests[0].type_ == _Request.Type.OFFSET:\n return _offset_subtitles(requests[0], subs)\n return _sync_subtitles(requests, subs)", - "docstring": "Parses a given string and creates a list of SyncPoints." - }, - { - "code": "def send(self, confirmation_email, send_date=\"immediately\"):\n body = {\n \"ConfirmationEmail\": confirmation_email,\n \"SendDate\": send_date}\n response = self._post(self.uri_for(\"send\"), json.dumps(body))", - "docstring": "Sends this campaign." - }, - { - "code": "def set_to_current(self, ):\n cur = self.get_current_file()\n if cur is not None:\n self.set_selection(cur)\n else:\n self.init_selection()", - "docstring": "Set the selection to the currently open one\n\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def set_channel_property(self, channel_id, property_name, value):\n if isinstance(channel_id, (int, np.integer)):\n if channel_id in self.get_channel_ids():\n if channel_id not in self._channel_properties:\n self._channel_properties[channel_id] = {}\n if isinstance(property_name, str):\n self._channel_properties[channel_id][property_name] = value\n else:\n raise ValueError(str(property_name) + \" must be a string\")\n else:\n raise ValueError(str(channel_id) + \" is not a valid channel_id\")\n else:\n raise ValueError(str(channel_id) + \" must be an int\")", - "docstring": "This function adds a property dataset to the given channel under the\n property name.\n\n Parameters\n ----------\n channel_id: int\n The channel id for which the property will be added\n property_name: str\n A property stored by the RecordingExtractor (location, etc.)\n value:\n The data associated with the given property name. Could be many\n formats as specified by the user." - }, - { - "code": "def get_version_id(protocol_version):\n ver = registry.version_info.get(protocol_version)\n if ver:\n return ver.version_id", - "docstring": "Get a tuple with major and minor version number\n\n :param Integer protocol_version: Internal version ID\n :return: Tuple of major and minor protocol version\n :rtype: Tuple" - }, - { - "code": "def pymatgen_mol(self):\n sp = []\n coords = []\n for atom in ob.OBMolAtomIter(self._obmol):\n sp.append(atom.GetAtomicNum())\n coords.append([atom.GetX(), atom.GetY(), atom.GetZ()])\n return Molecule(sp, coords)", - "docstring": "Returns pymatgen Molecule object." 
- }, - { - "code": "def memory_write(self, offset: int, data: List[Union[int, BitVec]]) -> None:\n self.mem_extend(offset, len(data))\n self.memory[offset : offset + len(data)] = data", - "docstring": "Writes data to memory starting at offset.\n\n :param offset:\n :param data:" - }, - { - "code": "def get_thumbnail_preset(self):\n if isinstance(self, ChannelNode):\n return format_presets.CHANNEL_THUMBNAIL\n elif isinstance(self, TopicNode):\n return format_presets.TOPIC_THUMBNAIL\n elif isinstance(self, VideoNode):\n return format_presets.VIDEO_THUMBNAIL\n elif isinstance(self, AudioNode):\n return format_presets.AUDIO_THUMBNAIL\n elif isinstance(self, DocumentNode):\n return format_presets.DOCUMENT_THUMBNAIL\n elif isinstance(self, ExerciseNode):\n return format_presets.EXERCISE_THUMBNAIL\n elif isinstance(self, HTML5AppNode):\n return format_presets.HTML5_THUMBNAIL\n else:\n return None", - "docstring": "Returns the format preset corresponding to this Node's type, or None if the node doesn't have a format preset." - }, - { - "code": "def last_version():\n try:\n last_update, version, success = last_version._cache\n except AttributeError:\n last_update = 0\n version = None\n success = False\n cache_delta = 24 * 3600 if success else 600\n if (time.time() - last_update) < cache_delta:\n return version\n else:\n try:\n req = requests.get(settings.CAS_NEW_VERSION_JSON_URL)\n data = json.loads(req.text)\n version = data[\"info\"][\"version\"]\n last_version._cache = (time.time(), version, True)\n return version\n except (\n KeyError,\n ValueError,\n requests.exceptions.RequestException\n ) as error:\n logger.error(\n \"Unable to fetch %s: %s\" % (settings.CAS_NEW_VERSION_JSON_URL, error)\n )\n last_version._cache = (time.time(), version, False)", - "docstring": "Fetch the last version from pypi and return it. On successful fetch from pypi, the response\n is cached 24h, on error, it is cached 10 min.\n\n :return: the last django-cas-server version\n :rtype: unicode" - }, - { - "code": "def _parse(self, data):\n if type(data) == type(bytes()):\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError:\n return data\n try:\n data = json.loads(data, parse_float=Decimal)\n except ValueError:\n return data\n if type(data) is dict:\n if 'error' in data:\n error = data['error']\n if error.get('type') == \"OAuthException\":\n exception = OAuthError\n else:\n exception = FacebookError\n raise exception(**self._get_error_params(data))\n if 'error_msg' in data:\n raise FacebookError(**self._get_error_params(data))\n return data", - "docstring": "Parse the response from Facebook's Graph API.\n\n :param data: A string describing the Graph API's response." - }, - { - "code": "def _calibrate_quantized_sym(qsym, th_dict):\n if th_dict is None or len(th_dict) == 0:\n return qsym\n num_layer_outputs = len(th_dict)\n layer_output_names = []\n min_vals = []\n max_vals = []\n for k, v in th_dict.items():\n layer_output_names.append(k)\n min_vals.append(v[0])\n max_vals.append(v[1])\n calibrated_sym = SymbolHandle()\n check_call(_LIB.MXSetCalibTableToQuantizedSymbol(qsym.handle,\n mx_uint(num_layer_outputs),\n c_str_array(layer_output_names),\n c_array(ctypes.c_float, min_vals),\n c_array(ctypes.c_float, max_vals),\n ctypes.byref(calibrated_sym)))\n return Symbol(calibrated_sym)", - "docstring": "Given a dictionary containing the thresholds for quantizing the layers,\n set the thresholds into the quantized symbol as the params of requantize operators." 
- }, - { - "code": "def run_local(\n context: cli.CommandContext,\n project: projects.Project,\n project_steps: typing.List[projects.ProjectStep],\n force: bool,\n continue_after: bool,\n single_step: bool,\n limit: int,\n print_status: bool,\n skip_library_reload: bool = False\n) -> environ.Response:\n skip_reload = (\n skip_library_reload\n or environ.modes.has(environ.modes.TESTING)\n )\n if not skip_reload:\n runner.reload_libraries()\n environ.log_header('RUNNING', 5)\n steps_run = []\n if single_step:\n ps = project_steps[0] if len(project_steps) > 0 else None\n force = force or (single_step and bool(ps is not None))\n steps_run = runner.section(\n response=context.response,\n project=project,\n starting=ps,\n limit=1,\n force=force\n )\n elif continue_after or len(project_steps) == 0:\n ps = project_steps[0] if len(project_steps) > 0 else None\n steps_run = runner.complete(\n context.response,\n project,\n ps,\n force=force,\n limit=limit\n )\n else:\n for ps in project_steps:\n steps_run += runner.section(\n response=context.response,\n project=project,\n starting=ps,\n limit=max(1, limit),\n force=force or (limit < 1 and len(project_steps) < 2),\n skips=steps_run + []\n )\n project.write()\n environ.log_blanks()\n step_changes = []\n for ps in steps_run:\n step_changes.append(dict(\n name=ps.definition.name,\n action='updated',\n step=writing.step_writer.serialize(ps)._asdict()\n ))\n context.response.update(step_changes=step_changes)\n if print_status or context.response.failed:\n context.response.update(project=project.kernel_serialize())\n return context.response", - "docstring": "Execute the run command locally within this cauldron environment\n\n :param context:\n :param project:\n :param project_steps:\n :param force:\n :param continue_after:\n :param single_step:\n :param limit:\n :param print_status:\n :param skip_library_reload:\n Whether or not to skip reloading all project libraries prior to\n execution of the project. By default this is False in which case\n the project libraries are reloaded prior to execution.\n :return:" - }, - { - "code": "def baseclass(self):\n for cls in _BASE_CLASSES:\n if isinstance(self, cls):\n return cls\n raise ValueError(\"Cannot determine the base class of %s\" % self.__class__.__name__)", - "docstring": "The baseclass of self." - }, - { - "code": "def pushd(cls, new_dir):\n previous_dir = os.getcwd()\n try:\n new_ab_dir = None\n if os.path.isabs(new_dir):\n new_ab_dir = new_dir\n else:\n new_ab_dir = os.path.join(previous_dir, new_dir)\n cls.cd(new_ab_dir)\n yield\n finally:\n cls.cd(previous_dir)", - "docstring": "Change directory, and back to previous directory.\n\n It behaves like \"pushd directory; something; popd\"." - }, - { - "code": "def _tempfilepager(generator, cmd, color):\n import tempfile\n filename = tempfile.mktemp()\n text = \"\".join(generator)\n if not color:\n text = strip_ansi(text)\n encoding = get_best_encoding(sys.stdout)\n with open_stream(filename, 'wb')[0] as f:\n f.write(text.encode(encoding))\n try:\n os.system(cmd + ' \"' + filename + '\"')\n finally:\n os.unlink(filename)", - "docstring": "Page through text by invoking a program on a temporary file." 
- }, - { - "code": "def deal_caps(x:Collection[str]) -> Collection[str]:\n \"Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before.\"\n res = []\n for t in x:\n if t == '': continue\n if t[0].isupper() and len(t) > 1 and t[1:].islower(): res.append(TK_MAJ)\n res.append(t.lower())\n return res", - "docstring": "Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before." - }, - { - "code": "def unsubscribe(self, channel, *channels):\n conn = self._pool_or_conn\n return conn.execute_pubsub(b'UNSUBSCRIBE', channel, *channels)", - "docstring": "Unsubscribe from specific channels.\n\n Arguments can be instances of :class:`~aioredis.Channel`." - }, - { - "code": "def strip_querystring(url):\n p = six.moves.urllib.parse.urlparse(url)\n return p.scheme + \"://\" + p.netloc + p.path", - "docstring": "Remove the querystring from the end of a URL." - }, - { - "code": "def _sortByCreated(a, b):\n if a.created < b.created:\n return 1\n elif a.created > b.created:\n return -1\n else:\n return 0", - "docstring": "Sort function for object by created date" - }, - { - "code": "def update( self, jump ):\n atom = jump.initial_site.atom\n dr = jump.dr( self.cell_lengths )\n jump.final_site.occupation = atom.number\n jump.final_site.atom = atom\n jump.final_site.is_occupied = True\n jump.initial_site.occupation = 0\n jump.initial_site.atom = None\n jump.initial_site.is_occupied = False\n atom.site = jump.final_site\n atom.number_of_hops += 1\n atom.dr += dr\n atom.summed_dr2 += np.dot( dr, dr )", - "docstring": "Update the lattice state by accepting a specific jump\n\n Args:\n jump (Jump): The jump that has been accepted.\n\n Returns:\n None." - }, - { - "code": "def cancel(self) :\n \"tells libdbus you no longer care about the pending incoming message.\"\n dbus.dbus_pending_call_cancel(self._dbobj)\n if self._awaiting != None :\n self._awaiting.cancel()", - "docstring": "tells libdbus you no longer care about the pending incoming message." - }, - { - "code": "def _rule_compare(rule1, rule2):\n commonkeys = set(rule1.keys()).intersection(rule2.keys())\n for key in commonkeys:\n if rule1[key] != rule2[key]:\n return False\n return True", - "docstring": "Compare the common keys between security group rules against eachother" - }, - { - "code": "def pixels_from_coordinates(lat, lon, max_y, max_x):\n x_ratio, y_ratio = max_x/360., max_y/180.\n x, y = np.zeros(lon.shape), np.zeros(lat.shape)\n x = (lon + 180.) * x_ratio\n y = (lat + 90.) * y_ratio\n return x, y", - "docstring": "Return the 2 matrix with lat and lon of each pixel.\n\n Keyword arguments:\n lat -- A latitude matrix\n lon -- A longitude matrix\n max_y -- The max vertical pixels amount of an orthorectified image.\n max_x -- The max horizontal pixels amount of an orthorectified image." - }, - { - "code": "def MSELossFlat(*args, axis:int=-1, floatify:bool=True, **kwargs):\n \"Same as `nn.MSELoss`, but flattens input and target.\"\n return FlattenedLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)", - "docstring": "Same as `nn.MSELoss`, but flattens input and target." - }, - { - "code": "def get_or_create(self, user, obj, **kwargs):\n if not self.is_following(user, obj):\n return self.create(user, obj, **kwargs), True\n return self.get_follows(obj).get(user=user), False", - "docstring": "Almost the same as `FollowManager.objects.create` - behaves the same \n as the normal `get_or_create` methods in django though. 
\n\n Returns a tuple with the `Follow` and either `True` or `False`" - }, - { - "code": "def degenerate_like(x, k):\n R\n x = np.atleast_1d(x)\n return sum(np.log([i == k for i in x]))", - "docstring": "R\"\"\"\n Degenerate log-likelihood.\n\n .. math::\n f(x \\mid k) = \\left\\{ \\begin{matrix} 1 \\text{ if } x = k \\\\ 0 \\text{ if } x \\ne k\\end{matrix} \\right.\n\n :Parameters:\n - `x` : Input value.\n - `k` : Degenerate value." - }, - { - "code": "def _link_or_update_vars(self):\n for d, source in self.links.items():\n target = os.path.join(self.inventory_directory, d)\n source = os.path.join(self._config.scenario.directory, source)\n if not os.path.exists(source):\n msg = \"The source path '{}' does not exist.\".format(source)\n util.sysexit_with_message(msg)\n msg = \"Inventory {} linked to {}\".format(source, target)\n LOG.info(msg)\n os.symlink(source, target)", - "docstring": "Creates or updates the symlink to group_vars and returns None.\n\n :returns: None" - }, - { - "code": "def _parse_certificate(cls, response):\n links = _parse_header_links(response)\n try:\n cert_chain_uri = links[u'up'][u'url']\n except KeyError:\n cert_chain_uri = None\n return (\n response.content()\n .addCallback(\n lambda body: messages.CertificateResource(\n uri=cls._maybe_location(response),\n cert_chain_uri=cert_chain_uri,\n body=body))\n )", - "docstring": "Parse a response containing a certificate resource." - }, - { - "code": "def connectAcknowledge():\n a = TpPd(pd=0x3)\n b = MessageType(mesType=0xf)\n packet = a / b\n return packet", - "docstring": "CONNECT ACKNOWLEDGE Section 9.3.6" - }, - { - "code": "def _ensure_frames(cls, documents):\n frames = []\n for document in documents:\n if not isinstance(document, Frame):\n frames.append(cls(document))\n else:\n frames.append(document)\n return frames", - "docstring": "Ensure all items in a list are frames by converting those that aren't." - }, - { - "code": "def minimum(self):\n return min([(x, energy) for _, x, energy, _, _ in self.get_kinks()],\n key=lambda i: i[1])", - "docstring": "Finds the minimum reaction energy E_min and corresponding\n mixing ratio x_min.\n\n Returns:\n Tuple (x_min, E_min)." 
- }, - { - "code": "def extract_dirs(mod, path, dst, verbose=False, exclude=None, exclude_ext=None, recursion=True, replace=True):\n default_exclude = ['.svn', '_svn', '.git']\n default_exclude_ext = ['.pyc', '.pyo', '.bak', '.tmp']\n exclude = exclude or []\n exclude_ext = exclude_ext or []\n if not os.path.exists(dst):\n os.makedirs(dst)\n if verbose:\n print 'Make directory %s' % dst\n for r in pkg.resource_listdir(mod, path):\n if r in exclude or r in default_exclude:\n continue\n fpath = os.path.join(path, r)\n if pkg.resource_isdir(mod, fpath):\n if recursion:\n extract_dirs(mod, fpath, os.path.join(dst, r), verbose, exclude, exclude_ext, recursion, replace)\n else:\n ext = os.path.splitext(fpath)[1]\n if ext in exclude_ext or ext in default_exclude_ext:\n continue\n extract_file(mod, fpath, dst, verbose, replace)", - "docstring": "mod name\n path mod path\n dst output directory\n resursion True will extract all sub module of mod" - }, - { - "code": "def register_schemas_dir(self, directory):\n for root, dirs, files in os.walk(directory):\n dir_path = os.path.relpath(root, directory)\n if dir_path == '.':\n dir_path = ''\n for file_ in files:\n if file_.lower().endswith(('.json')):\n schema_name = os.path.join(dir_path, file_)\n if schema_name in self.schemas:\n raise JSONSchemaDuplicate(\n schema_name,\n self.schemas[schema_name],\n directory\n )\n self.schemas[schema_name] = os.path.abspath(directory)", - "docstring": "Recursively register all json-schemas in a directory.\n\n :param directory: directory path." - }, - { - "code": "def prepare_fluxseries(self, ramflag: bool = True) -> None:\n for element in printtools.progressbar(self):\n element.prepare_fluxseries(ramflag)", - "docstring": "Call method |Element.prepare_fluxseries| of all handled\n |Element| objects." - }, - { - "code": "def extend(self, items):\n items = np.array(items)\n pos = items.shape[0] + self.logical_size\n if pos > self.physical_size:\n amt = self._tmp_size()\n if self.physical_size + amt < pos:\n amt = pos - self.physical_size\n self._grow(amt=amt)\n stop = self._position + items.shape[0]\n self._data[self._position: stop] = items\n self._position += items.shape[0]\n return self", - "docstring": "extend the numpy array with multiple items, growing the wrapped array\n if necessary" - }, - { - "code": "def _write_to_error(self, s, truncate=False):\n with open(self._errorfile, 'w' if truncate else 'a') as fp:\n fp.writelines((to_text(s)), )", - "docstring": "Writes the given output to the error file, appending unless `truncate` is True." - }, - { - "code": "def communityvisibilitystate(self):\n if self._communityvisibilitystate == None:\n return None\n elif self._communityvisibilitystate in self.VisibilityState:\n return self.VisibilityState[self._communityvisibilitystate]\n else:\n return None", - "docstring": "Return the Visibility State of the Users Profile" - }, - { - "code": "def GetYearFromPosixTime(posix_time, timezone=pytz.UTC):\n datetime_object = datetime.datetime.fromtimestamp(posix_time, tz=timezone)\n return datetime_object.year", - "docstring": "Gets the year from a POSIX timestamp\n\n The POSIX time is the number of seconds since 1970-01-01 00:00:00 UTC.\n\n Args:\n posix_time: An integer containing the number of seconds since\n 1970-01-01 00:00:00 UTC.\n timezone: Optional timezone of the POSIX timestamp.\n\n Returns:\n The year of the POSIX timestamp.\n\n Raises:\n ValueError: If the posix timestamp is out of the range of supported values." 
- }, - { - "code": "def has_concluded(self, bigchain, current_votes=[]):\n if self.has_validator_set_changed(bigchain):\n return False\n election_pk = self.to_public_key(self.id)\n votes_committed = self.get_commited_votes(bigchain, election_pk)\n votes_current = self.count_votes(election_pk, current_votes)\n total_votes = sum(output.amount for output in self.outputs)\n if (votes_committed < (2/3) * total_votes) and \\\n (votes_committed + votes_current >= (2/3)*total_votes):\n return True\n return False", - "docstring": "Check if the election can be concluded or not.\n\n * Elections can only be concluded if the validator set has not changed\n since the election was initiated.\n * Elections can be concluded only if the current votes form a supermajority.\n\n Custom elections may override this function and introduce additional checks." - }, - { - "code": "def new_template(self, template_name=''):\n self._add_entry(templates.NEW_MODEL_TEMPLATE\n .format(template_name=template_name))", - "docstring": "Append a new template from .rft entry to the journal.\n\n This instructs Revit to create a new template model based on the\n provided .rft template.\n\n Args:\n template_name (str): optional full path to .rft template\n to be used. default value is " - }, - { - "code": "def _delete_vdev_info(self, vdev):\n vdev = vdev.lower()\n network_config_file_name = self._get_network_file()\n device = self._get_device_name(vdev)\n cmd = '\\n'.join((\"num=$(sed -n '/auto %s/=' %s)\" % (device,\n network_config_file_name),\n \"dns=$(awk 'NR==(\\\"\\'$num\\'\\\"+6)&&\"\n \"/dns-nameservers/' %s)\" %\n network_config_file_name,\n \"if [[ -n $dns ]]; then\",\n \" sed -i '/auto %s/,+6d' %s\" % (device,\n network_config_file_name),\n \"else\",\n \" sed -i '/auto %s/,+5d' %s\" % (device,\n network_config_file_name),\n \"fi\"))\n return cmd", - "docstring": "handle vdev related info." - }, - { - "code": "def _process_panel_configuration(self, config):\n try:\n dashboard = config.get('PANEL_DASHBOARD')\n if not dashboard:\n LOG.warning(\"Skipping %s because it doesn't have \"\n \"PANEL_DASHBOARD defined.\", config.__name__)\n return\n panel_slug = config.get('PANEL')\n dashboard_cls = self.get_dashboard(dashboard)\n panel_group = config.get('PANEL_GROUP')\n default_panel = config.get('DEFAULT_PANEL')\n if default_panel:\n dashboard_cls.default_panel = default_panel\n if config.get('REMOVE_PANEL', False):\n for panel in dashboard_cls.get_panels():\n if panel_slug == panel.slug:\n dashboard_cls.unregister(panel.__class__)\n elif config.get('ADD_PANEL', None):\n panel_path = config['ADD_PANEL']\n mod_path, panel_cls = panel_path.rsplit(\".\", 1)\n try:\n mod = import_module(mod_path)\n except ImportError as e:\n LOG.warning(\"Could not import panel module %(module)s: \"\n \"%(exc)s\", {'module': mod_path, 'exc': e})\n return\n panel = getattr(mod, panel_cls)\n if hasattr(panel, 'can_register') and \\\n callable(getattr(panel, 'can_register')):\n if not panel.can_register():\n LOG.debug(\"Load condition failed for panel: %(panel)s\",\n {'panel': panel_slug})\n return\n dashboard_cls.register(panel)\n if panel_group:\n dashboard_cls.get_panel_group(panel_group).\\\n panels.append(panel.slug)\n else:\n panels = list(dashboard_cls.panels)\n panels.append(panel)\n dashboard_cls.panels = tuple(panels)\n except Exception as e:\n LOG.warning('Could not process panel %(panel)s: %(exc)s',\n {'panel': panel_slug, 'exc': e})", - "docstring": "Add, remove and set default panels on the dashboard." 
- }, - { - "code": "def _retry(self, state):\n state.attempt += 1\n self.debug('Starting restart attempt: %d.', state.attempt)\n if self._cmp_strategy(RestartStrategy.buryme):\n self.debug('Agent %r is going to by buried according to his '\n 'last will.', state.factory.descriptor_type)\n return self._send_buried_notifications()\n else:\n f = self._set_restart_flag()\n f.add_callback(fiber.drop_param, self._send_died_notifications)\n f.add_both(self._ensure_someone_took_responsability)\n return f", - "docstring": "Starts a single try of the whole restart path." - }, - { - "code": "def to_signed_str(self, private, public, passphrase=None):\n from pyxmli import xmldsig\n try:\n from Crypto.PublicKey import RSA\n except ImportError:\n raise ImportError('PyCrypto 2.5 or more recent module is ' \\\n 'required to enable XMLi signing.\\n' \\\n 'Please visit: http://pycrypto.sourceforge.net/')\n if not isinstance(private, RSA._RSAobj):\n private = RSA.importKey(private.read(), passphrase=passphrase)\n if not isinstance(public, RSA._RSAobj):\n public = RSA.importKey(public.read())\n return to_unicode(xmldsig.sign(to_unicode(self.to_string()),\n private, public))", - "docstring": "Returns a signed version of the invoice.\n @param private:file Private key file-like object\n @param public:file Public key file-like object\n @param passphrase:str Private key passphrase if any.\n @return: str" - }, - { - "code": "def mode(self, mode):\n allowed_values = [\"test\", \"live\"]\n if mode is not None and mode not in allowed_values:\n raise ValueError(\n \"Invalid value for `mode` ({0}), must be one of {1}\"\n .format(mode, allowed_values)\n )\n self._mode = mode", - "docstring": "Sets the mode of this BraintreeGateway.\n\n\n :param mode: The mode of this BraintreeGateway.\n :type: str" - }, - { - "code": "def load_data(self):\n try:\n df = self.live_quote_arg_func(self.tickers)\n for index, ticker in enumerate(self.tickers):\n ticker_info = df.loc[index]\n self.ticker_dict[ticker].append(ticker_info['price'],\n ticker_info['volume'],\n ticker_info['amount'],\n ticker_info['time'])\n except Exception:\n raise ValueError('Polling thread exception')", - "docstring": "Overwrite this for new source data structures" - }, - { - "code": "def refresh_schema_metadata(self, max_schema_agreement_wait=None):\n if not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait, force=True):\n raise DriverException(\"Schema metadata was not refreshed. See log for details.\")", - "docstring": "Synchronously refresh all schema metadata.\n\n By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait`\n and :attr:`~.Cluster.control_connection_timeout`.\n\n Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`.\n\n Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately.\n\n An Exception is raised if schema refresh fails for any reason." 
- }, - { - "code": "def full_size(self):\n self.dragpos = wx.Point(0, 0)\n self.zoom = 1.0\n self.need_redraw = True", - "docstring": "show image at full size" - }, - { - "code": "def integer(self, x):\n if type(x) is str:\n hex = binascii.unhexlify(x)\n return int.from_bytes(hex, 'big')\n return x.value if isinstance(x, FiniteField.Value) else x", - "docstring": "returns a plain integer" - }, - { - "code": "def ProduceAnalysisReport(self, plugin):\n analysis_report = plugin.CompileReport(self)\n if not analysis_report:\n return\n analysis_report.time_compiled = timelib.Timestamp.GetNow()\n plugin_name = getattr(analysis_report, 'plugin_name', plugin.plugin_name)\n if plugin_name:\n analysis_report.plugin_name = plugin_name\n if self._event_filter_expression:\n analysis_report.filter_string = self._event_filter_expression\n self._storage_writer.AddAnalysisReport(analysis_report)\n self.number_of_produced_analysis_reports += 1\n self.number_of_produced_event_tags = (\n self._storage_writer.number_of_event_tags)\n self.last_activity_timestamp = time.time()", - "docstring": "Produces an analysis report.\n\n Args:\n plugin (AnalysisPlugin): plugin." - }, - { - "code": "def makelib(self, remake=False, full=True, compiler=\"gfortran\", debug=False, profile=False):\n if self.link is None or remake:\n from os import path\n if self.library is not None:\n outpath = path.join(self.dirpath, \"{}.a\".format(self.library))\n else:\n outpath = path.join(self.dirpath, \"{}.a\".format(self.name))\n if not remake and path.isfile(outpath):\n self.link = outpath\n return\n dependencies = self._get_lib_modules(full)\n makepath = path.join(path.dirname(self.module.filepath), \"Makefile.ftypes\")\n if full:\n compileid = \"ftypes.all_libs\"\n identifier = self.library\n else:\n compileid = \"ftypes.{}_c\".format(self.module.name)\n identifier = self.module.name\n makefile(identifier, dependencies, makepath, compileid,\n self.module.precompile, False, self.module.parent, \"a\")\n code = self._compile(path.dirname(self.module.filepath), \"Makefile.ftypes\",\n compiler, debug, profile)\n if code == 0:\n self.link = path.join(path.dirname(self.module.filepath), \"{}.a\".format(identifier))\n self._copy_so(outpath)\n self.link = outpath\n else:\n self._copy_so(self.dirpath)", - "docstring": "Generates a makefile for the code files that reside in the same directory as the source\n that was parsed by the code parser.\n\n :arg full: when True, the shared library is compiled for *all* the code files in the directory\n not just the one's that are dependencies of the source module." 
- }, - { - "code": "def extract_guts(image_path,\n tar,\n file_filter=None,\n tag_root=True,\n include_sizes=True):\n if file_filter is None:\n file_filter = get_level('IDENTICAL')\n results = dict()\n digest = dict()\n allfiles = []\n if tag_root:\n roots = dict()\n if include_sizes: \n sizes = dict()\n for member in tar:\n member_name = member.name.replace('.','',1)\n allfiles.append(member_name)\n included = False\n if member.isdir() or member.issym():\n continue\n elif assess_content(member,file_filter):\n digest[member_name] = extract_content(image_path, member.name, return_hash=True)\n included = True\n elif include_file(member,file_filter):\n hasher = hashlib.md5()\n buf = member.tobuf()\n hasher.update(buf)\n digest[member_name] = hasher.hexdigest()\n included = True\n if included:\n if include_sizes:\n sizes[member_name] = member.size\n if tag_root:\n roots[member_name] = is_root_owned(member)\n results['all'] = allfiles\n results['hashes'] = digest\n if include_sizes:\n results['sizes'] = sizes\n if tag_root:\n results['root_owned'] = roots\n return results", - "docstring": "extract the file guts from an in memory tarfile. The file is not closed.\n This should not be done for large images." - }, - { - "code": "def _calculate_expires(self):\n self._backend_client.expires = None\n now = datetime.utcnow()\n self._backend_client.expires = now + timedelta(seconds=self._config.timeout)", - "docstring": "Calculates the session expiry using the timeout" - }, - { - "code": "def seventh(note, key):\n return triad(note, key) + [intervals.seventh(note, key)]", - "docstring": "Return the seventh chord on note in key.\n\n Example:\n >>> seventh('C', 'C')\n ['C', 'E', 'G', 'B']" - }, - { - "code": "def rank(self):\n rank = ctypes.c_int()\n check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))\n return rank.value", - "docstring": "Returns the rank of this worker node.\n\n Returns\n -------\n rank : int\n The rank of this node, which is in range [0, num_workers())" - }, - { - "code": "def nf_step_to_process(step, out_handle):\n pprint.pprint(step)\n directives = []\n for req in step[\"task_definition\"][\"requirements\"]:\n if req[\"requirement_type\"] == \"docker\":\n directives.append(\"container '%s'\" % req[\"value\"])\n elif req[\"requirement_type\"] == \"cpu\":\n directives.append(\"cpus %s\" % req[\"value\"])\n elif req[\"requirement_type\"] == \"memory\":\n directives.append(\"memory '%s'\" % req[\"value\"])\n task_id = step[\"task_id\"]\n directives = \"\\n \".join(directives)\n inputs = \"\\n \".join(nf_io_to_process(step[\"inputs\"], step[\"task_definition\"][\"inputs\"],\n step[\"scatter\"]))\n outputs = \"\\n \".join(nf_io_to_process(step[\"outputs\"], step[\"task_definition\"][\"outputs\"]))\n commandline = (step[\"task_definition\"][\"baseCommand\"] + \" \" +\n \" \".join([nf_input_to_cl(i) for i in step[\"task_definition\"][\"inputs\"]]))\n out_handle.write(_nf_process_tmpl.format(**locals()))", - "docstring": "Convert CWL step into a nextflow process." 
- }, - { - "code": "def waypoint_request_send(self, seq):\n if self.mavlink10():\n self.mav.mission_request_send(self.target_system, self.target_component, seq)\n else:\n self.mav.waypoint_request_send(self.target_system, self.target_component, seq)", - "docstring": "wrapper for waypoint_request_send" - }, - { - "code": "def apt_cache(in_memory=True, progress=None):\n from apt import apt_pkg\n apt_pkg.init()\n if in_memory:\n apt_pkg.config.set(\"Dir::Cache::pkgcache\", \"\")\n apt_pkg.config.set(\"Dir::Cache::srcpkgcache\", \"\")\n return apt_pkg.Cache(progress)", - "docstring": "Build and return an apt cache." - }, - { - "code": "def _check_time_fn(self, time_instance=False):\n if time_instance and not isinstance(self.time_fn, param.Time):\n raise AssertionError(\"%s requires a Time object\"\n % self.__class__.__name__)\n if self.time_dependent:\n global_timefn = self.time_fn is param.Dynamic.time_fn\n if global_timefn and not param.Dynamic.time_dependent:\n raise AssertionError(\"Cannot use Dynamic.time_fn as\"\n \" parameters are ignoring time.\")", - "docstring": "If time_fn is the global time function supplied by\n param.Dynamic.time_fn, make sure Dynamic parameters are using\n this time function to control their behaviour.\n\n If time_instance is True, time_fn must be a param.Time instance." - }, - { - "code": "def _reorderForPreference(themeList, preferredThemeName):\n for theme in themeList:\n if preferredThemeName == theme.themeName:\n themeList.remove(theme)\n themeList.insert(0, theme)\n return", - "docstring": "Re-order the input themeList according to the preferred theme.\n\n Returns None." - }, - { - "code": "def download_highlights(self,\n user: Union[int, Profile],\n fast_update: bool = False,\n filename_target: Optional[str] = None,\n storyitem_filter: Optional[Callable[[StoryItem], bool]] = None) -> None:\n for user_highlight in self.get_highlights(user):\n name = user_highlight.owner_username\n self.context.log(\"Retrieving highlights \\\"{}\\\" from profile {}\".format(user_highlight.title, name))\n totalcount = user_highlight.itemcount\n count = 1\n for item in user_highlight.get_items():\n if storyitem_filter is not None and not storyitem_filter(item):\n self.context.log(\"<{} skipped>\".format(item), flush=True)\n continue\n self.context.log(\"[%3i/%3i] \" % (count, totalcount), end=\"\", flush=True)\n count += 1\n with self.context.error_catcher('Download highlights \\\"{}\\\" from user {}'.format(user_highlight.title, name)):\n downloaded = self.download_storyitem(item, filename_target\n if filename_target\n else '{}/{}'.format(name, user_highlight.title))\n if fast_update and not downloaded:\n break", - "docstring": "Download available highlights from a user whose ID is given.\n To use this, one needs to be logged in.\n\n .. 
versionadded:: 4.1\n\n :param user: ID or Profile of the user whose highlights should get downloaded.\n :param fast_update: If true, abort when first already-downloaded picture is encountered\n :param filename_target: Replacement for {target} in dirname_pattern and filename_pattern\n or None if profile name and the highlights' titles should be used instead\n :param storyitem_filter: function(storyitem), which returns True if given StoryItem should be downloaded" - }, - { - "code": "def device_measurement(device,\n ts=None,\n part=None,\n result=None,\n code=None,\n **kwargs):\n if ts is None:\n ts = local_now()\n payload = MeasurementPayload(device=device, part=part)\n m = Measurement(ts, result, code, list(kwargs))\n payload.measurements.append(m)\n m.add_sample(ts, **kwargs)\n return dumps(payload)", - "docstring": "Returns a JSON MeasurementPayload ready to be send through a\n transport.\n\n If `ts` is not given, the current time is used. `part` is an\n optional `Part` object, and `result` and `code` are the respective\n fields of the `Measurement` object. All other arguments are\n interpreted as dimensions.\n\n Minimal example, using a `Device` object to send two\n measurements:\n\n >>> d = Device(\"12345\")\n >>> def publish(msg):\n ... pass\n >>> publish(d.measurement(temperature=22.8))\n >>> publish(d.measurement(pressure=4.1))" - }, - { - "code": "def _do_help(self, cmd, args):\n print(self.doc_string())\n print()\n data_unsorted = []\n cls = self.__class__\n for name in dir(cls):\n obj = getattr(cls, name)\n if iscommand(obj):\n cmds = []\n for cmd in getcommands(obj):\n cmds.append(cmd)\n cmd_str = ','.join(sorted(cmds))\n doc_str = textwrap.dedent(obj.__doc__).strip() if obj.__doc__ else \\\n '(no doc string available)'\n data_unsorted.append([cmd_str, doc_str])\n data_sorted = sorted(data_unsorted, key = lambda x: x[0])\n data = [['COMMANDS', 'DOC STRING']] + data_sorted\n table_banner = 'List of Available Commands'\n table = terminaltables.SingleTable(data, table_banner)\n table.inner_row_border = True\n table.inner_heading_row_border = True\n print(table.table)", - "docstring": "Display doc strings of the shell and its commands." - }, - { - "code": "async def issueAccumulator(self, schemaId: ID, iA,\n L) -> AccumulatorPublicKey:\n accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator(\n schemaId, iA, L)\n accPK = await self.wallet.submitAccumPublic(schemaId=schemaId,\n accumPK=accPK,\n accum=accum, tails=tails)\n await self.wallet.submitAccumSecret(schemaId=schemaId,\n accumSK=accSK)\n return accPK", - "docstring": "Issues and submits an accumulator used for non-revocation proof.\n\n :param schemaId: The schema ID (reference to claim\n definition schema)\n :param iA: accumulator ID\n :param L: maximum number of claims within accumulator.\n :return: Submitted accumulator public key" - }, - { - "code": "def delete_matching_path(self):\n logger.info(\n \"Deleting all mails with file path matching the {} regexp...\"\n \"\".format(self.conf.regexp.pattern))\n candidates = [\n mail for mail in self.pool\n if re.search(self.conf.regexp, mail.path)]\n if len(candidates) == self.size:\n logger.warning(\n \"Skip deletion: all {} mails matches the rexexp.\".format(\n self.size))\n return\n logger.info(\n \"{} candidates found for deletion.\".format(len(candidates)))\n for mail in candidates:\n self.delete(mail)", - "docstring": "Delete all duplicates whose file path match the regexp." 
- }, - { - "code": "def sum_of_squares(obs, pred):\n return np.sum((np.array(obs) - np.array(pred)) ** 2)", - "docstring": "Sum of squares between observed and predicted data\n\n Parameters\n ----------\n obs : iterable\n Observed data\n pred : iterable\n Predicted data\n\n Returns\n -------\n float\n Sum of squares\n\n Notes\n -----\n The length of observed and predicted data must match." - }, - { - "code": "def normalize_route(route: str) -> str:\n normalized_route = str(route).lstrip('^').rstrip('$').rstrip('?')\n normalized_route = normalized_route.replace('<', '(').replace('>', ')')\n return normalized_route", - "docstring": "Strip some of the ugly regexp characters from the given pattern.\n\n >>> normalize_route('^/user/<user_id:int>/?$')\n u'/user/(user_id:int)/'" - }, - { - "code": "def get_string(self):\n return self.left.chr+':'+str(self.left.end)+'-'+self.right.chr+':'+str(self.right.start)", - "docstring": "A string representation of the junction\n\n :return: string representation\n :rtype: string" - }, - { - "code": "def get_lookup(self, operator):\n try:\n return self._lookups[operator]\n except KeyError:\n raise NotImplementedError(\"Lookup operator '{}' is not supported\".format(operator))", - "docstring": "Look up a lookup.\n\n :param operator: Name of the lookup operator" - }, - { - "code": "def _set_data(self, **kwargs):\n if \"shape\" in kwargs:\n self.shape = kwargs[\"shape\"]\n if \"grid\" in kwargs:\n self.dict_grid.clear()\n self.dict_grid.update(kwargs[\"grid\"])\n if \"attributes\" in kwargs:\n self.attributes[:] = kwargs[\"attributes\"]\n if \"row_heights\" in kwargs:\n self.row_heights = kwargs[\"row_heights\"]\n if \"col_widths\" in kwargs:\n self.col_widths = kwargs[\"col_widths\"]\n if \"macros\" in kwargs:\n self.macros = kwargs[\"macros\"]", - "docstring": "Sets data from given parameters\n\n Old values are deleted.\n If a parameter is not given, nothing is changed.\n\n Parameters\n ----------\n\n shape: 3-tuple of Integer\n \\tGrid shape\n grid: Dict of 3-tuples to strings\n \\tCell content\n attributes: List of 3-tuples\n \\tCell attributes\n row_heights: Dict of 2-tuples to float\n \\t(row, tab): row_height\n col_widths: Dict of 2-tuples to float\n \\t(col, tab): col_width\n macros: String\n \\tMacros from macro list" - }, - { - "code": "def _getPowerupInterfaces(self):\n powerupInterfaces = getattr(self.__class__, \"powerupInterfaces\", ())\n pifs = []\n for x in powerupInterfaces:\n if isinstance(x, type(Interface)):\n pifs.append((x, 0))\n else:\n pifs.append(x)\n m = getattr(self, \"__getPowerupInterfaces__\", None)\n if m is not None:\n pifs = m(pifs)\n try:\n pifs = [(i, p) for (i, p) in pifs]\n except ValueError:\n raise ValueError(\"return value from %r.__getPowerupInterfaces__\"\n \" not an iterable of 2-tuples\" % (self,))\n return pifs", - "docstring": "Collect powerup interfaces this object declares that it can be\n installed on."
- }, - { - "code": "def handler(self, scheme_name=None):\n\t\tif scheme_name is None:\n\t\t\treturn self.__default_handler_cls\n\t\tfor handler in self.__handlers_cls:\n\t\t\tif handler.scheme_specification().scheme_name() == scheme_name:\n\t\t\t\treturn handler", - "docstring": "Return handler which scheme name matches the specified one\n\n\t\t:param scheme_name: scheme name to search for\n\t\t:return: WSchemeHandler class or None (if matching handler was not found)" - }, - { - "code": "def get_archive_part_value(self, part):\n parts_dict = {'year': '%Y',\n 'month': self.month_format,\n 'week': self.week_format,\n 'day': '%d'}\n if self.today is None:\n today = timezone.now()\n if timezone.is_aware(today):\n today = timezone.localtime(today)\n self.today = today\n return self.today.strftime(parts_dict[part])", - "docstring": "Return archive part for today" - }, - { - "code": "def get_workers(self, status=None, chosen_hits=None, assignment_ids=None, all_studies=False):\n if assignment_ids:\n workers = [self.get_worker(assignment_id) for assignment_id in assignment_ids]\n else:\n workers = self.amt_services.get_workers(assignment_status=status, chosen_hits=chosen_hits)\n if workers is False:\n raise Exception('*** failed to get workers')\n if not all_studies:\n my_hitids = self._get_my_hitids()\n workers = [worker for worker in workers if worker['hitId'] in my_hitids]\n workers = [self.add_bonus(worker) for worker in workers]\n return workers", - "docstring": "Status, if set, can be one of `Submitted`, `Approved`, or `Rejected`" - }, - { - "code": "def hook_symbol(self, symbol_name, simproc, kwargs=None, replace=None):\n if type(symbol_name) is not int:\n sym = self.loader.find_symbol(symbol_name)\n if sym is None:\n new_sym = None\n for reloc in self.loader.find_relevant_relocations(symbol_name):\n if not reloc.symbol.is_weak:\n raise Exception(\"Symbol is strong but we couldn't find its resolution? Report to @rhelmot.\")\n if new_sym is None:\n new_sym = self.loader.extern_object.make_extern(symbol_name)\n reloc.resolve(new_sym)\n reloc.relocate([])\n if new_sym is None:\n l.error(\"Could not find symbol %s\", symbol_name)\n return None\n sym = new_sym\n basic_addr = sym.rebased_addr\n else:\n basic_addr = symbol_name\n symbol_name = None\n hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=basic_addr)\n self.hook(hook_addr, simproc, kwargs=kwargs, replace=replace)\n return hook_addr", - "docstring": "Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that\n address. If the symbol was not available in the loaded libraries, this address may be provided\n by the CLE externs object.\n\n Additionally, if instead of a symbol name you provide an address, some secret functionality will\n kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some\n yet-unknown scary ABI that has its function pointers point to something other than the actual\n functions, in which case it'll do the right thing.\n\n :param symbol_name: The name of the dependency to resolve.\n :param simproc: The SimProcedure instance (or function) with which to hook the symbol\n :param kwargs: If you provide a SimProcedure for the hook, these are the keyword\n arguments that will be passed to the procedure's `run` method\n eventually.\n :param replace: Control the behavior on finding that the address is already hooked. If\n true, silently replace the hook. If false, warn and do not replace the\n hook. 
If none (default), warn and replace the hook.\n :returns: The address of the new symbol.\n :rtype: int" - }, - { - "code": "def shortcut_app_id(shortcut):\n algorithm = Crc(width = 32, poly = 0x04C11DB7, reflect_in = True, xor_in = 0xffffffff, reflect_out = True, xor_out = 0xffffffff)\n crc_input = ''.join([shortcut.exe,shortcut.name])\n high_32 = algorithm.bit_by_bit(crc_input) | 0x80000000\n full_64 = (high_32 << 32) | 0x02000000\n return str(full_64)", - "docstring": "Generates the app id for a given shortcut. Steam uses app ids as a unique\n identifier for games, but since shortcuts dont have a canonical serverside\n representation they need to be generated on the fly. The important part\n about this function is that it will generate the same app id as Steam does\n for a given shortcut" - }, - { - "code": "def FlushShortIdRecords(site_service):\n szService = c_char_p(site_service.encode('utf-8'))\n szMessage = create_string_buffer(b\" \")\n nMessage = c_ushort(20)\n nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),\n nMessage)\n return str(nRet) + szMessage.value.decode('utf-8')", - "docstring": "Flush all the queued records.\n\n :param site_service: The site.service where data was pushed\n :return: message whether function was successful" - }, - { - "code": "def get_fixture(self, fixture_id, head2head=None):\n filters = []\n if head2head is not None and int(head2head) > 0:\n self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.')\n filters.append(self.__createFilter('head2head', head2head))\n else:\n self.logger.debug(f'Getting fixture {fixture_id}.')\n return self._request('fixtures', fixture_id, filters=filters)", - "docstring": "Loads a single fixture.\n\n Args:\n * fixture_id (str): the id of the fixture\n * head2head (int, optional): load the previous n fixture of the two teams\n\n Returns:\n * :obj: json: the fixture-json" - }, - { - "code": "def get_gradebook_column_admin_session(self):\n if not self.supports_gradebook_column_admin():\n raise errors.Unimplemented()\n return sessions.GradebookColumnAdminSession(runtime=self._runtime)", - "docstring": "Gets the ``OsidSession`` associated with the gradebook column administration service.\n\n return: (osid.grading.GradebookColumnAdminSession) - a\n ``GradebookColumnAdminSession``\n raise: OperationFailed - unable to complete request\n raise: Unimplemented - ``supports_gradebook_column_admin()`` is\n ``false``\n *compliance: optional -- This method must be implemented if\n ``supports_gradebook_column_admin()`` is ``true``.*" - }, - { - "code": "def out(self):\n out = \"\"\n if self.use_sentinel:\n out += sentinel_var + \" = _coconut.object()\\n\"\n closes = 0\n for checks, defs in self.checkdefs:\n if checks:\n out += \"if \" + paren_join(checks, \"and\") + \":\\n\" + openindent\n closes += 1\n if defs:\n out += \"\\n\".join(defs) + \"\\n\"\n return out + (\n self.check_var + \" = True\\n\"\n + closeindent * closes\n + \"\".join(other.out() for other in self.others)\n + (\n \"if \" + self.check_var + \" and not (\"\n + paren_join(self.guards, \"and\")\n + \"):\\n\" + openindent\n + self.check_var + \" = False\\n\" + closeindent\n if self.guards else \"\"\n )\n )", - "docstring": "Return pattern-matching code." 
- }, - { - "code": "def execution():\n client = salt.client.get_local_client(__opts__['conf_file'])\n docs = {}\n try:\n for ret in client.cmd_iter('*', 'sys.doc', timeout=__opts__['timeout']):\n for v in six.itervalues(ret):\n docs.update(v)\n except SaltClientError as exc:\n print(exc)\n return []\n i = itertools.chain.from_iterable([six.iteritems(docs['ret'])])\n ret = dict(list(i))\n return ret", - "docstring": "Collect all the sys.doc output from each minion and return the aggregate\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-run doc.execution" - }, - { - "code": "def _update_zone(self, zone, status=None):\n if not zone in self._zones:\n raise IndexError('Zone does not exist and cannot be updated: %d', zone)\n old_status = self._zones[zone].status\n if status is None:\n status = old_status\n self._zones[zone].status = status\n self._zones[zone].timestamp = time.time()\n if status == Zone.CLEAR:\n if zone in self._zones_faulted:\n self._zones_faulted.remove(zone)\n self.on_restore(zone=zone)\n else:\n if old_status != status and status is not None:\n self.on_fault(zone=zone)", - "docstring": "Updates a zones status.\n\n :param zone: zone number\n :type zone: int\n :param status: zone status\n :type status: int\n\n :raises: IndexError" - }, - { - "code": "def POST_timegrid(self) -> None:\n init = hydpy.pub.timegrids.init\n sim = hydpy.pub.timegrids.sim\n sim.firstdate = self._inputs['firstdate']\n sim.lastdate = self._inputs['lastdate']\n state.idx1 = init[sim.firstdate]\n state.idx2 = init[sim.lastdate]", - "docstring": "Change the current simulation |Timegrid|." - }, - { - "code": "def compile_regex_from_str(self, ft_str):\n sequence = []\n for m in re.finditer(r'\\[([^]]+)\\]', ft_str):\n ft_mask = fts(m.group(1))\n segs = self.all_segs_matching_fts(ft_mask)\n sub_pat = '({})'.format('|'.join(segs))\n sequence.append(sub_pat)\n pattern = ''.join(sequence)\n regex = re.compile(pattern)\n return regex", - "docstring": "Given a string describing features masks for a sequence of segments,\n return a regex matching the corresponding strings.\n\n Args:\n ft_str (str): feature masks, each enclosed in square brackets, in\n which the features are delimited by any standard delimiter.\n\n Returns:\n Pattern: regular expression pattern equivalent to `ft_str`" - }, - { - "code": "def energy(data):\n data = np.mean(data, axis=1)\n return np.sum(data ** 2) / np.float64(len(data))", - "docstring": "Computes signal energy of data" - }, - { - "code": "def _prepare_results(self, results):\n if self._instances:\n results = self._to_instances(results)\n else:\n results = list(results)\n self._len = len(results)\n return results", - "docstring": "Called in _collection to prepare results from redis before returning\n them." 
- }, - { - "code": "def remap_index_fn(ref_file):\n return os.path.join(os.path.dirname(os.path.dirname(ref_file)), \"star\")", - "docstring": "Map sequence references to equivalent star indexes" - }, - { - "code": "def _check_rules(browser, rules_js, config):\n audit_run_script = dedent(u\n).format(\n rules_js=rules_js,\n custom_rules=config.custom_rules,\n context=config.context,\n options=config.rules\n )\n audit_results_script = dedent(u\n)\n browser.execute_script(audit_run_script)\n def audit_results_check_func():\n unicode_results = browser.execute_script(audit_results_script)\n try:\n results = json.loads(unicode_results)\n except (TypeError, ValueError):\n results = None\n if results:\n return True, results\n return False, None\n result = Promise(\n audit_results_check_func,\n \"Timed out waiting for a11y audit results.\",\n timeout=5,\n ).fulfill()\n audit_results = result.get('violations')\n return audit_results", - "docstring": "Run an accessibility audit on the page using the axe-core ruleset.\n\n Args:\n browser: a browser instance.\n rules_js: the ruleset JavaScript as a string.\n config: an AxsAuditConfig instance.\n\n Returns:\n A list of violations.\n\n Related documentation:\n\n https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object\n\n __Caution__: You probably don't really want to call this method\n directly! It will be used by `AxeCoreAudit.do_audit`." - }, - { - "code": "def prior_dates(*args, **kwargs):\n try:\n chron = args[0]\n except IndexError:\n chron = kwargs['coredates']\n d_r = np.array(kwargs['d_r'])\n d_std = np.array(kwargs['d_std'])\n t_a = np.array(kwargs['t_a'])\n t_b = np.array(kwargs['t_b'])\n try:\n normal_distr = kwargs['normal_distr']\n except KeyError:\n normal_distr = None\n cc_int = kwargs['cc']\n ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13',\n 3: 'SHCal13', 4: 'ConstCal'}\n if 'cc1' in kwargs:\n ccdict[1] = str(kwargs['cc1'])\n if 'cc2' in kwargs:\n ccdict[2] = str(kwargs['cc2'])\n if 'cc3' in kwargs:\n ccdict[3] = str(kwargs['cc3'])\n if 'cc4' in kwargs:\n ccdict[4] = str(kwargs['cc4'])\n cc = []\n for i in cc_int:\n i = int(i)\n cc.append(fetch_calibcurve(ccdict[i]))\n d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std,\n t_a=t_a, t_b=t_b, normal_distr=normal_distr)\n return d, p", - "docstring": "Get the prior distribution of calibrated radiocarbon dates" - }, - { - "code": "def appendRow(self, *args, **kwargs):\n\t\trow = self.RowType(*args, **kwargs)\n\t\tself.append(row)\n\t\treturn row", - "docstring": "Create and append a new row to this table, then return it\n\n\t\tAll positional and keyword arguments are passed to the RowType\n\t\tconstructor for this table." 
- }, - { - "code": "def register_type(self, oids, name, casting):\n assert type(oids) is tuple\n assert isinstance(name, basestring)\n assert hasattr(casting, '__call__')\n self._register_types.append((oids, name, casting))\n psycopg2.extensions.register_type(psycopg2.extensions.new_type(oids, name, casting))", - "docstring": "Callback to register data types when reconnect" - }, - { - "code": "def get_list_from_model(learn:Learner, ds_type:DatasetType, batch:Tuple)->[]:\n \"Factory method to convert a batch of model images to a list of ModelImageSet.\"\n image_sets = []\n x,y = batch[0],batch[1]\n preds = learn.pred_batch(ds_type=ds_type, batch=(x,y), reconstruct=True) \n for orig_px, real_px, gen in zip(x,y,preds):\n orig, real = Image(px=orig_px), Image(px=real_px)\n image_set = ModelImageSet(orig=orig, real=real, gen=gen)\n image_sets.append(image_set)\n return image_sets", - "docstring": "Factory method to convert a batch of model images to a list of ModelImageSet." - }, - { - "code": "def _get_distset(tgt):\n tgtattrs = tgt.split('-')\n if tgtattrs[0] == 'amzn':\n distset = '--define \"dist .{0}1\"'.format(tgtattrs[0])\n elif tgtattrs[1] in ['6', '7']:\n distset = '--define \"dist .el{0}\"'.format(tgtattrs[1])\n else:\n distset = ''\n return distset", - "docstring": "Get the distribution string for use with rpmbuild and mock" - }, - { - "code": "def projectname(self):\n if self._projectname is None:\n exps = self.config.experiments\n if self._experiment is not None and self._experiment in exps:\n return exps[self._experiment]['project']\n try:\n self._projectname = list(self.config.projects.keys())[-1]\n except IndexError:\n raise ValueError(\n \"No experiment has yet been created! Please run setup \"\n \"before.\")\n return self._projectname", - "docstring": "The name of the project that is currently processed" - }, - { - "code": "def import_module(module_path):\n if six.PY2:\n try:\n return importlib.import_module(module_path)\n except ImportError:\n tb = sys.exc_info()[2]\n stack = traceback.extract_tb(tb, 3)\n if len(stack) > 2:\n raise\n else:\n from importlib import find_loader\n if find_loader(module_path):\n return importlib.import_module(module_path)", - "docstring": "Try to import and return the given module, if it exists, None if it doesn't\n exist\n\n :raises ImportError: When imported module contains errors" - }, - { - "code": "def text(self, path, compression=None, lineSep=None):\n self._set_opts(compression=compression, lineSep=lineSep)\n self._jwrite.text(path)", - "docstring": "Saves the content of the DataFrame in a text file at the specified path.\n The text files will be encoded as UTF-8.\n\n :param path: the path in any Hadoop supported file system\n :param compression: compression codec to use when saving to file. This can be one of the\n known case-insensitive shorten names (none, bzip2, gzip, lz4,\n snappy and deflate).\n :param lineSep: defines the line separator that should be used for writing. If None is\n set, it uses the default value, ``\\\\n``.\n\n The DataFrame must have only one column that is of string type.\n Each row becomes a new line in the output file." 
- }, - { - "code": "def git_iter(sep, *args, git=maybeloggit, **kwargs):\n 'Generator of chunks of stdout from given git command, delineated by sep character'\n bufsize = 512\n err = io.StringIO()\n chunks = []\n try:\n for data in git('--no-pager', *args, _decode_errors='replace', _out_bufsize=bufsize, _iter=True, _err=err, **kwargs):\n while True:\n i = data.find(sep)\n if i < 0:\n break\n chunks.append(data[:i])\n data = data[i+1:]\n yield ''.join(chunks)\n chunks.clear()\n chunks.append(data)\n except sh.ErrorReturnCode as e:\n status('exit_code=%s' % e.exit_code)\n r = ''.join(chunks)\n if r:\n yield r\n errlines = err.getvalue().splitlines()\n if len(errlines) < 3:\n for line in errlines:\n status(line)\n else:\n vd().push(TextSheet('git ' + ' '.join(args), errlines))", - "docstring": "Generator of chunks of stdout from given git command, delineated by sep character" - }, - { - "code": "def set(self, name, value, index=-1):\n if isinstance(value, ElementProxy):\n value = value[0].to_er7()\n name = name.upper()\n reference = None if name is None else self.element.find_child_reference(name)\n child_ref, child_name = (None, None) if reference is None else (reference['ref'], reference['name'])\n if isinstance(value, basestring):\n child = self.element.parse_child(value, child_name=child_name, reference=child_ref)\n elif isinstance(value, Element):\n child = value\n elif isinstance(value, BaseDataType):\n child = self.create_element(name, False, reference)\n child.value = value\n else:\n raise ChildNotValid(value, child_name)\n if child.name != child_name:\n raise ChildNotValid(value, child_name)\n child_to_remove = self.child_at_index(child_name, index)\n if child_to_remove is None:\n self.append(child)\n else:\n self.replace_child(child_to_remove, child)\n self.element.set_parent_to_traversal()", - "docstring": "Assign the ``value`` to the child having the given ``name`` at the ``index`` position\n\n :type name: ``str``\n :param name: the child name (e.g. PID)\n\n :type value: an instance of :class:`Element `, a `str` or an instance of\n :class:`ElementProxy `\n :param value: the child value\n\n :type index: ``int``\n :param index: the child position (e.g. 
1)" - }, - { - "code": "def convert_value_to_es(value, ranges, obj, method=None):\n def sub_convert(val):\n if isinstance(val, BaseRdfDataType):\n return val.to_json\n elif isinstance(value, __MODULE__.rdfclass.RdfClassBase):\n return val.subject.sparql_uri\n return val\n if method == \"missing_obj\":\n rtn_obj = {\n \"rdf_type\": [rng.sparql_uri for rng in ranges],\n \"label\": [getattr(obj, label)[0] \\\n for label in LABEL_FIELDS \\\n if hasattr(obj, label)][0]}\n try:\n rtn_obj['uri'] = value.sparql_uri\n rtn_obj[\"rdfs_label\"] = NSM.nouri(value.sparql_uri)\n except AttributeError:\n rtn_obj['uri'] = \"None Specified\"\n rtn_obj['rdfs_label'] = sub_convert(value)\n rtn_obj['value'] = rtn_obj['rdfs_label']\n return rtn_obj\n return sub_convert(value)", - "docstring": "Takes an value and converts it to an elasticsearch representation\n\n args:\n value: the value to convert\n ranges: the list of ranges\n method: convertion method to use\n 'None': default -> converts the value to its json value\n 'missing_obj': adds attributes as if the value should have\n been a rdfclass object" - }, - { - "code": "def delete_api_key(self, api_key_id):\n api = self._get_api(iam.DeveloperApi)\n api.delete_api_key(api_key_id)\n return", - "docstring": "Delete an API key registered in the organisation.\n\n :param str api_key_id: The ID of the API key (Required)\n :returns: void" - }, - { - "code": "def get_all_active(self):\n now = timezone.now()\n return self.select_related().filter(active_datetime__lte=now,\n inactive_datetime__gte=now).order_by('active_datetime')", - "docstring": "Get all of the active messages ordered by the active_datetime." - }, - { - "code": "def set_miter_limit(self, limit):\n cairo.cairo_set_miter_limit(self._pointer, limit)\n self._check_status()", - "docstring": "Sets the current miter limit within the cairo context.\n\n If the current line join style is set to :obj:`MITER `\n (see :meth:`set_line_join`),\n the miter limit is used to determine\n whether the lines should be joined with a bevel instead of a miter.\n Cairo divides the length of the miter by the line width.\n If the result is greater than the miter limit,\n the style is converted to a bevel.\n\n As with the other stroke parameters,\n the current line cap style is examined by\n :meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,\n but does not have any effect during path construction.\n\n The default miter limit value is 10.0,\n which will convert joins with interior angles less than 11 degrees\n to bevels instead of miters.\n For reference,\n a miter limit of 2.0 makes the miter cutoff at 60 degrees,\n and a miter limit of 1.414 makes the cutoff at 90 degrees.\n\n A miter limit for a desired angle can be computed as:\n ``miter_limit = 1. / sin(angle / 2.)``\n\n :param limit: The miter limit to set.\n :type limit: float" - }, - { - "code": "def _init_item_marks(item_marks):\n if isinstance(item_marks, dict):\n return item_marks\n if item_marks:\n return {item_id:'>' for item_id in item_marks}", - "docstring": "Initialize the makred item dict." 
- }, - { - "code": "def read_label_file(path):\n labels = []\n for record in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=3):\n value = ''\n if len(record) > 2:\n value = str(record[2])\n labels.append([float(_clean_time(record[0])), float(_clean_time(record[1])), value])\n return labels", - "docstring": "Read the labels from an audacity label file.\n\n Args:\n path (str): Path to the label file.\n\n Returns:\n list: List of labels (start [sec], end [sec], label)\n\n Example::\n\n >>> read_label_file('/path/to/label/file.txt')\n [\n [0.0, 0.2, 'sie'],\n [0.2, 2.2, 'hallo']\n ]" - }, - { - "code": "def get_events_for_subscription(access_token, subscription_id, start_timestamp):\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/providers/microsoft.insights/eventtypes/management/values?api-version=',\n INSIGHTS_API, '&$filter=eventTimestamp ge \\'', start_timestamp, '\\''])\n return do_get(endpoint, access_token)", - "docstring": "Get the insights evens for a subsctipion since the specific timestamp.\n\n Args:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n start_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'.\n Returns:\n HTTP response. JSON body of insights events." - }, - { - "code": "def finish():\n pretend = context.get('pretend', False)\n if not pretend and (git.staged() or git.unstaged()):\n log.err(\n \"You have uncommitted changes in your repo!\\n\"\n \"You need to stash them before you merge the hotfix branch\"\n )\n sys.exit(1)\n develop = conf.get('git.devel_branch', 'develop')\n master = conf.get('git.master_branch', 'master')\n branch = git.current_branch(refresh=True)\n common.assert_branch_type('hotfix')\n common.git_checkout(master)\n common.git_pull(master)\n common.git_merge(master, branch.name)\n common.git_checkout(develop)\n common.git_pull(develop)\n common.git_merge(develop, branch.name)\n common.git_branch_delete(branch.name)\n common.git_prune()\n common.git_checkout(master)", - "docstring": "Merge current feature into develop." - }, - { - "code": "def stripped_name(self):\n name = self.name\n while True:\n name, n = self._parenthesis_re.subn('', name)\n if not n:\n break\n name = self._const_re.sub('', name)\n while True:\n name, n = self._angles_re.subn('', name)\n if not n:\n break\n return name", - "docstring": "Remove extraneous information from C++ demangled function names." - }, - { - "code": "def create(dataset, transformers):\n err_msg = \"The parameters 'transformers' must be a valid Transformer object.\"\n cls = transformers.__class__\n _raise_error_if_not_sframe(dataset, \"dataset\")\n if (cls == list):\n transformers = TransformerChain(transformers)\n else:\n if not issubclass(cls, TransformerBase):\n raise TypeError(err_msg)\n transformers.fit(dataset)\n return transformers", - "docstring": "Create a Transformer object to transform data for feature engineering.\n\n Parameters\n ----------\n dataset : SFrame\n The dataset to use for training the model.\n\n transformers: Transformer | list[Transformer]\n An Transformer or a list of Transformers.\n\n See Also\n --------\n turicreate.toolkits.feature_engineering._feature_engineering._TransformerBase\n\n Examples\n --------\n\n .. 
sourcecode:: python\n\n # Create data.\n >>> sf = turicreate.SFrame({'a': [1,2,3], 'b' : [2,3,4]})\n\n >>> from turicreate.feature_engineering import FeatureHasher, \\\n QuadraticFeatures, OneHotEncoder\n\n # Create a single transformer.\n >>> encoder = turicreate.feature_engineering.create(sf,\n OneHotEncoder(max_categories = 10))\n\n # Create a chain of transformers.\n >>> chain = turicreate.feature_engineering.create(sf, [\n QuadraticFeatures(),\n FeatureHasher()\n ])\n\n # Create a chain of transformers with names for each of the steps.\n >>> chain = turicreate.feature_engineering.create(sf, [\n ('quadratic', QuadraticFeatures()),\n ('hasher', FeatureHasher())\n ])" - }, - { - "code": "def lp10(self, subset_k, subset_p, weights={}):\n if self._z is None:\n self._add_minimization_vars()\n positive = set(subset_k) - self._flipped\n negative = set(subset_k) & self._flipped\n v = self._v.set(positive)\n cs = self._prob.add_linear_constraints(v >= self._epsilon)\n self._temp_constr.extend(cs)\n v = self._v.set(negative)\n cs = self._prob.add_linear_constraints(v <= -self._epsilon)\n self._temp_constr.extend(cs)\n self._prob.set_objective(self._z.expr(\n (rxnid, -weights.get(rxnid, 1)) for rxnid in subset_p))\n self._solve()", - "docstring": "Force reactions in K above epsilon while minimizing support of P.\n\n This program forces reactions in subset K to attain flux > epsilon\n while minimizing the sum of absolute flux values for reactions\n in subset P (L1-regularization)." - }, - { - "code": "def images(self):\n if not getattr(self, '_images', False):\n self._images = [\n page['imageinfo'][0]['url']\n for page in self.__continued_query({\n 'generator': 'images',\n 'gimlimit': 'max',\n 'prop': 'imageinfo',\n 'iiprop': 'url',\n })\n if 'imageinfo' in page\n ]\n return self._images", - "docstring": "List of URLs of images on the page." 
- }, - { - "code": "def compare_config(self, target, init=True, indent_level=0):\n if init:\n fwd = self.full_path_fwd\n bwd = self.full_path_bwd\n else:\n fwd = self.rel_path_fwd\n bwd = self.rel_path_bwd\n indent = 4*indent_level*' '\n if indent_level == 0 and self.vdom is not None:\n if self.vdom == 'global':\n pre = 'conf global\\n'\n else:\n pre = 'conf vdom\\n edit %s\\n' % self.vdom\n post = 'end'\n else:\n pre = ''\n post = ''\n pre_block = '%s%s' % (indent, fwd)\n post_block = '%s%s' % (indent, bwd)\n my_params = self.parameters.keys()\n ot_params = target.parameters.keys()\n text = ''\n for param in my_params:\n if param not in ot_params:\n text += ' %sunset %s\\n' % (indent, param)\n else:\n if str(self.get_param(param)).replace('\"', '') != str(target.get_param(param)).replace('\"', ''):\n text += ' %sset %s %s\\n' % (indent, param, target.get_param(param))\n for param in ot_params:\n if param not in my_params:\n text += ' %sset %s %s\\n' % (indent, param, target.get_param(param))\n my_blocks = self.sub_blocks.keys()\n ot_blocks = target.sub_blocks.keys()\n for block_name in my_blocks:\n if block_name not in ot_blocks:\n text += \" %sdelete %s\\n\" % (indent, block_name)\n else:\n text += self[block_name].compare_config(target[block_name], False, indent_level+1)\n for block_name in ot_blocks:\n if block_name not in my_blocks:\n text += target[block_name].to_text(True, indent_level+1, True)\n if text == '':\n return ''\n else:\n return '%s%s%s%s%s' % (pre, pre_block, text, post_block, post)", - "docstring": "This method will return all the necessary commands to get from the config we are in to the target\n config.\n\n Args:\n * **target** (:class:`~pyFG.forticonfig.FortiConfig`) - Target config.\n * **init** (bool) - This tells to the method if this is the first call to the method or if we are inside\\\n the recursion. You can ignore this parameter.\n * **indent_level** (int) - This tells the method how deep you are in the recursion. You can ignore it.\n\n Returns:\n A string containing all the necessary commands to reach the target config." - }, - { - "code": "def get_func_cfg_with_tainted_args(self, definition):\n log.debug(\"Getting CFG for %s\", definition.name)\n func_cfg = make_cfg(\n definition.node,\n self.project_modules,\n self.local_modules,\n definition.path,\n definition.module_definitions\n )\n args = Arguments(definition.node.args)\n if args:\n function_entry_node = func_cfg.nodes[0]\n function_entry_node.outgoing = list()\n first_node_after_args = func_cfg.nodes[1]\n first_node_after_args.ingoing = list()\n definition_lineno = definition.node.lineno\n for i, arg in enumerate(args):\n node_type = TaintedNode\n if i == 0 and arg == 'self':\n node_type = AssignmentNode\n arg_node = node_type(\n label=arg,\n left_hand_side=arg,\n ast_node=None,\n right_hand_side_variables=[],\n line_number=definition_lineno,\n path=definition.path\n )\n function_entry_node.connect(arg_node)\n func_cfg.nodes.insert(1, arg_node)\n arg_node.connect(first_node_after_args)\n return func_cfg", - "docstring": "Build a function cfg and return it, with all arguments tainted." - }, - { - "code": "def service_provider(*services):\n def real_decorator(clazz):\n instance = clazz()\n for service in services:\n global_lookup.add(service, instance)\n return clazz\n return real_decorator", - "docstring": "This is a class decorator that declares a class to provide a set of services.\n It is expected that the class has a no-arg constructor and will be instantiated\n as a singleton." 
- }, - { - "code": "def init(req, model):\n limit = req.get_param('page[limit]') or goldman.config.PAGE_LIMIT\n offset = req.get_param('page[offset]') or 0\n try:\n return Paginator(limit, offset)\n except ValueError:\n raise InvalidQueryParams(**{\n 'detail': 'The page[\'limit\'] & page[\'offset\'] query '\n 'params may only be specified once each & must '\n 'both be an integer >= 0.',\n 'links': 'jsonapi.org/format/',\n 'parameter': 'page',\n })", - "docstring": "Determine the pagination preference by query parameter\n\n Numbers only, >=0, & each query param may only be\n specified once.\n\n :return: Paginator object" - }, - { - "code": "def parse_group(self, stream):\n self.expect_in(stream, self.begin_group_tokens)\n self.ensure_assignment(stream)\n name = self.next_token(stream)\n self.skip_statement_delimiter(stream)\n statements = self.parse_block(stream, self.has_end_group)\n self.expect_in(stream, self.end_group_tokens)\n self.parse_end_assignment(stream, name)\n self.skip_statement_delimiter(stream)\n return name.decode('utf-8'), PVLGroup(statements)", - "docstring": "Block Name must match Block Name in paired End Group Statement if\n Block Name is present in End Group Statement.\n\n BeginGroupStmt ::=\n BeginGroupKeywd WSC AssignmentSymbol WSC BlockName StatementDelim" - }, - { - "code": "def page_count(self):\r\n postcount = self.post_set.count()\r\n max_pages = (postcount / get_paginate_by())\r\n if postcount % get_paginate_by() != 0:\r\n max_pages += 1\r\n return max_pages", - "docstring": "Get count of total pages" - }, - { - "code": "def call_extra(self, methods, kwargs):\n old = list(self._nonwrappers), list(self._wrappers)\n for method in methods:\n opts = dict(hookwrapper=False, trylast=False, tryfirst=False)\n hookimpl = HookImpl(None, \"\", method, opts)\n self._add_hookimpl(hookimpl)\n try:\n return self(**kwargs)\n finally:\n self._nonwrappers, self._wrappers = old", - "docstring": "Call the hook with some additional temporarily participating\n methods using the specified kwargs as call parameters."
- }, - { - "code": "def get_root_gradebook_ids(self):\n if self._catalog_session is not None:\n return self._catalog_session.get_root_catalog_ids()\n return self._hierarchy_session.get_roots()", - "docstring": "Gets the root gradebook ``Ids`` in this hierarchy.\n\n return: (osid.id.IdList) - the root gradebook ``Ids``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def offline_plotly_data(data, filename=None, config=None, validate=True,\n default_width='100%', default_height=525, global_requirejs=False):\n r\n config_default = dict(DEFAULT_PLOTLY_CONFIG)\n if config is not None:\n config_default.update(config)\n with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f:\n js = f.read()\n html, divid, width, height = _plot_html(\n data,\n config=config_default,\n validate=validate,\n default_width=default_width, default_height=default_height,\n global_requirejs=global_requirejs)\n html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html)\n if filename and isinstance(filename, str):\n with open(filename, 'wt') as f:\n f.write(html)\n return html", - "docstring": "r\"\"\" Write a plotly scatter plot to HTML file that doesn't require server\n\n >>> from nlpia.loaders import get_data\n >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv')\n >>> df.columns = [eval(c) if c[0] in '\"\\'' else str(c) for c in df.columns]\n >>> data = {'data': [\n ... Scatter(x=df[continent+', x'],\n ... y=df[continent+', y'],\n ... text=df[continent+', text'],\n ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,),\n ... mode='markers',\n ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']\n ... ],\n ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log'))\n ... }\n >>> html = offline_plotly_data(data, filename=None)" - }, - { - "code": "def _initialize_trunk_interfaces_to_none(self, switch_ip, replay=True):\n try:\n switch_ifs = self._mdriver._get_switch_interfaces(\n switch_ip, cfg_only=(False if replay else True))\n if not switch_ifs:\n LOG.debug(\"Skipping switch %s which has no configured \"\n \"interfaces\",\n switch_ip)\n return\n self._driver.initialize_all_switch_interfaces(\n switch_ifs, switch_ip)\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.warning(\"Unable to initialize interfaces to \"\n \"switch %(switch_ip)s\",\n {'switch_ip': switch_ip})\n self._mdriver.register_switch_as_inactive(switch_ip,\n 'replay init_interface')\n if self._mdriver.is_replay_enabled():\n return", - "docstring": "Initialize all nexus interfaces to trunk allowed none." - }, - { - "code": "def _api_delete(self, url, **kwargs):\n kwargs['url'] = self.url + url\n kwargs['auth'] = self.auth\n headers = deepcopy(self.headers)\n headers.update(kwargs.get('headers', {}))\n kwargs['headers'] = headers\n self._delete(**kwargs)", - "docstring": "A convenience wrapper for _delete. 
Adds headers, auth and base url by\n default" - }, - { - "code": "def cancel(self, orderNumbers, account=None, **kwargs):\n if not account:\n if \"default_account\" in self.config:\n account = self.config[\"default_account\"]\n if not account:\n raise ValueError(\"You need to provide an account\")\n account = Account(account, full=False, blockchain_instance=self)\n if not isinstance(orderNumbers, (list, set, tuple)):\n orderNumbers = {orderNumbers}\n op = []\n for order in orderNumbers:\n op.append(\n operations.Limit_order_cancel(\n **{\n \"fee\": {\"amount\": 0, \"asset_id\": \"1.3.0\"},\n \"fee_paying_account\": account[\"id\"],\n \"order\": order,\n \"extensions\": [],\n \"prefix\": self.prefix,\n }\n )\n )\n return self.finalizeOp(op, account[\"name\"], \"active\", **kwargs)", - "docstring": "Cancels an order you have placed in a given market. Requires\n only the \"orderNumbers\". An order number takes the form\n ``1.7.xxx``.\n\n :param str orderNumbers: The Order Object ide of the form\n ``1.7.xxxx``" - }, - { - "code": "def to_csv(self, filename=None,\n encoding=export.ENCODING, dialect=export.DIALECT,\n make_filename=export.MAKE_FILENAME):\n if filename is None:\n if make_filename is None:\n make_filename = export.MAKE_FILENAME\n infos = {\n 'id': self._spreadsheet._id,\n 'title': self._spreadsheet._title,\n 'sheet': self._title,\n 'gid': self._id,\n 'index': self._index,\n 'dialect': dialect,\n }\n if isinstance(make_filename, string_types):\n filename = make_filename % infos\n else:\n filename = make_filename(infos)\n with export.open_csv(filename, 'w', encoding=encoding) as fd:\n export.write_csv(fd, self._values, encoding, dialect)", - "docstring": "Dump the worksheet to a CSV file.\n\n Args:\n filename (str): result filename (if ``None`` use ``make_filename``)\n encoding (str): result string encoding\n dialect (str): :mod:`csv` dialect name or object to use\n make_filename: template or one-argument callable returning the filename\n\n If ``make_filename`` is a string, it is string-interpolated with an\n infos-dictionary with the fields ``id`` (spreadhseet id), ``title``\n (spreadsheet title), ``sheet`` (worksheet title), ``gid`` (worksheet\n id), ``index`` (worksheet index), and ``dialect`` CSV dialect to\n generate the filename: ``filename = make_filename % infos``.\n\n If ``make_filename`` is a callable, it will be called with the\n infos-dictionary to generate the filename:\n ``filename = make_filename(infos)``." - }, - { - "code": "def Verify(self):\n if not self.Hash.ToBytes() == GetGenesis().Hash.ToBytes():\n return False\n bc = GetBlockchain()\n if not bc.ContainsBlock(self.Index):\n return False\n if self.Index > 0:\n prev_header = GetBlockchain().GetHeader(self.PrevHash.ToBytes())\n if prev_header is None:\n return False\n if prev_header.Index + 1 != self.Index:\n return False\n if prev_header.Timestamp >= self.Timestamp:\n return False\n if not Helper.VerifyScripts(self):\n return False\n return True", - "docstring": "Verify block using the verification script.\n\n Returns:\n bool: True if valid. False otherwise." 
- }, - { - "code": "def _on_access_token(self, future, response):\n content = escape.json_decode(response.body)\n if 'error' in content:\n LOGGER.error('Error fetching access token: %s', content['error'])\n future.set_exception(auth.AuthError('Github auth error: %s' %\n str(content['error'])))\n return\n callback = self.async_callback(self._on_github_user, future,\n content['access_token'])\n self.github_request('user', callback, content['access_token'])", - "docstring": "Invoked as a callback when GitHub has returned a response to the\n access token request.\n\n :param method future: The callback method to pass along\n :param tornado.httpclient.HTTPResponse response: The HTTP response" - }, - { - "code": "def strel_octagon(radius):\n iradius = int(radius)\n i, j = np.mgrid[-iradius:(iradius + 1), -iradius:(iradius+1)]\n dradius = float(iradius) * np.sqrt(2)\n strel = (((i+j) <= dradius) & ((i+j) >= -dradius) &\n ((i-j) <= dradius) & ((i-j) >= -dradius))\n return strel", - "docstring": "Create an octagonal structuring element for morphological operations\n \n radius - the distance from the origin to each edge of the octagon" - }, - { - "code": "def fetch(self, request, callback=None, raise_error=True, **kwargs):\n if isinstance(request, str):\n request = HTTPRequest(request, **kwargs)\n try:\n response = yield self._authorized_fetch(request,\n callback,\n raise_error=False,\n **kwargs)\n if response.code == BAD_TOKEN:\n yield self._token_manager.reset_token()\n elif response.error and raise_error:\n raise response.error\n else:\n raise gen.Return(response)\n response = yield self._authorized_fetch(request,\n callback,\n raise_error=raise_error,\n **kwargs)\n raise gen.Return(response)\n except TokenError as err:\n yield self._token_manager.reset_token()\n raise err", - "docstring": "Executes a request by AsyncHTTPClient,\n asynchronously returning an `tornado.HTTPResponse`.\n\n The ``raise_error=False`` argument currently suppresses\n *all* errors, encapsulating them in `HTTPResponse` objects\n following the tornado http-client standard" - }, - { - "code": "def create_alignment(self, x_align=0, y_align=0, x_scale=0, y_scale=0):\n align = Gtk.Alignment()\n align.set(x_align, y_align, x_scale, y_scale)\n return align", - "docstring": "Function creates an alignment" - }, - { - "code": "def batch_start(job, input_args):\n shared_files = ['ref.fa', 'ref.fa.amb', 'ref.fa.ann', 'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai']\n shared_ids = {}\n for fname in shared_files:\n url = input_args[fname]\n shared_ids[fname] = job.addChildJobFn(download_from_url, url, fname).rv()\n job.addFollowOnJobFn(spawn_batch_jobs, shared_ids, input_args)", - "docstring": "Downloads shared files that are used by all samples for alignment and places them in the jobstore." 
- }, - { - "code": "def _set_widths(self, row, proc_group):\n width_free = self.style[\"width_\"] - sum(\n [sum(self.fields[c].width for c in self.columns),\n self.width_separtor])\n if width_free < 0:\n width_fixed = sum(\n [sum(self.fields[c].width for c in self.columns\n if c not in self.autowidth_columns),\n self.width_separtor])\n assert width_fixed > self.style[\"width_\"], \"bug in width logic\"\n raise elements.StyleError(\n \"Fixed widths specified in style exceed total width\")\n elif width_free == 0:\n lgr.debug(\"Not checking widths; no free width left\")\n return False\n lgr.debug(\"Checking width for row %r\", row)\n adjusted = False\n for column in sorted(self.columns, key=lambda c: self.fields[c].width):\n if width_free < 1:\n lgr.debug(\"Giving up on checking widths; no free width left\")\n break\n if column in self.autowidth_columns:\n field = self.fields[column]\n lgr.debug(\"Checking width of column %r \"\n \"(field width: %d, free width: %d)\",\n column, field.width, width_free)\n if field.pre[proc_group]:\n value = field(row[column], keys=[proc_group],\n exclude_post=True)\n else:\n value = row[column]\n value = six.text_type(value)\n value_width = len(value)\n wmax = self.autowidth_columns[column][\"max\"]\n if value_width > field.width:\n width_old = field.width\n width_available = width_free + field.width\n width_new = min(value_width,\n wmax or width_available,\n width_available)\n if width_new > width_old:\n adjusted = True\n field.width = width_new\n lgr.debug(\"Adjusting width of %r column from %d to %d \"\n \"to accommodate value %r\",\n column, width_old, field.width, value)\n self._truncaters[column].length = field.width\n width_free -= field.width - width_old\n lgr.debug(\"Free width is %d after processing column %r\",\n width_free, column)\n return adjusted", - "docstring": "Update auto-width Fields based on `row`.\n\n Parameters\n ----------\n row : dict\n proc_group : {'default', 'override'}\n Whether to consider 'default' or 'override' key for pre- and\n post-format processors.\n\n Returns\n -------\n True if any widths required adjustment." 
- }, - { - "code": "def get_model(self):\n if self.network_type == 'BAYES':\n model = BayesianModel()\n model.add_nodes_from(self.variables)\n model.add_edges_from(self.edges)\n tabular_cpds = []\n for cpd in self.tables:\n child_var = cpd[0]\n states = int(self.domain[child_var])\n arr = list(map(float, cpd[1]))\n values = np.array(arr)\n values = values.reshape(states, values.size // states)\n tabular_cpds.append(TabularCPD(child_var, states, values))\n model.add_cpds(*tabular_cpds)\n return model\n elif self.network_type == 'MARKOV':\n model = MarkovModel(self.edges)\n factors = []\n for table in self.tables:\n variables = table[0]\n cardinality = [int(self.domain[var]) for var in variables]\n value = list(map(float, table[1]))\n factor = DiscreteFactor(variables=variables, cardinality=cardinality, values=value)\n factors.append(factor)\n model.add_factors(*factors)\n return model", - "docstring": "Returns an instance of Bayesian Model or Markov Model.\n Varibles are in the pattern var_0, var_1, var_2 where var_0 is\n 0th index variable, var_1 is 1st index variable.\n\n Return\n ------\n model: an instance of Bayesian or Markov Model.\n\n Examples\n --------\n >>> reader = UAIReader('TestUAI.uai')\n >>> reader.get_model()" - }, - { - "code": "def get_metrics(tag):\n if tag is None:\n return metrics.metrics_by_name_list(metrics.metrics())\n else:\n return metrics.metrics_by_tag(tag)", - "docstring": "Return the values for the metrics with the given tag or all the available metrics if None" - }, - { - "code": "def get_unavailable_brokers(zk, partition_metadata):\n topic_data = zk.get_topics(partition_metadata.topic)\n topic = partition_metadata.topic\n partition = partition_metadata.partition\n expected_replicas = set(topic_data[topic]['partitions'][str(partition)]['replicas'])\n available_replicas = set(partition_metadata.replicas)\n return expected_replicas - available_replicas", - "docstring": "Returns the set of unavailable brokers from the difference of replica\n set of given partition to the set of available replicas." - }, - { - "code": "def _ImportPythonModule(module_name):\n try:\n module_object = list(map(__import__, [module_name]))[0]\n except ImportError:\n return None\n if '.' in module_name:\n for submodule_name in module_name.split('.')[1:]:\n module_object = getattr(module_object, submodule_name, None)\n return module_object", - "docstring": "Imports a Python module.\n\n Args:\n module_name (str): name of the module.\n\n Returns:\n module: Python module or None if the module cannot be imported." 
- }, - { - "code": "def _get_ref_alt(self, var, boundary):\n if var.posedit.edit.type == \"ins\" or var.posedit.edit.type == \"dup\":\n ref = \"\"\n else:\n if var.posedit.edit.ref_s is None or var.posedit.edit.ref == \"\":\n ref = self._fetch_bounded_seq(var, var.posedit.pos.start.base - 1,\n var.posedit.pos.end.base, 0, boundary)\n else:\n ref = var.posedit.edit.ref\n if var.posedit.edit.type == \"sub\" or var.posedit.edit.type == \"delins\" or var.posedit.edit.type == \"ins\":\n alt = var.posedit.edit.alt\n elif var.posedit.edit.type == \"del\":\n alt = \"\"\n elif var.posedit.edit.type == \"dup\":\n alt = var.posedit.edit.ref or self._fetch_bounded_seq(\n var, var.posedit.pos.start.base - 1, var.posedit.pos.end.base, 0, boundary)\n elif var.posedit.edit.type == \"inv\":\n alt = reverse_complement(ref)\n elif var.posedit.edit.type == \"identity\":\n alt = ref\n return ref, alt", - "docstring": "Get reference allele and alternative allele of the variant" - }, - { - "code": "def format(self, record):\n s = super(ANSIFormatter, self).format(record)\n if hasattr(self.context, 'ansi'):\n s = self.context.ansi(s, **self.get_sgr(record))\n return s", - "docstring": "Overridden method that applies SGR codes to log messages." - }, - { - "code": "def tdb_minus_tt(jd_tdb):\n t = (jd_tdb - T0) / 36525.0\n return (0.001657 * sin ( 628.3076 * t + 6.2401)\n + 0.000022 * sin ( 575.3385 * t + 4.2970)\n + 0.000014 * sin (1256.6152 * t + 6.1969)\n + 0.000005 * sin ( 606.9777 * t + 4.0212)\n + 0.000005 * sin ( 52.9691 * t + 0.4444)\n + 0.000002 * sin ( 21.3299 * t + 5.5431)\n + 0.000010 * t * sin ( 628.3076 * t + 4.2490))", - "docstring": "Computes how far TDB is in advance of TT, given TDB.\n\n Given that the two time scales never diverge by more than 2ms, TT\n can also be given as the argument to perform the conversion in the\n other direction." 
- }, - { - "code": "def cleanParagraph(self):\n runs = self.block.content\n if not runs:\n self.block = None\n return\n if not self.clean_paragraphs:\n return\n joinedRuns = []\n hasContent = False\n for run in runs:\n if run.content[0]:\n hasContent = True\n else:\n continue\n if not run.content[0].strip():\n run.properties = {}\n if joinedRuns and (run.properties == joinedRuns[-1].properties):\n joinedRuns[-1].content[0] += run.content[0]\n else:\n joinedRuns.append(run)\n if hasContent:\n joinedRuns[0].content[0] = joinedRuns[0].content[0].lstrip()\n joinedRuns[-1].content[0] = joinedRuns[-1].content[0].rstrip()\n self.block.content = joinedRuns\n else:\n self.block = None", - "docstring": "Compress text runs, remove whitespace at start and end,\n skip empty blocks, etc" - }, - { - "code": "def generateComponent(m):\r\n freq = 25*np.random.random()\r\n phase = 2*np.pi*np.random.random()\r\n x = np.arange(m)\r\n return np.cos(x/freq-phase)**2", - "docstring": "Creates oscillating components to be mixed" - }, - { - "code": "def leave_group(self, group_id, timeout=None):\n self._post(\n '/v2/bot/group/{group_id}/leave'.format(group_id=group_id),\n timeout=timeout\n )", - "docstring": "Call leave group API.\n\n https://devdocs.line.me/en/#leave\n\n Leave a group.\n\n :param str group_id: Group ID\n :param timeout: (optional) How long to wait for the server\n to send data before giving up, as a float,\n or a (connect timeout, read timeout) float tuple.\n Default is self.http_client.timeout\n :type timeout: float | tuple(float, float)" - }, - { - "code": "def parse_env_file(env_file):\n environment = {}\n with open(env_file, 'r') as f:\n for line in f:\n if line[0] == '\n continue\n line = line.strip()\n if not line:\n continue\n parse_line = line.split('=', 1)\n if len(parse_line) == 2:\n k, v = parse_line\n environment[k] = v\n else:\n raise errors.DockerException(\n 'Invalid line in environment file {0}:\\n{1}'.format(\n env_file, line))\n return environment", - "docstring": "Reads a line-separated environment file.\n The format of each line should be \"key=value\"." - }, - { - "code": "def list(self, per_page=None, page=None, status=None, service='facebook'):\n params = {}\n if per_page is not None:\n params['per_page'] = per_page\n if page is not None:\n params['page'] = page\n if status:\n params['status'] = status\n return self.request.get(service + '/task', params)", - "docstring": "Get a list of Pylon tasks\n\n :param per_page: How many tasks to display per page\n :type per_page: int\n :param page: Which page of tasks to display\n :type page: int\n :param status: The status of the tasks to list\n :type page: string\n :param service: The PYLON service (facebook)\n :type service: str\n :return: dict of REST API output with headers attached\n :rtype: :class:`~datasift.request.DictResponse`\n :raises: :class:`~datasift.exceptions.DataSiftApiException`,\n :class:`requests.exceptions.HTTPError`" - }, - { - "code": "def info(rq, ctx, path, interval, raw, only_queues, only_workers, by_queue,\n queues):\n \"RQ command-line monitor.\"\n return ctx.invoke(\n rq_cli.info,\n path=path,\n interval=interval,\n raw=raw,\n only_queues=only_queues,\n only_workers=only_workers,\n by_queue=by_queue,\n queues=queues or rq.queues,\n **shared_options(rq)\n )", - "docstring": "RQ command-line monitor." 
- }, - { - "code": "def _set_vibration_win(self, left_motor, right_motor, duration):\n self._start_vibration_win(left_motor, right_motor)\n stop_process = Process(target=delay_and_stop,\n args=(duration,\n self.manager.xinput_dll,\n self.__device_number))\n stop_process.start()", - "docstring": "Control the motors on Windows." - }, - { - "code": "def complete_watch(self, text, *_):\n return [t + \" \" for t in self.engine.cached_descriptions if t.startswith(text)]", - "docstring": "Autocomplete for watch" - }, - { - "code": "def confirm_register_form_factory(Form, app):\n if app.config.get('RECAPTCHA_PUBLIC_KEY') and \\\n app.config.get('RECAPTCHA_PRIVATE_KEY'):\n class ConfirmRegisterForm(Form):\n recaptcha = FormField(RegistrationFormRecaptcha, separator='.')\n return ConfirmRegisterForm\n return Form", - "docstring": "Return confirmation for extended registration form." - }, - { - "code": "def _split_classes_and_methods(folds):\n classes = []\n functions = []\n for fold in folds:\n if fold.def_type == OED.FUNCTION_TOKEN:\n functions.append(fold)\n elif fold.def_type == OED.CLASS_TOKEN:\n classes.append(fold)\n return classes, functions", - "docstring": "Split out classes and methods into two separate lists.\n\n Parameters\n ----------\n folds : list of :class:`FoldScopeHelper`\n The result of :func:`_get_fold_levels`.\n\n Returns\n -------\n classes, functions: list of :class:`FoldScopeHelper`\n Two separate lists of :class:`FoldScopeHelper` objects. The former\n contains only class definitions while the latter contains only\n function/method definitions." - }, - { - "code": "def _import_plugin(module_name, plugin_path, modnames, modlist):\r\n if module_name in modnames:\r\n return\r\n try:\r\n mock = _ModuleMock()\r\n mock.LOCALEPATH = osp.join(plugin_path, module_name, 'locale')\r\n sys.modules[module_name] = mock\r\n if osp.isdir(osp.join(plugin_path, module_name)):\r\n module = _import_module_from_path(module_name, plugin_path)\r\n else:\r\n module = None\r\n if module and getattr(module, 'PLUGIN_CLASS', False):\r\n sys.modules[module_name] = module\r\n modlist.append(module)\r\n modnames.append(module_name)\r\n except Exception:\r\n sys.stderr.write(\"ERROR: 3rd party plugin import failed for \"\r\n \"`{0}`\\n\".format(module_name))\r\n traceback.print_exc(file=sys.stderr)", - "docstring": "Import the plugin `module_name` from `plugin_path`, add it to `modlist`\r\n and adds its name to `modnames`." - }, - { - "code": "def create_release_vcs(path, vcs_name=None):\n from rez.plugin_managers import plugin_manager\n vcs_types = get_release_vcs_types()\n if vcs_name:\n if vcs_name not in vcs_types:\n raise ReleaseVCSError(\"Unknown version control system: %r\" % vcs_name)\n cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)\n return cls(path)\n classes_by_level = {}\n for vcs_name in vcs_types:\n cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)\n result = cls.find_vcs_root(path)\n if not result:\n continue\n vcs_path, levels_up = result\n classes_by_level.setdefault(levels_up, []).append((cls, vcs_path))\n if not classes_by_level:\n raise ReleaseVCSError(\"No version control system for package \"\n \"releasing is associated with the path %s\" % path)\n lowest_level = sorted(classes_by_level)[0]\n clss = classes_by_level[lowest_level]\n if len(clss) > 1:\n clss_str = \", \".join(x[0].name() for x in clss)\n raise ReleaseVCSError(\"Several version control systems are associated \"\n \"with the path %s: %s. 
Use rez-release --vcs to \"\n \"choose.\" % (path, clss_str))\n else:\n cls, vcs_root = clss[0]\n return cls(pkg_root=path, vcs_root=vcs_root)", - "docstring": "Return a new release VCS that can release from this source path." - }, - { - "code": "def _executable_memory_regions(self, objects=None, force_segment=False):\n if objects is None:\n binaries = self.project.loader.all_objects\n else:\n binaries = objects\n memory_regions = [ ]\n for b in binaries:\n if isinstance(b, ELF):\n if not force_segment and b.sections:\n for section in b.sections:\n if section.is_executable:\n tpl = (section.min_addr, section.max_addr)\n memory_regions.append(tpl)\n else:\n for segment in b.segments:\n if segment.is_executable:\n tpl = (segment.min_addr, segment.max_addr)\n memory_regions.append(tpl)\n elif isinstance(b, PE):\n for section in b.sections:\n if section.is_executable:\n tpl = (section.min_addr, section.max_addr)\n memory_regions.append(tpl)\n elif isinstance(b, MachO):\n if b.segments:\n for seg in b.segments:\n if seg.is_executable:\n for section in seg.sections:\n tpl = (section.min_addr, section.max_addr)\n memory_regions.append(tpl)\n elif isinstance(b, Blob):\n tpl = (b.min_addr, b.max_addr)\n memory_regions.append(tpl)\n elif isinstance(b, self._cle_pseudo_objects):\n pass\n else:\n l.warning('Unsupported object format \"%s\". Treat it as an executable.', b.__class__.__name__)\n tpl = (b.min_addr, b.max_addr)\n memory_regions.append(tpl)\n if not memory_regions:\n memory_regions = [(start, start + len(backer)) for start, backer in self.project.loader.memory.backers()]\n memory_regions = sorted(memory_regions, key=lambda x: x[0])\n return memory_regions", - "docstring": "Get all executable memory regions from the binaries\n\n :param objects: A collection of binary objects to collect regions from. If None, regions from all project\n binary objects are used.\n :param bool force_segment: Rely on binary segments instead of sections.\n :return: A sorted list of tuples (beginning_address, end_address)" - }, - { - "code": "def export(self, name, columns, points):\n WHITELIST = '_-' + string.ascii_letters + string.digits\n SUBSTITUTE = '_'\n def whitelisted(s,\n whitelist=WHITELIST,\n substitute=SUBSTITUTE):\n return ''.join(c if c in whitelist else substitute for c in s)\n for sensor, value in zip(columns, points):\n try:\n sensor = [whitelisted(name) for name in sensor.split('.')]\n tobeexport = [self.topic, self.hostname, name]\n tobeexport.extend(sensor)\n topic = '/'.join(tobeexport)\n self.client.publish(topic, value)\n except Exception as e:\n logger.error(\"Can not export stats to MQTT server (%s)\" % e)", - "docstring": "Write the points in MQTT." - }, - { - "code": "def add_feature(self, obj=None, geometry=None, properties=None):\n properties = properties or {}\n if isinstance(obj, Feature):\n feat = obj._data\n elif isinstance(obj, dict):\n feat = obj.copy()\n else:\n feat = Feature(geometry=geometry, properties=properties).__geo_interface__\n self._data[\"features\"].append(feat)", - "docstring": "Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.\n\n Parameters:\n\n - **obj**: Another feature instance, an object with the \\_\\_geo_interface__ or a geojson dictionary of the Feature type.\n - **geometry** (optional): Anything that the Geometry instance can accept.\n - **properties** (optional): A dictionary of key-value property pairs." 
- }, - { - "code": "def generate(self):\n exp_name = self.exp_name()\n fname = os.path.basename(self.out_path)\n fname = \"{exp}_{prefix}_{name}{ending}\".format(\n exp=exp_name,\n prefix=os.path.splitext(fname)[0],\n ending=os.path.splitext(fname)[-1],\n name=\"full\")\n first = True\n for chunk in self.report():\n print(\"Writing chunk to :'{0}'\".format(fname))\n chunk.to_csv(fname, header=first, mode='a')\n first = False", - "docstring": "Fetch all rows associated with this experiment.\n\n This will generate a huge .csv." - }, - { - "code": "def ibis_schema_apply_to(schema, df):\n for column, dtype in schema.items():\n pandas_dtype = dtype.to_pandas()\n col = df[column]\n col_dtype = col.dtype\n try:\n not_equal = pandas_dtype != col_dtype\n except TypeError:\n not_equal = True\n if not_equal or dtype == dt.string:\n df[column] = convert(col_dtype, dtype, col)\n return df", - "docstring": "Applies the Ibis schema to a pandas DataFrame\n\n Parameters\n ----------\n schema : ibis.schema.Schema\n df : pandas.DataFrame\n\n Returns\n -------\n df : pandas.DataFrame\n\n Notes\n -----\n Mutates `df`" - }, - { - "code": "async def multi_call_async(self, calls):\n formatted_calls = [dict(method=call[0], params=call[1] if len(call) > 1 else {}) for call in calls]\n return await self.call_async('ExecuteMultiCall', calls=formatted_calls)", - "docstring": "Performs an async multi-call to the API\n\n :param calls: A list of call 2-tuples with method name and params (for example, ('Get', dict(typeName='Trip')) )\n :return: The JSON result (decoded into a dict) from the server\n :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server\n :raise TimeoutException: Raises when the request does not respond after some time." - }, - { - "code": "def _get_song(self):\n if self.at_beginning:\n if self.pos < len(self.start):\n return self.start[self.pos]\n self.at_beginning = False\n self._new_track()\n s = self._get_selectable()\n while self.pos >= len(s):\n self._new_song()\n s = self._get_selectable()\n return s[self.pos]", - "docstring": "Used internally to get the current track and make sure it exists." 
- }, - { - "code": "def isToneCal(self):\n return self.ui.calTypeCmbbx.currentIndex() == self.ui.calTypeCmbbx.count() -1", - "docstring": "Whether the currently selected calibration stimulus type is the calibration curve\n\n :returns: boolean -- if the current combo box selection is calibration curve" - }, - { - "code": "def check_key(self, key, raise_error=True, *args, **kwargs):\n return check_key(\n key, possible_keys=list(self), raise_error=raise_error,\n name='formatoption keyword', *args, **kwargs)", - "docstring": "Checks whether the key is a valid formatoption\n\n Parameters\n ----------\n %(check_key.parameters.no_possible_keys|name)s\n\n Returns\n -------\n %(check_key.returns)s\n\n Raises\n ------\n %(check_key.raises)s" - }, - { - "code": "def classify_class_attrs(cls):\n mro = getmro(cls)\n names = dir(cls)\n result = []\n for name in names:\n if name in cls.__dict__:\n obj = cls.__dict__[name]\n else:\n obj = getattr(cls, name)\n homecls = getattr(obj, \"__objclass__\", None)\n if homecls is None:\n for base in mro:\n if name in base.__dict__:\n homecls = base\n break\n if homecls is not None and name in homecls.__dict__:\n obj = homecls.__dict__[name]\n obj_via_getattr = getattr(cls, name)\n if isinstance(obj, staticmethod):\n kind = \"static method\"\n elif isinstance(obj, classmethod):\n kind = \"class method\"\n elif isinstance(obj, property):\n kind = \"property\"\n elif (ismethod(obj_via_getattr) or\n ismethoddescriptor(obj_via_getattr)):\n kind = \"method\"\n else:\n kind = \"data\"\n result.append((name, kind, homecls, obj))\n return result", - "docstring": "Return list of attribute-descriptor tuples.\n\n For each name in dir(cls), the return list contains a 4-tuple\n with these elements:\n\n 0. The name (a string).\n\n 1. The kind of attribute this is, one of these strings:\n 'class method' created via classmethod()\n 'static method' created via staticmethod()\n 'property' created via property()\n 'method' any other flavor of method\n 'data' not a method\n\n 2. The class which defined this attribute (a class).\n\n 3. The object as obtained directly from the defining class's\n __dict__, not via getattr. This is especially important for\n data attributes: C.data is just a data object, but\n C.__dict__['data'] may be a data descriptor with additional\n info, like a __doc__ string." - }, - { - "code": "def get_missing_value_key(d):\n _mv = \"nan\"\n try:\n _mv = d[\"missingValue\"]\n except KeyError as e:\n logger_misc.info(\"get_missing_value: No missing value key found: {}\".format(e))\n except AttributeError as e:\n logger_misc.warn(\"get_missing_value: Column is wrong data type: {}\".format(e))\n if not _mv:\n try:\n for k, v in d[\"columns\"].items():\n _mv = v[\"missingValue\"]\n break\n except KeyError:\n pass\n return _mv", - "docstring": "Get the Missing Value entry from a table of data. 
If none is found, try the columns.\n If still none found, prompt user.\n\n :param dict d: Table of data\n :return str _mv: Missing Value" - }, - { - "code": "def _apply_new_data_port_default_value(self, path, new_default_value_str):\n try:\n data_port_id = self.list_store[path][self.ID_STORAGE_ID]\n if isinstance(self.model.state, LibraryState):\n if self.list_store[path][self.USE_RUNTIME_VALUE_STORAGE_ID]:\n self.set_data_port_runtime_value(data_port_id, new_default_value_str)\n else:\n if str(self.state_data_port_dict[data_port_id].default_value) != new_default_value_str:\n self.state_data_port_dict[data_port_id].default_value = new_default_value_str\n except (TypeError, AttributeError) as e:\n logger.exception(\"Error while changing default value\")", - "docstring": "Applies the new default value of the data port defined by path\n\n :param str path: The path identifying the edited variable\n :param str new_default_value_str: New default value as string" - }, - { - "code": "def do_scan_trigger(sk, if_index, driver_id, mcid):\n _LOGGER.debug('Joining group %d.', mcid)\n ret = nl_socket_add_membership(sk, mcid)\n if ret < 0:\n return ret\n msg = nlmsg_alloc()\n genlmsg_put(msg, 0, 0, driver_id, 0, 0, nl80211.NL80211_CMD_TRIGGER_SCAN, 0)\n nla_put_u32(msg, nl80211.NL80211_ATTR_IFINDEX, if_index)\n ssids_to_scan = nlmsg_alloc()\n nla_put(ssids_to_scan, 1, 0, b'')\n nla_put_nested(msg, nl80211.NL80211_ATTR_SCAN_SSIDS, ssids_to_scan)\n err = ctypes.c_int(1)\n results = ctypes.c_int(-1)\n cb = libnl.handlers.nl_cb_alloc(libnl.handlers.NL_CB_DEFAULT)\n libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_VALID, libnl.handlers.NL_CB_CUSTOM, callback_trigger, results)\n libnl.handlers.nl_cb_err(cb, libnl.handlers.NL_CB_CUSTOM, error_handler, err)\n libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_ACK, libnl.handlers.NL_CB_CUSTOM, ack_handler, err)\n libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_SEQ_CHECK, libnl.handlers.NL_CB_CUSTOM,\n lambda *_: libnl.handlers.NL_OK, None)\n _LOGGER.debug('Sending NL80211_CMD_TRIGGER_SCAN...')\n ret = nl_send_auto(sk, msg)\n if ret < 0:\n return ret\n while err.value > 0:\n _LOGGER.debug('Retrieving NL80211_CMD_TRIGGER_SCAN acknowledgement...')\n ret = nl_recvmsgs(sk, cb)\n if ret < 0:\n return ret\n if err.value < 0:\n error('Unknown error {0} ({1})'.format(err.value, errmsg[abs(err.value)]))\n while results.value < 0:\n _LOGGER.debug('Retrieving NL80211_CMD_TRIGGER_SCAN final response...')\n ret = nl_recvmsgs(sk, cb)\n if ret < 0:\n return ret\n if results.value > 0:\n error('The kernel aborted the scan.')\n _LOGGER.debug('Leaving group %d.', mcid)\n return nl_socket_drop_membership(sk, mcid)", - "docstring": "Issue a scan request to the kernel and wait for it to reply with a signal.\n\n This function issues NL80211_CMD_TRIGGER_SCAN which requires root privileges.\n\n The way NL80211 works is first you issue NL80211_CMD_TRIGGER_SCAN and wait for the kernel to signal that the scan is\n done. When that signal occurs, data is not yet available. The signal tells us if the scan was aborted or if it was\n successful (if new scan results are waiting). 
This function handles that simple signal.\n\n May exit the program (sys.exit()) if a fatal error occurs.\n\n Positional arguments:\n sk -- nl_sock class instance (from nl_socket_alloc()).\n if_index -- interface index (integer).\n driver_id -- nl80211 driver ID from genl_ctrl_resolve() (integer).\n mcid -- nl80211 scanning group ID from genl_ctrl_resolve_grp() (integer).\n\n Returns:\n 0 on success or a negative error code." - }, - { - "code": "def __map_entity(self, entity: dal.AssetClass) -> AssetClass:\n mapper = self.__get_mapper()\n ac = mapper.map_entity(entity)\n return ac", - "docstring": "maps the entity onto the model object" - }, - { - "code": "def reset(self):\n [a().remove_observer(self, self.on_cache_changed) if (a() is not None) else None for [a, _] in self.cached_input_ids.values()]\n self.order = collections.deque()\n self.cached_inputs = {}\n self.cached_input_ids = {}\n self.cached_outputs = {}\n self.inputs_changed = {}", - "docstring": "Totally reset the cache" - }, - { - "code": "def _set_command_line_arguments(self, args):\n Global.LOGGER.debug(\"setting command line arguments\")\n if args.VERBOSE:\n Global.LOGGER.debug(\"verbose mode active\")\n Global.CONFIG_MANAGER.log_level = logging.DEBUG\n Global.LOGGER_INSTANCE.reconfigure_log_level()\n if args.STATS > 0:\n Global.LOGGER.debug(f\"stats requested every {args.STATS} seconds\")\n Global.CONFIG_MANAGER.show_stats = True\n Global.CONFIG_MANAGER.stats_timeout = args.STATS\n if args.INTERVAL > 0:\n Global.LOGGER.debug(f\"setting sleep interval to {args.INTERVAL} milliseconds\")\n Global.CONFIG_MANAGER.sleep_interval = float(args.INTERVAL)/1000\n if args.TRACE:\n Global.LOGGER.debug(\"tracing mode active\")\n Global.CONFIG_MANAGER.tracing_mode = True\n Global.CONFIG_MANAGER.log_level = logging.DEBUG\n Global.LOGGER_INSTANCE.reconfigure_log_level()\n if args.MESSAGEINTERVAL is not None and args.MESSAGEINTERVAL > 0:\n Global.LOGGER.debug(f\"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds\")\n Global.CONFIG_MANAGER.message_fetcher_sleep_interval = float(args.MESSAGEINTERVAL)/10000\n Global.CONFIG_MANAGER.fixed_message_fetcher_interval = True\n Global.LOGGER.debug(f\"recipes to be parsed: {args.FILENAME}\")\n Global.CONFIG_MANAGER.recipes = (args.FILENAME)", - "docstring": "Set internal configuration variables according to \n the input parameters" - }, - { - "code": "def from_totient(public_key, totient):\n p_plus_q = public_key.n - totient + 1\n p_minus_q = isqrt(p_plus_q * p_plus_q - public_key.n * 4)\n q = (p_plus_q - p_minus_q) // 2\n p = p_plus_q - q\n if not p*q == public_key.n:\n raise ValueError('given public key and totient do not match.')\n return PaillierPrivateKey(public_key, p, q)", - "docstring": "given the totient, one can factorize the modulus\n\n The totient is defined as totient = (p - 1) * (q - 1),\n and the modulus is defined as modulus = p * q\n\n Args:\n public_key (PaillierPublicKey): The corresponding public\n key\n totient (int): the totient of the modulus\n\n Returns:\n the :class:`PaillierPrivateKey` that corresponds to the inputs\n\n Raises:\n ValueError: if the given totient is not the totient of the modulus\n of the given public key" - }, - { - "code": "def cmd(self, command, args, prefix=None):\n if prefix is None:\n prefix = u''\n raw_cmd = u'{0} {1} {2}'.format(prefix, command, args).strip()\n self._send(raw_cmd)", - "docstring": "Sends a command to the server.\n\n :param command: IRC code to send.\n :type command: unicode\n :param args: arguments to pass 
with the command.\n :type args: basestring\n :param prefix: optional prefix to prepend to the command.\n :type prefix: str or None" - }, - { - "code": "def display_to_value(value, default_value, ignore_errors=True):\n from qtpy.compat import from_qvariant\n value = from_qvariant(value, to_text_string)\n try:\n np_dtype = get_numpy_dtype(default_value)\n if isinstance(default_value, bool):\n try:\n value = bool(float(value))\n except ValueError:\n value = value.lower() == \"true\"\n elif np_dtype is not None:\n if 'complex' in str(type(default_value)):\n value = np_dtype(complex(value))\n else:\n value = np_dtype(value)\n elif is_binary_string(default_value):\n value = to_binary_string(value, 'utf8')\n elif is_text_string(default_value):\n value = to_text_string(value)\n elif isinstance(default_value, complex):\n value = complex(value)\n elif isinstance(default_value, float):\n value = float(value)\n elif isinstance(default_value, int):\n try:\n value = int(value)\n except ValueError:\n value = float(value)\n elif isinstance(default_value, datetime.datetime):\n value = datestr_to_datetime(value)\n elif isinstance(default_value, datetime.date):\n value = datestr_to_datetime(value).date()\n elif isinstance(default_value, datetime.timedelta):\n value = str_to_timedelta(value)\n elif ignore_errors:\n value = try_to_eval(value)\n else:\n value = eval(value)\n except (ValueError, SyntaxError):\n if ignore_errors:\n value = try_to_eval(value)\n else:\n return default_value\n return value", - "docstring": "Convert back to value" - }, - { - "code": "def do_genesis_block_audit(genesis_block_path=None, key_id=None):\n signing_keys = GENESIS_BLOCK_SIGNING_KEYS\n if genesis_block_path is not None:\n genesis_block_load(genesis_block_path)\n if key_id is not None:\n gpg2_path = find_gpg2()\n assert gpg2_path, 'You need to install gpg2'\n p = subprocess.Popen([gpg2_path, '-a', '--export', key_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = p.communicate()\n if p.returncode != 0:\n log.error('Failed to load key {}\\n{}'.format(key_id, err))\n return False\n signing_keys = { key_id: out.strip() }\n res = genesis_block_audit(get_genesis_block_stages(), key_bundle=signing_keys)\n if not res:\n log.error('Genesis block is NOT signed by {}'.format(', '.join(signing_keys.keys())))\n return False\n return True", - "docstring": "Loads and audits the genesis block, optionally using an alternative key" - }, - { - "code": "def find_by_tag(self, tag) -> Dict[str, Set[Parameter]]:\n return self.tags[tag]", - "docstring": "Get all registered dicts that are registered for a tag\n\n :param tag: str - single tag\n :return: a dict of {param name: set[Parameter]} that contains all ParameterScenarioSets for\n all parameter names with a given tag" - }, - { - "code": "def _is_socket(cls, stream):\n try:\n fd = stream.fileno()\n except ValueError:\n return False\n sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)\n try:\n sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)\n except socket.error as ex:\n if ex.args[0] != errno.ENOTSOCK:\n return True\n else:\n return True", - "docstring": "Check if the given stream is a socket." 
- }, - { - "code": "def _resolve_path(self, path):\n filepath = None\n mimetype = None\n for root, dirs, files in self.filter_files(self.path):\n error_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'error_templates', path)\n try:\n with open(error_path):\n mimetype, encoding = mimetypes.guess_type(error_path)\n filepath = error_path\n except IOError:\n pass\n if self.base:\n basepath = os.path.join(root, self.blueprint_name, path)\n try:\n with open(basepath):\n mimetype, encoding = mimetypes.guess_type(basepath)\n filepath = basepath\n except IOError:\n pass\n fullpath = os.path.join(root, path)\n try:\n with open(fullpath):\n mimetype, encoding = mimetypes.guess_type(fullpath)\n filepath = fullpath\n except IOError:\n pass\n return filepath, mimetype", - "docstring": "Resolve static file paths" - }, - { - "code": "def merge_context(self, tag, metadata):\n self.entities.append(tag)\n for k in metadata.keys():\n if k not in self.metadata:\n self.metadata[k] = k", - "docstring": "merge into contextManagerFrame new entity and metadata.\n\n Appends tag as new entity and adds keys in metadata to keys in\n self.metadata.\n\n Args:\n tag(str): entity to be added to self.entities\n metadata(object): metadata containes keys to be added to self.metadata" - }, - { - "code": "def serve_bower_components(path):\n res = pkg_resources.resource_filename(\n 'coil', os.path.join('data', 'bower_components'))\n return send_from_directory(res, path)", - "docstring": "Serve bower components.\n\n This is meant to be used ONLY by the internal dev server.\n Please configure your web server to handle requests to this URL::\n\n /bower_components/ => coil/data/bower_components" - }, - { - "code": "def marshal(self, v):\n if v:\n orig = [i for i in self.choices if self.choices[i] == v]\n if len(orig) == 1:\n return orig[0]\n elif len(orig) == 0:\n raise NotImplementedError(\"No such reverse choice {0} for field {1}.\".format(v, self))\n else:\n raise NotImplementedError(\"Too many reverse choices {0} for value {1} for field {2}\".format(orig, v, self))", - "docstring": "Turn this value into API format.\n\n Do a reverse dictionary lookup on choices to find the original value. If\n there are no keys or too many keys for now we raise a NotImplementedError\n as marshal is not used anywhere currently. In the future we will want to\n fail gracefully." - }, - { - "code": "def opendocs(where='index', how='default'):\n import webbrowser\n docs_dir = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'docs')\n index = os.path.join(docs_dir, '_build/html/%s.html' % where)\n builddocs('html')\n url = 'file://%s' % os.path.abspath(index)\n if how in ('d', 'default'):\n webbrowser.open(url)\n elif how in ('t', 'tab'):\n webbrowser.open_new_tab(url)\n elif how in ('n', 'w', 'window'):\n webbrowser.open_new(url)", - "docstring": "Rebuild documentation and opens it in your browser.\n\n Use the first argument to specify how it should be opened:\n\n `d` or `default`: Open in new tab or new window, using the default\n method of your browser.\n\n `t` or `tab`: Open documentation in new tab.\n\n `n`, `w` or `window`: Open documentation in new window." - }, - { - "code": "def register_view(self, view):\n self.view.set_text(self.model.credits)\n gobject.timeout_add(1500, self.on_begin_scroll)\n return", - "docstring": "Loads the text taking it from the model, then starts a\n timer to scroll it." 
- }, - { - "code": "def _negate_compare_text(atok: asttokens.ASTTokens, node: ast.Compare) -> str:\n assert len(node.ops) == 1, \"A single comparison expected, but got: {}\".format(len(node.ops))\n assert len(node.comparators) == 1, \"A single comparator expected, but got: {}\".format(len(node.comparators))\n operator = node.ops[0]\n left = node.left\n right = node.comparators[0]\n left_text = atok.get_text(node=left)\n right_text = atok.get_text(node=right)\n text = ''\n if isinstance(operator, ast.Eq):\n text = '{} != {}'.format(left_text, right_text)\n elif isinstance(operator, ast.NotEq):\n text = '{} == {}'.format(left_text, right_text)\n elif isinstance(operator, ast.Lt):\n text = '{} >= {}'.format(left_text, right_text)\n elif isinstance(operator, ast.LtE):\n text = '{} > {}'.format(left_text, right_text)\n elif isinstance(operator, ast.Gt):\n text = '{} <= {}'.format(left_text, right_text)\n elif isinstance(operator, ast.GtE):\n text = '{} < {}'.format(left_text, right_text)\n elif isinstance(operator, ast.Is):\n text = '{} is not {}'.format(left_text, right_text)\n elif isinstance(operator, ast.IsNot):\n text = '{} is {}'.format(left_text, right_text)\n elif isinstance(operator, ast.In):\n text = '{} not in {}'.format(left_text, right_text)\n elif isinstance(operator, ast.NotIn):\n text = '{} in {}'.format(left_text, right_text)\n else:\n raise NotImplementedError(\"Unhandled comparison operator: {}\".format(operator))\n return text", - "docstring": "Generate the text representing the negation of the comparison node.\n\n :param atok:\n parsing obtained with ``asttokens`` so that we can access the last tokens of a node.\n\n The standard ``ast`` module provides only the first token of an AST node. In lack of concrete syntax tree,\n getting text from first to last token is currently the simplest approach.\n :param node: AST node representing the comparison in a condition\n :return: text representation of the node's negation" - }, - { - "code": "def peek_ahead(header: str, pos: int) -> Optional[str]:\n return None if pos == len(header) else header[pos]", - "docstring": "Return the next character from ``header`` at the given position.\n\n Return ``None`` at the end of ``header``.\n\n We never need to peek more than one character ahead." 
- }, - { - "code": "def GetValue(self):\n self.AssertInitialization('Positional')\n if str(self._widget.GetValue()) == EMPTY:\n return None\n return self._widget.GetValue()", - "docstring": "Positionals have no associated options_string,\n so only the supplied arguments are returned.\n The order is assumed to be the same as the order\n of declaration in the client code\n\n Returns\n \"argument_value\"" - }, - { - "code": "def next_rise(self, latitude, longitude, altitude=None):\n rise = self.pass_times(latitude, longitude, altitude,\n 2)\n timestamp = rise[0]['risetime']\n return datetime.fromtimestamp(timestamp)", - "docstring": "The next rise of the ISS.\n\n :param latitude: latitude in degrees of location you want iss pass\n above\n :type latitude: float\n :param longitude: longitude in degrees of location you want iss pass\n above\n :type longitude: float\n :param altitude: altitude in meters of location you want iss pass\n above, default is 100 when not given\n :type altitude: float\n :return: Return the next date when ISS will be over 10 degree above the\n horizon\n :rtype: datetime" - }, - { - "code": "def connect(self, socket_or_address):\n if isinstance(socket_or_address, tuple):\n import socket\n self.socket = socket.create_connection(socket_or_address)\n else:\n self.socket = socket_or_address\n address = None\n self.handler = EPCClientHandler(self.socket, address, self)\n self.call = self.handler.call\n self.call_sync = self.handler.call_sync\n self.methods = self.handler.methods\n self.methods_sync = self.handler.methods_sync\n self.handler_thread = newthread(self, target=self.handler.start)\n self.handler_thread.daemon = self.thread_daemon\n self.handler_thread.start()\n self.handler.wait_until_ready()", - "docstring": "Connect to server and start serving registered functions.\n\n :type socket_or_address: tuple or socket object\n :arg socket_or_address: A ``(host, port)`` pair to be passed\n to `socket.create_connection`, or\n a socket object." - }, - { - "code": "def fft(xi, yi, axis=0) -> tuple:\n if xi.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, xi.ndim)\n spacing = np.diff(xi)\n if not np.allclose(spacing, spacing.mean()):\n raise RuntimeError(\"WrightTools.kit.fft: argument xi must be evenly spaced\")\n yi = np.fft.fft(yi, axis=axis)\n d = (xi.max() - xi.min()) / (xi.size - 1)\n xi = np.fft.fftfreq(xi.size, d=d)\n xi = np.fft.fftshift(xi)\n yi = np.fft.fftshift(yi, axes=axis)\n return xi, yi", - "docstring": "Take the 1D FFT of an N-dimensional array and return \"sensible\" properly shifted arrays.\n\n Parameters\n ----------\n xi : numpy.ndarray\n 1D array over which the points to be FFT'ed are defined\n yi : numpy.ndarray\n ND array with values to FFT\n axis : int\n axis of yi to perform FFT over\n\n Returns\n -------\n xi : 1D numpy.ndarray\n 1D array. Conjugate to input xi. Example: if input xi is in the time\n domain, output xi is in frequency domain.\n yi : ND numpy.ndarray\n FFT. Has the same shape as the input array (yi)." - }, - { - "code": "def is_older_than(before, seconds):\n if isinstance(before, six.string_types):\n before = parse_strtime(before).replace(tzinfo=None)\n else:\n before = before.replace(tzinfo=None)\n return utcnow() - before > datetime.timedelta(seconds=seconds)", - "docstring": "Return True if before is older than seconds." 
- }, - { - "code": "def add_param_annotations(\n logic: Callable, params: List[RequestParamAnnotation]) -> Callable:\n if hasattr(logic, '_doctor_signature'):\n sig = logic._doctor_signature\n doctor_params = logic._doctor_params\n else:\n sig = inspect.signature(logic)\n doctor_params = get_params_from_func(logic, sig)\n prev_parameters = {name: param for name, param in sig.parameters.items()}\n new_params = []\n for param in params:\n if param.name in prev_parameters:\n logging.warning('Not adding %s to signature of %s, function '\n 'already has that parameter in its signature.',\n param.name, logic.__name__)\n continue\n doctor_params.all.append(param.name)\n default = None\n if param.required:\n default = Parameter.empty\n doctor_params.required.append(param.name)\n else:\n doctor_params.optional.append(param.name)\n new_params.append(\n Parameter(param.name, Parameter.KEYWORD_ONLY, default=default,\n annotation=param.annotation))\n new_sig = sig.replace(\n parameters=list(prev_parameters.values()) + new_params)\n logic._doctor_signature = new_sig\n logic._doctor_params = doctor_params\n return logic", - "docstring": "Adds parameter annotations to a logic function.\n\n This adds additional required and/or optional parameters to the logic\n function that are not part of it's signature. It's intended to be used\n by decorators decorating logic functions or middleware.\n\n :param logic: The logic function to add the parameter annotations to.\n :param params: The list of RequestParamAnnotations to add to the logic func.\n :returns: The logic func with updated parameter annotations." - }, - { - "code": "def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None):\n batch_size_generator = batch_size_generator or SquareRootSingleBatch()\n compute_method = compute_method or BatchMeansMCSE()\n batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))\n return np.min(list(compute_method.compute_standard_error(chain, b) for b in batch_sizes))", - "docstring": "Compute Monte Carlo standard errors for the expectations\n\n This is a convenience function that calls the compute method for each batch size and returns the lowest ESS\n over the used batch sizes.\n\n Args:\n chain (ndarray): the Markov chain\n batch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes\n we will use. 
Per default it uses the :class:`SquareRootSingleBatch` method.\n compute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.\n By default we will use the :class:`BatchMeansMCSE` method" - }, - { - "code": "def begin_auth():\n repository = request.headers['repository']\n if repository not in config['repositories']: return fail(no_such_repo_msg)\n repository_path = config['repositories'][repository]['path']\n conn = auth_db_connect(cpjoin(repository_path, 'auth_transient.db')); gc_tokens(conn)\n auth_token = base64.b64encode(pysodium.randombytes(35)).decode('utf-8')\n conn.execute(\"insert into tokens (expires, token, ip) values (?,?,?)\",\n (time.time() + 30, auth_token, request.environ['REMOTE_ADDR']))\n conn.commit()\n return success({'auth_token' : auth_token})", - "docstring": "Request authentication token to sign" - }, - { - "code": "def check_for_allowed_file(f):\n for ext in SUPPORTED_EXTENSIONS:\n if f.endswith(ext):\n return True\n log.error(\"Failed upload: Not an allowed file extension: %s\", f)\n raise SystemExit", - "docstring": "Checks a file extension against a list of seq file exts" - }, - { - "code": "def worker(job):\n ret = False\n try:\n if job.full_url is not None:\n req = requests.get(job.full_url, stream=True)\n ret = save_and_check(req, job.local_file, job.expected_checksum)\n if not ret:\n return ret\n ret = create_symlink(job.local_file, job.symlink_path)\n except KeyboardInterrupt:\n logging.debug(\"Ignoring keyboard interrupt.\")\n return ret", - "docstring": "Run a single download job." - }, - { - "code": "def spatialReference(self):\n if self._wkid == None and self._wkt is not None:\n return {\"wkt\": self._wkt}\n else:\n return {\"wkid\": self._wkid}", - "docstring": "returns the geometry spatial reference" - }, - { - "code": "def __grant_generate(grant,\n database,\n user,\n host='localhost',\n grant_option=False,\n escape=True,\n ssl_option=False):\n grant = re.sub(r'\\s*,\\s*', ', ', grant).upper()\n grant = __grant_normalize(grant)\n db_part = database.rpartition('.')\n dbc = db_part[0]\n table = db_part[2]\n if escape:\n if dbc != '*':\n dbc = quote_identifier(dbc, for_grants=(table == '*'))\n if table != '*':\n table = quote_identifier(table)\n qry = 'GRANT {0} ON {1}.{2} TO %(user)s@%(host)s'.format(grant, dbc, table)\n args = {}\n args['user'] = user\n args['host'] = host\n if ssl_option and isinstance(ssl_option, list):\n qry += __ssl_option_sanitize(ssl_option)\n if salt.utils.data.is_true(grant_option):\n qry += ' WITH GRANT OPTION'\n log.debug('Grant Query generated: %s args %s', qry, repr(args))\n return {'qry': qry, 'args': args}", - "docstring": "Validate grants and build the query that could set the given grants\n\n Note that this query contains arguments for user and host but not for\n grants or database." - }, - { - "code": "def textContent(self, text: str) -> None:\n self._set_text_content(text)\n if self.connected:\n self._set_text_content_web(text)", - "docstring": "Set textContent both on this node and related browser node." 
- }, - { - "code": "def submit(self, call, *args, **kwargs):\n future = StoredFuture(call, *args, **kwargs)\n self._queue.put(future)\n self._ensure_worker()\n return future", - "docstring": "Submit a call for future execution\n\n :return: future for the call execution\n :rtype: StoredFuture" - }, - { - "code": "def get_services_in_use(self):\n if self._state == Bundle.UNINSTALLED:\n raise BundleException(\n \"Can't call 'get_services_in_use' on an uninstalled bundle\"\n )\n return self.__framework._registry.get_bundle_imported_services(self)", - "docstring": "Returns this bundle's ServiceReference list for all services it is\n using or an empty list.\n A bundle is considered to be using a service if its use count for that\n service is greater than zero.\n\n The list is valid at the time of the call to this method, however, as\n the Framework is a very dynamic environment, services can be modified\n or unregistered at any time.\n\n :return: An array of ServiceReference objects\n :raise BundleException: If the bundle has been uninstalled" - }, - { - "code": "def _cleanup_temp_dir(self, base_dir):\n if self._should_cleanup_temp_dir:\n logging.debug('Cleaning up temporary directory %s.', base_dir)\n if self._user is None:\n util.rmtree(base_dir, onerror=util.log_rmtree_error)\n else:\n rm = subprocess.Popen(self._build_cmdline(['rm', '-rf', '--', base_dir]),\n stderr=subprocess.PIPE)\n rm_output = rm.stderr.read().decode()\n rm.stderr.close()\n if rm.wait() != 0 or rm_output:\n logging.warning(\"Failed to clean up temp directory %s: %s.\",\n base_dir, rm_output)\n else:\n logging.info(\"Skipping cleanup of temporary directory %s.\", base_dir)", - "docstring": "Delete given temporary directory and all its contents." - }, - { - "code": "def load(self):\n lg.info('Loading ' + str(self.xml_file))\n update_annotation_version(self.xml_file)\n xml = parse(self.xml_file)\n return xml.getroot()", - "docstring": "Load xml from file." - }, - { - "code": "def locale(self):\n name = self.json_data['hid_locale']\n if name is None:\n name = \"Undefined\"\n return (int(self.json_data['from_hid_locale'][name]), name)", - "docstring": "Do a lookup for the locale code that is set for this layout.\n\n NOTE: USB HID specifies only 35 different locales. If your layout does not fit, it should be set to Undefined/0\n\n @return: Tuple (, )" - }, - { - "code": "def model_changed(self, model, prop_name, info):\n current_enables = self._get_config_enables()\n if not self._enables == current_enables:\n filtered_buffer_update_needed = True\n if all(self._enables[key] == current_enables[key] for key in ['VERBOSE', 'DEBUG', 'INFO', 'WARNING', 'ERROR']):\n follow_mode_key = 'CONSOLE_FOLLOW_LOGGING'\n only_follow_mode_changed = self._enables[follow_mode_key] != current_enables[follow_mode_key]\n filtered_buffer_update_needed = not only_follow_mode_changed\n self._enables = current_enables\n self.view.set_enables(self._enables)\n if filtered_buffer_update_needed:\n self.update_filtered_buffer()\n else:\n self.view.scroll_to_cursor_onscreen()", - "docstring": "React to configuration changes\n\n Update internal hold enable state, propagates it to view and refresh the text buffer." 
- }, - { - "code": "def split_metadata_params(headers):\n params = {}\n metadata = {}\n for header_name in headers:\n if header_name.lower() in header_mapping:\n params[header_mapping[header_name.lower()]] = headers[header_name]\n else:\n metadata[header_name] = headers[header_name]\n return metadata, params", - "docstring": "Given a dict of headers for s3, seperates those that are boto3\n parameters and those that must be metadata" - }, - { - "code": "def authentication_url(self):\n params = {\n 'client_id': self.client_id,\n 'response_type': self.type,\n 'redirect_uri': self.callback_url\n }\n return AUTHENTICATION_URL + \"?\" + urlencode(params)", - "docstring": "Redirect your users to here to authenticate them." - }, - { - "code": "def Luv_to_XYZ(cobj, *args, **kwargs):\n illum = cobj.get_illuminant_xyz()\n if cobj.luv_l <= 0.0:\n xyz_x = 0.0\n xyz_y = 0.0\n xyz_z = 0.0\n return XYZColor(\n xyz_x, xyz_y, xyz_z,\n observer=cobj.observer, illuminant=cobj.illuminant)\n cie_k_times_e = color_constants.CIE_K * color_constants.CIE_E\n u_sub_0 = (4.0 * illum[\"X\"]) / (illum[\"X\"] + 15.0 * illum[\"Y\"] + 3.0 * illum[\"Z\"])\n v_sub_0 = (9.0 * illum[\"Y\"]) / (illum[\"X\"] + 15.0 * illum[\"Y\"] + 3.0 * illum[\"Z\"])\n var_u = cobj.luv_u / (13.0 * cobj.luv_l) + u_sub_0\n var_v = cobj.luv_v / (13.0 * cobj.luv_l) + v_sub_0\n if cobj.luv_l > cie_k_times_e:\n xyz_y = math.pow((cobj.luv_l + 16.0) / 116.0, 3.0)\n else:\n xyz_y = cobj.luv_l / color_constants.CIE_K\n xyz_x = xyz_y * 9.0 * var_u / (4.0 * var_v)\n xyz_z = xyz_y * (12.0 - 3.0 * var_u - 20.0 * var_v) / (4.0 * var_v)\n return XYZColor(\n xyz_x, xyz_y, xyz_z, illuminant=cobj.illuminant, observer=cobj.observer)", - "docstring": "Convert from Luv to XYZ." - }, - { - "code": "def decorate(decorator_cls, *args, **kwargs):\n global _wrappers\n wrapper_cls = _wrappers.get(decorator_cls, None)\n if wrapper_cls is None:\n class PythonWrapper(decorator_cls):\n pass\n wrapper_cls = PythonWrapper\n wrapper_cls.__name__ = decorator_cls.__name__ + \"PythonWrapper\"\n _wrappers[decorator_cls] = wrapper_cls\n def decorator(fn):\n wrapped = wrapper_cls(fn, *args, **kwargs)\n _update_wrapper(wrapped, fn)\n return wrapped\n return decorator", - "docstring": "Creates a decorator function that applies the decorator_cls that was passed in." - }, - { - "code": "def prepare_blobs(self):\n self.raw_header = self.extract_header()\n if self.cache_enabled:\n self._cache_offsets()", - "docstring": "Populate the blobs" - }, - { - "code": "def _get_closest_ansi_color(r, g, b, exclude=()):\n assert isinstance(exclude, tuple)\n saturation = abs(r - g) + abs(g - b) + abs(b - r)\n if saturation > 30:\n exclude += ('ansilightgray', 'ansidarkgray', 'ansiwhite', 'ansiblack')\n distance = 257*257*3\n match = 'ansidefault'\n for name, (r2, g2, b2) in ANSI_COLORS_TO_RGB.items():\n if name != 'ansidefault' and name not in exclude:\n d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2\n if d < distance:\n match = name\n distance = d\n return match", - "docstring": "Find closest ANSI color. Return it by name.\n\n :param r: Red (Between 0 and 255.)\n :param g: Green (Between 0 and 255.)\n :param b: Blue (Between 0 and 255.)\n :param exclude: A tuple of color names to exclude. (E.g. 
``('ansired', )``.)" - }, - { - "code": "def skipper(func):\n @functools.wraps(func)\n def wrapped():\n key = (OPTIONS['base_dir'], OPTIONS['in_ext'], OPTIONS['out_ext'])\n if key not in seen:\n seen[key] = func()\n return seen[key]\n seen = {}\n return wrapped", - "docstring": "Decorator that memorizes base_dir, in_ext and out_ext from OPTIONS\n and skips execution for duplicates." - }, - { - "code": "def lossy_tokenize(text, lang, include_punctuation=False, external_wordlist=False):\n global _simplify_chinese\n info = get_language_info(lang)\n tokens = tokenize(text, lang, include_punctuation, external_wordlist)\n if info['lookup_transliteration'] == 'zh-Hans':\n from wordfreq.chinese import simplify_chinese as _simplify_chinese\n tokens = [_simplify_chinese(token) for token in tokens]\n return [smash_numbers(token) for token in tokens]", - "docstring": "Get a list of tokens for this text, with largely the same results and\n options as `tokenize`, but aggressively normalize some text in a lossy way\n that's good for counting word frequencies.\n\n In particular:\n\n - Any sequence of 2 or more adjacent digits, possibly with intervening\n punctuation such as a decimal point, will replace each digit with '0'\n so that frequencies for numbers don't have to be counted separately.\n\n This is similar to but not quite identical to the word2vec Google News\n data, which replaces digits with '#' in tokens with more than one digit.\n\n - In Chinese, unless Traditional Chinese is specifically requested using\n 'zh-Hant', all characters will be converted to Simplified Chinese." - }, - { - "code": "def load_script(zap_helper, **options):\n with zap_error_handler():\n if not os.path.isfile(options['file_path']):\n raise ZAPError('No file found at \"{0}\", cannot load script.'.format(options['file_path']))\n if not _is_valid_script_engine(zap_helper.zap, options['engine']):\n engines = zap_helper.zap.script.list_engines\n raise ZAPError('Invalid script engine provided. Valid engines are: {0}'.format(', '.join(engines)))\n console.debug('Loading script \"{0}\" from \"{1}\"'.format(options['name'], options['file_path']))\n result = zap_helper.zap.script.load(options['name'], options['script_type'], options['engine'],\n options['file_path'], scriptdescription=options['description'])\n if result != 'OK':\n raise ZAPError('Error loading script: {0}'.format(result))\n console.info('Script \"{0}\" loaded'.format(options['name']))", - "docstring": "Load a script from a file." - }, - { - "code": "def check_docker_command_works():\n try:\n out = subprocess.check_output([\"docker\", \"version\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n except OSError:\n logger.info(\"docker binary is not available\")\n raise CommandDoesNotExistException(\n \"docker command doesn't seem to be available on your system. \"\n \"Please install and configure docker.\"\n )\n except subprocess.CalledProcessError as ex:\n logger.error(\"exception: %s\", ex)\n logger.error(\"rc: %s, output: %r\", ex.returncode, ex.output)\n raise ConuException(\n \"`docker version` call failed, it seems that your docker daemon is misconfigured or \"\n \"this user can't communicate with dockerd.\"\n )\n else:\n logger.info(\"docker environment info: %r\", out)\n return True", - "docstring": "Verify that dockerd and docker binary works fine. 
This is performed by calling `docker\n version`, which also checks server API version.\n\n :return: bool, True if all is good, otherwise ConuException or CommandDoesNotExistException\n is thrown" - }, - { - "code": "def get_id(date=None, project: str = 'sip',\n instance_id: int = None) -> str:\n if date is None:\n date = datetime.datetime.utcnow()\n if isinstance(date, datetime.datetime):\n date = date.strftime('%Y%m%d')\n if instance_id is None:\n instance_id = randint(0, 9999)\n return 'SBI-{}-{}-{:04d}'.format(date, project, instance_id)", - "docstring": "Get a SBI Identifier.\n\n Args:\n date (str or datetime.datetime, optional): UTC date of the SBI\n project (str, optional ): Project Name\n instance_id (int, optional): SBI instance identifier\n\n Returns:\n str, Scheduling Block Instance (SBI) ID." - }, - { - "code": "def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True):\n for filenode in (filenode_old, filenode_new):\n if not isinstance(filenode, FileNode):\n raise VCSError(\"Given object should be FileNode object, not %s\"\n % filenode.__class__)\n old_raw_id = getattr(filenode_old.changeset, 'raw_id', '0' * 40)\n new_raw_id = getattr(filenode_new.changeset, 'raw_id', '0' * 40)\n repo = filenode_new.changeset.repository\n vcs_gitdiff = repo.get_diff(old_raw_id, new_raw_id, filenode_new.path,\n ignore_whitespace)\n return vcs_gitdiff", - "docstring": "Returns git style diff between given ``filenode_old`` and ``filenode_new``.\n\n :param ignore_whitespace: ignore whitespaces in diff" - }, - { - "code": "def filter_uuid_list(stmts_in, uuids, **kwargs):\n invert = kwargs.get('invert', False)\n logger.info('Filtering %d statements for %d UUID%s...' %\n (len(stmts_in), len(uuids), 's' if len(uuids) > 1 else ''))\n stmts_out = []\n for st in stmts_in:\n if not invert:\n if st.uuid in uuids:\n stmts_out.append(st)\n else:\n if st.uuid not in uuids:\n stmts_out.append(st)\n logger.info('%d statements after filter...' % len(stmts_out))\n dump_pkl = kwargs.get('save')\n if dump_pkl:\n dump_statements(stmts_out, dump_pkl)\n return stmts_out", - "docstring": "Filter to Statements corresponding to given UUIDs\n\n Parameters\n ----------\n stmts_in : list[indra.statements.Statement]\n A list of statements to filter.\n uuids : list[str]\n A list of UUIDs to filter for.\n save : Optional[str]\n The name of a pickle file to save the results (stmts_out) into.\n invert : Optional[bool]\n Invert the filter to remove the Statements corresponding to the given\n UUIDs.\n\n Returns\n -------\n stmts_out : list[indra.statements.Statement]\n A list of filtered statements." - }, - { - "code": "def add_ref(self, wordlist):\n\t\trefname = wordlist[0][:-1]\n\t\tif(refname in self.refs):\n\t\t\traise ReferenceError(\"[line {}]:{} already defined here (word) {} (line) {}\".format(self.line_count, \n\t\t\t\t\t\trefname, self.refs[refname][0], self.refs[refname][1]))\n\t\tself.refs[refname] = (self.word_count, self.line_count)", - "docstring": "Adds a reference." 
- }, - { - "code": "def listify(values, N=1, delim=None):\n ans = [] if values is None else values\n if hasattr(ans, '__iter__') and not isinstance(ans, basestring):\n ans = list(ans)\n else:\n if isinstance(delim, basestring) and isinstance(ans, basestring):\n try:\n ans = ans.split(delim)\n except (IndexError, ValueError, AttributeError, TypeError):\n ans = [ans]\n else:\n ans = [ans]\n if len(ans):\n if len(ans) < N and N > 1:\n ans += [ans[-1]] * (N - len(ans))\n else:\n if N > 1:\n ans = [[]] * N\n return ans", - "docstring": "Return an N-length list, with elements values, extrapolating as necessary.\n\n >>> listify(\"don't split into characters\")\n [\"don't split into characters\"]\n >>> listify(\"len = 3\", 3)\n ['len = 3', 'len = 3', 'len = 3']\n >>> listify(\"But split on a delimeter, if requested.\", delim=',')\n ['But split on a delimeter', ' if requested.']\n >>> listify([\"obj 1\", \"obj 2\", \"len = 4\"], N=4)\n ['obj 1', 'obj 2', 'len = 4', 'len = 4']\n >>> listify(iter(\"len=7\"), N=7)\n ['l', 'e', 'n', '=', '7', '7', '7']\n >>> listify(iter(\"len=5\"))\n ['l', 'e', 'n', '=', '5']\n >>> listify(None, 3)\n [[], [], []]\n >>> listify([None],3)\n [None, None, None]\n >>> listify([], 3)\n [[], [], []]\n >>> listify('', 2)\n ['', '']\n >>> listify(0)\n [0]\n >>> listify(False, 2)\n [False, False]" - }, - { - "code": "def reversed_lines(path):\n with open(path, 'r') as handle:\n part = ''\n for block in reversed_blocks(handle):\n for c in reversed(block):\n if c == '\\n' and part:\n yield part[::-1]\n part = ''\n part += c\n if part: yield part[::-1]", - "docstring": "Generate the lines of file in reverse order." - }, - { - "code": "def go_stdlib(self):\n out = self._go_dist.create_go_cmd('list', args=['std']).check_output()\n return frozenset(out.decode('utf-8').strip().split())", - "docstring": "Return the set of all Go standard library import paths.\n\n :rtype: frozenset of string" - }, - { - "code": "def create_cache(self, **kwargs):\n cache = predix.admin.cache.Cache(**kwargs)\n cache.create(**kwargs)\n cache.add_to_manifest(self)\n return cache", - "docstring": "Creates an instance of the Cache Service." - }, - { - "code": "def Eg(self, **kwargs):\n return self.unstrained.Eg(**kwargs) + self.Eg_strain_shift(**kwargs)", - "docstring": "Returns the strain-shifted bandgap, ``Eg``." - }, - { - "code": "def oidcCredentials(self, *args, **kwargs):\n return self._makeApiCall(self.funcinfo[\"oidcCredentials\"], *args, **kwargs)", - "docstring": "Get Taskcluster credentials given a suitable `access_token`\n\n Given an OIDC `access_token` from a trusted OpenID provider, return a\n set of Taskcluster credentials for use on behalf of the identified\n user.\n\n This method is typically not called with a Taskcluster client library\n and does not accept Hawk credentials. The `access_token` should be\n given in an `Authorization` header:\n ```\n Authorization: Bearer abc.xyz\n ```\n\n The `access_token` is first verified against the named\n :provider, then passed to the provider's APIBuilder to retrieve a user\n profile. That profile is then used to generate Taskcluster credentials\n appropriate to the user. Note that the resulting credentials may or may\n not include a `certificate` property. Callers should be prepared for either\n alternative.\n\n The given credentials will expire in a relatively short time. 
Callers should\n monitor this expiration and refresh the credentials if necessary, by calling\n this endpoint again, if they have expired.\n\n This method gives output: ``v1/oidc-credentials-response.json#``\n\n This method is ``experimental``" - }, - { - "code": "async def stop(self, _task=None):\n self._logger.info(\"Stopping adapter wrapper\")\n if self._task.stopped:\n return\n for task in self._task.subtasks:\n await task.stop()\n self._logger.debug(\"Stopping underlying adapter %s\", self._adapter.__class__.__name__)\n await self._execute(self._adapter.stop_sync)", - "docstring": "Stop the device adapter.\n\n See :meth:`AbstractDeviceAdapter.stop`." - }, - { - "code": "def beds_to_boolean(beds, ref=None, beds_sorted=False, ref_sorted=False,\n **kwargs):\n beds = copy.deepcopy(beds)\n fns = []\n for i,v in enumerate(beds):\n if type(v) == str:\n fns.append(v)\n beds[i] = pbt.BedTool(v)\n else:\n fns.append(v.fn)\n if not beds_sorted:\n beds[i] = beds[i].sort()\n names = _sample_names(fns, kwargs)\n if ref:\n if type(ref) == str:\n ref = pbt.BedTool(ref)\n if not ref_sorted:\n ref = ref.sort()\n else:\n ref = combine(beds)\n ind = []\n for r in ref:\n ind.append('{}:{}-{}'.format(r.chrom, r.start, r.stop))\n bdf = pd.DataFrame(0, index=ind, columns=names)\n for i,bed in enumerate(beds):\n res = ref.intersect(bed, sorted=True, wa=True)\n ind = []\n for r in res:\n ind.append('{}:{}-{}'.format(r.chrom,\n r.start,\n r.stop))\n bdf.ix[ind, names[i]] = 1\n return bdf", - "docstring": "Compare a list of bed files or BedTool objects to a reference bed file and\n create a boolean matrix where each row is an interval and each column is a 1\n if that file has an interval that overlaps the row interval and a 0\n otherwise. If no reference bed is provided, the provided bed files will be\n merged into a single bed and compared to that.\n\n Parameters\n ----------\n beds : list\n List of paths to bed files or BedTool objects.\n\n ref : str or BedTool\n Reference bed file to compare against. If no reference bed is provided,\n the provided bed files will be merged into a single bed and compared to\n that.\n\n beds_sorted : boolean\n Whether the bed files in beds are already sorted. If False, all bed\n files in beds will be sorted.\n\n ref_sorted : boolean\n Whether the reference bed file is sorted. If False, ref will be sorted.\n\n names : list of strings\n Names to use for columns of output files. Overrides define_sample_name \n if provided.\n\n define_sample_name : function that takes string as input\n Function mapping filename to sample name (or basename). For instance,\n you may have the basename in the path and use a regex to extract it.\n The basenames will be used as the column names. If this is not provided,\n the columns will be named as the input files.\n\n Returns\n -------\n out : pandas.DataFrame\n Boolean data frame indicating whether each bed file has an interval\n that overlaps each interval in the reference bed file." 
- }, - { - "code": "def find_experiment_export(app_id):\n cwd = os.getcwd()\n data_filename = \"{}-data.zip\".format(app_id)\n path_to_data = os.path.join(cwd, \"data\", data_filename)\n if os.path.exists(path_to_data):\n try:\n Data(path_to_data)\n except IOError:\n from dallinger import logger\n logger.exception(\n \"Error reading local data file {}, checking remote.\".format(\n path_to_data\n )\n )\n else:\n return path_to_data\n path_to_data = os.path.join(tempfile.mkdtemp(), data_filename)\n buckets = [user_s3_bucket(), dallinger_s3_bucket()]\n for bucket in buckets:\n try:\n bucket.download_file(data_filename, path_to_data)\n except botocore.exceptions.ClientError:\n pass\n else:\n return path_to_data", - "docstring": "Attempt to find a zipped export of an experiment with the ID provided\n and return its path. Returns None if not found.\n\n Search order:\n 1. local \"data\" subdirectory\n 2. user S3 bucket\n 3. Dallinger S3 bucket" - }, - { - "code": "def editunivset(self):\n self.ignore(whitespace)\n if not self.nextstr('.'):\n self._raiseSyntaxExpects('.')\n univ = self.univprop()\n self.ignore(whitespace)\n self.nextmust('=')\n self.ignore(whitespace)\n valu = self.valu()\n return s_ast.EditPropSet(kids=(univ, valu))", - "docstring": ".foo = bar" - }, - { - "code": "def get_or_create(cls, **kwargs):\n q = cls._get_instance(**kwargs)\n if q:\n return q\n q = cls(**kwargs)\n _action_and_commit(q, session.add)\n return q", - "docstring": "If a record matching the instance already exists in the database, then\n return it, otherwise create a new record." - }, - { - "code": "def HumanReadableType(self):\n if isinstance(self.service_type, py2to3.STRING_TYPES):\n return self.service_type\n return human_readable_service_enums.SERVICE_ENUMS['Type'].get(\n self.service_type, '{0:d}'.format(self.service_type))", - "docstring": "Return a human readable string describing the type value.\n\n Returns:\n str: human readable description of the type value." - }, - { - "code": "def to_json_file(self, json_file_path):\n with open(json_file_path, \"w\", encoding='utf-8') as writer:\n writer.write(self.to_json_string())", - "docstring": "Save this instance to a json file." - }, - { - "code": "def get(self, sid):\n return AssignedAddOnExtensionContext(\n self._version,\n account_sid=self._solution['account_sid'],\n resource_sid=self._solution['resource_sid'],\n assigned_add_on_sid=self._solution['assigned_add_on_sid'],\n sid=sid,\n )", - "docstring": "Constructs a AssignedAddOnExtensionContext\n\n :param sid: The unique string that identifies the resource\n\n :returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionContext\n :rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionContext" - }, - { - "code": "def annotation_has_expired(event, key, timeout):\n anns = get_annotations(event, key)\n if anns:\n return (time.time() - anns[0][\"ts\"]) > timeout\n else:\n return False", - "docstring": "Check if an event error has expired." 
- }, - { - "code": "def print_functions(self, d):\n for c in self.contracts:\n for f in c.functions:\n f.cfg_to_dot(os.path.join(d, '{}.{}.dot'.format(c.name, f.name)))", - "docstring": "Export all the functions to dot files" - }, - { - "code": "def cache_control_expires(num_hours):\n num_seconds = int(num_hours * 60 * 60)\n def decorator(func):\n @wraps(func)\n def inner(request, *args, **kwargs):\n response = func(request, *args, **kwargs)\n patch_response_headers(response, num_seconds)\n return response\n return inner\n return decorator", - "docstring": "Set the appropriate Cache-Control and Expires headers for the given\n number of hours." - }, - { - "code": "def post_change_receiver(self, instance: Model, action: Action, **kwargs):\n try:\n old_group_names = instance.__instance_groups.observers[self]\n except (ValueError, KeyError):\n old_group_names = set()\n if action == Action.DELETE:\n new_group_names = set()\n else:\n new_group_names = set(self.group_names(instance))\n self.send_messages(\n instance,\n old_group_names - new_group_names,\n Action.DELETE,\n **kwargs\n )\n self.send_messages(\n instance,\n old_group_names & new_group_names,\n Action.UPDATE,\n **kwargs\n )\n self.send_messages(\n instance,\n new_group_names - old_group_names,\n Action.CREATE,\n **kwargs\n )", - "docstring": "Triggers the old_binding to possibly send to its group." - }, - { - "code": "def estimate_motion(self, time, intensity_grid, max_u, max_v):\n ti = np.where(time == self.times)[0][0]\n mask_vals = np.where(self.masks[ti].ravel() == 1)\n i_vals = self.i[ti].ravel()[mask_vals]\n j_vals = self.j[ti].ravel()[mask_vals]\n obj_vals = self.timesteps[ti].ravel()[mask_vals]\n u_shifts = np.arange(-max_u, max_u + 1)\n v_shifts = np.arange(-max_v, max_v + 1)\n min_error = 99999999999.0\n best_u = 0\n best_v = 0\n for u in u_shifts:\n j_shift = j_vals - u\n for v in v_shifts:\n i_shift = i_vals - v\n if np.all((0 <= i_shift) & (i_shift < intensity_grid.shape[0]) &\n (0 <= j_shift) & (j_shift < intensity_grid.shape[1])):\n shift_vals = intensity_grid[i_shift, j_shift]\n else:\n shift_vals = np.zeros(i_shift.shape)\n error = np.abs(shift_vals - obj_vals).mean()\n if error < min_error:\n min_error = error\n best_u = u * self.dx\n best_v = v * self.dx\n self.u[ti] = best_u\n self.v[ti] = best_v\n return best_u, best_v, min_error", - "docstring": "Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.\n\n Args:\n time: time being evaluated.\n intensity_grid: 2D array of intensities used in cross correlation.\n max_u: Maximum x-component of motion. Used to limit search area.\n max_v: Maximum y-component of motion. Used to limit search area\n\n Returns:\n u, v, and the minimum error." - }, - { - "code": "def yield_once(iterator):\n @wraps(iterator)\n def yield_once_generator(*args, **kwargs):\n yielded = set()\n for item in iterator(*args, **kwargs):\n if item not in yielded:\n yielded.add(item)\n yield item\n return yield_once_generator", - "docstring": "Decorator to make an iterator returned by a method yield each result only\n once.\n\n >>> @yield_once\n ... def generate_list(foo):\n ... return foo\n >>> list(generate_list([1, 2, 1]))\n [1, 2]\n\n :param iterator: Any method that returns an iterator\n :return: An method returning an iterator\n that yields every result only once at most." 
- }, - { - "code": "def someoneKnownSeen(self, home=None, camera=None):\n try:\n cam_id = self.cameraByName(camera=camera, home=home)['id']\n except TypeError:\n logger.warning(\"personSeenByCamera: Camera name or home is unknown\")\n return False\n if self.lastEvent[cam_id]['type'] == 'person':\n if self.lastEvent[cam_id]['person_id'] in self._knownPersons():\n return True\n return False", - "docstring": "Return True if someone known has been seen" - }, - { - "code": "def _extract(filename):\n random_string = '{:0d}'.format(randint(0, 10**6))\n tmp = '/tmp/kytos-napp-' + Path(filename).stem + '-' + random_string\n os.mkdir(tmp)\n with tarfile.open(filename, 'r:xz') as tar:\n tar.extractall(tmp)\n return Path(tmp)", - "docstring": "Extract package to a temporary folder.\n\n Return:\n pathlib.Path: Temp dir with package contents." - }, - { - "code": "def get_default_keychain(user=None, domain=\"user\"):\n cmd = \"security default-keychain -d {0}\".format(domain)\n return __salt__['cmd.run'](cmd, runas=user)", - "docstring": "Get the default keychain\n\n user\n The user to check the default keychain of\n\n domain\n The domain to use valid values are user|system|common|dynamic, the default is user\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' keychain.get_default_keychain" - }, - { - "code": "def read(self, size=None):\n if size is not None:\n return self.__sf.read(size)\n block_size = self.__class__.__block_size\n b = bytearray()\n received_bytes = 0\n while 1:\n partial = self.__sf.read(block_size)\n b.extend(partial)\n received_bytes += len(partial)\n if len(partial) < block_size:\n self.__log.debug(\"End of file.\")\n break\n self.__log.debug(\"Read (%d) bytes for total-file.\" % (received_bytes))\n return b", - "docstring": "Read a length of bytes. Return empty on EOF. If 'size' is omitted, \n return whole file." - }, - { - "code": "def index(request):\n kwargs = {'slug': getattr(settings, 'WAKAWAKA_DEFAULT_INDEX', 'WikiIndex')}\n redirect_to = reverse('wakawaka_page', kwargs=kwargs)\n return HttpResponseRedirect(redirect_to)", - "docstring": "Redirects to the default wiki index name." - }, - { - "code": "def save(self):\n response = self.session.request(\"save:Message\", [ self.data ])\n self.data = response\n self.message_id = self.data[\"id\"]\n return self", - "docstring": "Save current draft state." - }, - { - "code": "def iter_filths():\n for filth_cls in iter_filth_clss():\n if issubclass(filth_cls, RegexFilth):\n m = next(re.finditer(r\"\\s+\", \"fake pattern string\"))\n yield filth_cls(m)\n else:\n yield filth_cls()", - "docstring": "Iterate over all instances of filth" - }, - { - "code": "def exists_evaluator(self, index):\n attr_name = self.condition_data[index][0]\n return self.attributes.get(attr_name) is not None", - "docstring": "Evaluate the given exists match condition for the user attributes.\n\n Args:\n index: Index of the condition to be evaluated.\n\n Returns:\n Boolean: True if the user attributes have a non-null value for the given condition,\n otherwise False." 
- }, - { - "code": "def _GenerateStaticsTable(self, title='Current Statistics'):\n if len(self.__categories.keys()) < 1:\n return ''\n d = self.__categories\n keys = sorted(d.keys())\n cats = ', '.join(['\"%s\"' % k for k in keys])\n vals = ', '.join(['%d' % d[k] for k in keys])\n return r\n% (title, '-'*len(title), cats, vals)", - "docstring": "Generates a statics table based on set categories" - }, - { - "code": "def run_role(self, name,\n options=None,\n content=None):\n if options is None:\n options = {}\n if content is None:\n content = []\n role_fn, _ = role(name,\n self.language,\n self.node.line,\n self.reporter)\n vec, _ = role_fn(name,\n rawtext=str(content),\n text=str(content),\n lineno=self.node.line,\n inliner=self.memo.inliner,\n options=options,\n content=content)\n assert len(vec) == 1, 'only support one list in role'\n return vec[0]", - "docstring": "Generate a role node.\n\n options : dict\n key value arguments.\n content : content\n content of the directive\n\n Returns\n -------\n node : docutil Node\n Node generated by the arguments." - }, - { - "code": "def train_cb(self, param):\n if param.nbatch % self.frequent == 0:\n self._process_batch(param, 'train')", - "docstring": "Callback funtion for training." - }, - { - "code": "def path(self):\n location = self.client.table_location(self.table, self.database)\n if not location:\n raise Exception(\"Couldn't find location for table: {0}\".format(str(self)))\n return location", - "docstring": "Returns the path to this table in HDFS." - }, - { - "code": "def run_tensorboard(logdir, listen_on=\"0.0.0.0\", port=0, tensorboard_args=None, timeout=10):\n if tensorboard_args is None:\n tensorboard_args = []\n tensorboard_instance = Process.create_process(\n TENSORBOARD_BINARY.split(\" \") +\n [\"--logdir\", logdir, \"--host\", listen_on, \"--port\", str(port)] + tensorboard_args)\n try:\n tensorboard_instance.run()\n except FileNotFoundError as ex:\n raise TensorboardNotFoundError(ex)\n start = time.time()\n data = \"\"\n while time.time() - start < timeout:\n line = tensorboard_instance.read_line_stderr(time_limit=timeout)\n data += line\n if \"at http://\" in line:\n port = parse_port_from_tensorboard_output(line)\n return port\n elif \"TensorBoard attempted to bind to port\" in line:\n break\n tensorboard_instance.terminate()\n raise UnexpectedOutputError(\n data,\n expected=\"Confirmation that Tensorboard has started\"\n )", - "docstring": "Launch a new TensorBoard instance.\n\n :param logdir: Path to a TensorFlow summary directory\n :param listen_on: The IP address TensorBoard should listen on.\n :param port: Port number to listen on. 
0 for a random port.\n :param tensorboard_args: Additional TensorBoard arguments.\n :param timeout: Timeout after which the Timeout\n :type timeout: float\n :return: Returns the port TensorBoard is listening on.\n :raise UnexpectedOutputError\n :raise TensorboardNotFoundError\n :raise TimeoutError" - }, - { - "code": "def savemat(file_name, mdict, appendmat=True, format='7.3',\n oned_as='row', store_python_metadata=True,\n action_for_matlab_incompatible='error',\n marshaller_collection=None, truncate_existing=False,\n truncate_invalid_matlab=False, **keywords):\n if float(format) < 7.3:\n import scipy.io\n scipy.io.savemat(file_name, mdict, appendmat=appendmat,\n format=format, oned_as=oned_as, **keywords)\n return\n if appendmat and not file_name.endswith('.mat'):\n file_name = file_name + '.mat'\n options = Options(store_python_metadata=store_python_metadata, \\\n matlab_compatible=True, oned_as=oned_as, \\\n action_for_matlab_incompatible=action_for_matlab_incompatible, \\\n marshaller_collection=marshaller_collection)\n writes(mdict=mdict, filename=file_name,\n truncate_existing=truncate_existing,\n truncate_invalid_matlab=truncate_invalid_matlab,\n options=options)", - "docstring": "Save a dictionary of python types to a MATLAB MAT file.\n\n Saves the data provided in the dictionary `mdict` to a MATLAB MAT\n file. `format` determines which kind/vesion of file to use. The\n '7.3' version, which is HDF5 based, is handled by this package and\n all types that this package can write are supported. Versions 4 and\n 5 are not HDF5 based, so everything is dispatched to the SciPy\n package's ``scipy.io.savemat`` function, which this function is\n modelled after (arguments not specific to this package have the same\n names, etc.).\n\n Parameters\n ----------\n file_name : str or file-like object\n Name of the MAT file to store in. The '.mat' extension is\n added on automatically if not present if `appendmat` is set to\n ``True``. An open file-like object can be passed if the writing\n is being dispatched to SciPy (`format` < 7.3).\n mdict : dict\n The dictionary of variables and their contents to store in the\n file.\n appendmat : bool, optional\n Whether to append the '.mat' extension to `file_name` if it\n doesn't already end in it or not.\n format : {'4', '5', '7.3'}, optional\n The MATLAB mat file format to use. The '7.3' format is handled\n by this package while the '4' and '5' formats are dispatched to\n SciPy.\n oned_as : {'row', 'column'}, optional\n Whether 1D arrays should be turned into row or column vectors.\n store_python_metadata : bool, optional\n Whether or not to store Python type information. Doing so allows\n most types to be read back perfectly. Only applicable if not\n dispatching to SciPy (`format` >= 7.3).\n action_for_matlab_incompatible: str, optional\n The action to perform writing data that is not MATLAB\n compatible. The actions are to write the data anyways\n ('ignore'), don't write the incompatible data ('discard'), or\n throw a ``TypeNotMatlabCompatibleError`` exception.\n marshaller_collection : MarshallerCollection, optional\n Collection of marshallers to disk to use. 
Only applicable if\n not dispatching to SciPy (`format` >= 7.3).\n truncate_existing : bool, optional\n Whether to truncate the file if it already exists before writing\n to it.\n truncate_invalid_matlab : bool, optional\n Whether to truncate a file if the file doesn't have the proper\n header (userblock in HDF5 terms) setup for MATLAB metadata to be\n placed.\n **keywords :\n Additional keywords arguments to be passed onto\n ``scipy.io.savemat`` if dispatching to SciPy (`format` < 7.3).\n\n Raises\n ------\n ImportError\n If `format` < 7.3 and the ``scipy`` module can't be found.\n NotImplementedError\n If writing a variable in `mdict` is not supported.\n exceptions.TypeNotMatlabCompatibleError\n If writing a type not compatible with MATLAB and\n `action_for_matlab_incompatible` is set to ``'error'``.\n\n Notes\n -----\n Writing the same data and then reading it back from disk using the\n HDF5 based version 7.3 format (the functions in this package) or the\n older format (SciPy functions) can lead to very different\n results. Each package supports a different set of data types and\n converts them to and from the same MATLAB types differently.\n\n See Also\n --------\n loadmat : Equivelent function to do reading.\n scipy.io.savemat : SciPy function this one models after and\n dispatches to.\n Options\n writes : Function used to do the actual writing." - }, - { - "code": "def encode_collection(collection, encoding='utf-8'):\n if isinstance(collection, dict):\n return dict((encode_collection(key), encode_collection(value)) for key, value in collection.iteritems())\n elif isinstance(collection, list):\n return [encode_collection(element) for element in input]\n elif isinstance(collection, unicode):\n return collection.encode(encoding)\n else:\n return collection", - "docstring": "Encodes all the string keys and values in a collection with specified encoding" - }, - { - "code": "def add(self, count, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n if self.last_data >= timestamp:\n raise ValueError(\"Time {} >= {} in load average calculation\".format(self.last_data, timestamp))\n self.last_data = timestamp\n for meta in self.intervals.values():\n meta.push(count, timestamp)", - "docstring": "Add a value at the specified time to the series.\n\n :param count: The number of work items ready at the specified\n time.\n :param timestamp: The timestamp to add. Defaults to None,\n meaning current time. It should be strictly greater (newer)\n than the last added timestamp." - }, - { - "code": "def gfrefn(t1, t2, s1, s2):\n t1 = ctypes.c_double(t1)\n t2 = ctypes.c_double(t2)\n s1 = ctypes.c_int(s1)\n s2 = ctypes.c_int(s2)\n t = ctypes.c_double()\n libspice.gfrefn_c(t1, t2, s1, s2, ctypes.byref(t))\n return t.value", - "docstring": "For those times when we can't do better, we use a bisection\n method to find the next time at which to test for state change.\n\n http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfrefn_c.html\n\n :param t1: One of two values bracketing a state change.\n :type t1: float\n :param t2: The other value that brackets a state change.\n :type t2: float\n :param s1: State at t1.\n :type s1: bool\n :param s2: State at t2.\n :type s2: bool\n :return: New value at which to check for transition.\n :rtype: float" - }, - { - "code": "def current_offset(local_tz=None):\n if local_tz is None:\n local_tz = DEFAULT_LOCAL_TZ\n dt = local_tz.localize(datetime.now())\n return dt.utcoffset()", - "docstring": "Returns current utcoffset for a timezone. 
Uses\n DEFAULT_LOCAL_TZ by default. That value can be\n changed at runtime using the func below." - }, - { - "code": "def evaluate(self, x, *args):\n xx = x / self.x_0\n y = (self.amplitude * xx ** (-self.alpha)) * self._flux_unit\n flux = units.convert_flux(x, y, units.PHOTLAM)\n return flux.value", - "docstring": "Return flux in PHOTLAM. Assume input wavelength is in Angstrom." - }, - { - "code": "def center_kernel(kernel, iterations=20):\n kernel = kernel_norm(kernel)\n nx, ny = np.shape(kernel)\n if nx %2 == 0:\n raise ValueError(\"kernel needs odd number of pixels\")\n x_grid, y_grid = util.make_grid(nx, deltapix=1, left_lower=False)\n x_w = np.sum(kernel * util.array2image(x_grid))\n y_w = np.sum(kernel * util.array2image(y_grid))\n kernel_centered = de_shift_kernel(kernel, shift_x=-x_w, shift_y=-y_w, iterations=iterations)\n return kernel_norm(kernel_centered)", - "docstring": "given a kernel that might not be perfectly centered, this routine computes its light weighted center and then\n moves the center in an iterative process such that it is centered\n\n :param kernel: 2d array (odd numbers)\n :param iterations: int, number of iterations\n :return: centered kernel" - }, - { - "code": "def blackbox_and_coarse_grain(blackbox, coarse_grain):\n if blackbox is None:\n return\n for box in blackbox.partition:\n outputs = set(box) & set(blackbox.output_indices)\n if coarse_grain is None and len(outputs) > 1:\n raise ValueError(\n 'A blackboxing with multiple outputs per box must be '\n 'coarse-grained.')\n if (coarse_grain and not any(outputs.issubset(part)\n for part in coarse_grain.partition)):\n raise ValueError(\n 'Multiple outputs from a blackbox must be partitioned into '\n 'the same macro-element of the coarse-graining')", - "docstring": "Validate that a coarse-graining properly combines the outputs of a\n blackboxing." - }, - { - "code": "def ListPlugins(logdir):\n plugins_dir = os.path.join(logdir, _PLUGINS_DIR)\n try:\n entries = tf.io.gfile.listdir(plugins_dir)\n except tf.errors.NotFoundError:\n return []\n return [x.rstrip('/') for x in entries\n if x.endswith('/') or _IsDirectory(plugins_dir, x)]", - "docstring": "List all the plugins that have registered assets in logdir.\n\n If the plugins_dir does not exist, it returns an empty list. This maintains\n compatibility with old directories that have no plugins written.\n\n Args:\n logdir: A directory that was created by a TensorFlow events writer.\n\n Returns:\n a list of plugin names, as strings" - }, - { - "code": "def remove_all_attributes(element, exclude=None):\n if exclude is None:\n exclude = []\n for k in element.attrib.keys():\n if k not in exclude:\n element.attrib.pop(k)", - "docstring": "This method will remove all attributes of any provided element.\n\n A list of strings may be passed to the keyward-argument \"exclude\", which\n will serve as a list of attributes which will not be removed." - }, - { - "code": "def saveComicStrip(self, strip):\n allskipped = True\n for image in strip.getImages():\n try:\n if self.options.dry_run:\n filename, saved = \"\", False\n else:\n filename, saved = image.save(self.options.basepath)\n if saved:\n allskipped = False\n if self.stopped:\n break\n except Exception as msg:\n out.exception('Could not save image at %s to %s: %r' % (image.referrer, image.filename, msg))\n self.errors += 1\n return allskipped", - "docstring": "Save a comic strip which can consist of multiple images." 
- }, - { - "code": "def fix_insert_size(in_bam, config):\n fixed_file = os.path.splitext(in_bam)[0] + \".pi_fixed.bam\"\n if file_exists(fixed_file):\n return fixed_file\n header_file = os.path.splitext(in_bam)[0] + \".header.sam\"\n read_length = bam.estimate_read_length(in_bam)\n bam_handle= bam.open_samfile(in_bam)\n header = bam_handle.header.copy()\n rg_dict = header['RG'][0]\n if 'PI' not in rg_dict:\n return in_bam\n PI = int(rg_dict.get('PI'))\n PI = PI + 2*read_length\n rg_dict['PI'] = PI\n header['RG'][0] = rg_dict\n with pysam.Samfile(header_file, \"wb\", header=header) as out_handle:\n with bam.open_samfile(in_bam) as in_handle:\n for record in in_handle:\n out_handle.write(record)\n shutil.move(header_file, fixed_file)\n return fixed_file", - "docstring": "Tophat sets PI in the RG to be the inner distance size, but the SAM spec\n states should be the insert size. This fixes the RG in the alignment\n file generated by Tophat header to match the spec" - }, - { - "code": "def format_adjustments(self, dates, assets):\n make_adjustment = partial(make_adjustment_from_labels, dates, assets)\n min_date, max_date = dates[[0, -1]]\n if len(self.adjustments) == 0:\n return {}\n date_bounds = self.adjustment_apply_dates.slice_indexer(\n min_date,\n max_date,\n )\n dates_filter = zeros(len(self.adjustments), dtype='bool')\n dates_filter[date_bounds] = True\n dates_filter &= (self.adjustment_end_dates >= min_date)\n sids_filter = self.adjustment_sids.isin(assets.values)\n adjustments_to_use = self.adjustments.loc[\n dates_filter & sids_filter\n ].set_index('apply_date')\n out = {}\n previous_apply_date = object()\n for row in adjustments_to_use.itertuples():\n apply_date, sid, value, kind, start_date, end_date = row\n if apply_date != previous_apply_date:\n row_loc = dates.get_loc(apply_date, method='bfill')\n current_date_adjustments = out[row_loc] = []\n previous_apply_date = apply_date\n current_date_adjustments.append(\n make_adjustment(start_date, end_date, sid, kind, value)\n )\n return out", - "docstring": "Build a dict of Adjustment objects in the format expected by\n AdjustedArray.\n\n Returns a dict of the form:\n {\n # Integer index into `dates` for the date on which we should\n # apply the list of adjustments.\n 1 : [\n Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),\n Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),\n ...\n ],\n ...\n }" - }, - { - "code": "def generate_secret_key():\n import string\n import random\n rng = random.SystemRandom()\n return ''.join(\n rng.choice(string.ascii_letters + string.digits)\n for dummy in range(0, 256)\n )", - "docstring": "Generate secret key." - }, - { - "code": "def debug_complete():\n if not 'uniqueId' in request.args:\n raise ExperimentError('improper_inputs')\n else:\n unique_id = request.args['uniqueId']\n mode = request.args['mode']\n try:\n user = Participant.query.\\\n filter(Participant.uniqueid == unique_id).one()\n user.status = COMPLETED\n user.endhit = datetime.datetime.now()\n db_session.add(user)\n db_session.commit()\n except:\n raise ExperimentError('error_setting_worker_complete')\n else:\n if (mode == 'sandbox' or mode == 'live'):\n return render_template('closepopup.html')\n else:\n return render_template('complete.html')", - "docstring": "Debugging route for complete." 
- }, - { - "code": "def export_input_zip(ekey, dstore):\n dest = dstore.export_path('input.zip')\n nbytes = dstore.get_attr('input/zip', 'nbytes')\n zbytes = dstore['input/zip'].value\n zbytes += b'\\x00' * (nbytes - len(zbytes))\n open(dest, 'wb').write(zbytes)\n return [dest]", - "docstring": "Export the data in the `input_zip` dataset as a .zip file" - }, - { - "code": "def clear_data(self):\n self.tab = None\n self.cols = []\n self._idx = []\n self.x_col = ''\n self.y_col = ''\n self.w.xcombo.clear()\n self.w.ycombo.clear()\n self.w.x_lo.set_text('')\n self.w.x_hi.set_text('')\n self.w.y_lo.set_text('')\n self.w.y_hi.set_text('')", - "docstring": "Clear comboboxes and columns." - }, - { - "code": "def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):\n assert method in ('equal', 'cosine', 'random'), 'Invalid method'\n wind_eq = melodist.distribute_equally(wind_daily)\n if method == 'equal':\n wind_disagg = wind_eq\n elif method == 'cosine':\n assert None not in (a, b, t_shift)\n wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)\n elif method == 'random':\n wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3\n return wind_disagg", - "docstring": "general function for windspeed disaggregation\n\n Args:\n wind_daily: daily values\n method: keyword specifying the disaggregation method to be used\n a: parameter a for the cosine function\n b: parameter b for the cosine function\n t_shift: parameter t_shift for the cosine function\n \n Returns:\n Disaggregated hourly values of windspeed." - }, - { - "code": "def load_ratings(data_home, size):\n if size == '100k':\n with open(os.path.join(data_home, 'u.data'), encoding='ISO-8859-1') as f:\n lines = list(map(lambda l: list(map(int, l.rstrip().split('\\t'))), f.readlines()))\n elif size == '1m':\n with open(os.path.join(data_home, 'ratings.dat'), encoding='ISO-8859-1') as f:\n lines = list(map(lambda l: list(map(int, l.rstrip().split('::'))), f.readlines()))\n ratings = []\n for l in lines:\n if l[2] == 5:\n ratings.append(l)\n ratings = np.asarray(ratings)\n return ratings[np.argsort(ratings[:, 3])]", - "docstring": "Load all samples in the dataset." - }, - { - "code": "def queue(self):\n entry = self._proto.commandQueueEntry\n if entry.HasField('queueName'):\n return entry.queueName\n return None", - "docstring": "The name of the queue that this command was assigned to." - }, - { - "code": "def close(self):\n if VERBOSE:\n _print_out('\\nDummy_serial: Closing port\\n')\n if not self._isOpen:\n raise IOError('Dummy_serial: The port is already closed')\n self._isOpen = False\n self.port = None", - "docstring": "Close a port on dummy_serial." 
- }, - { - "code": "def update(cls, first_name=None, middle_name=None, last_name=None,\n public_nick_name=None, address_main=None, address_postal=None,\n avatar_uuid=None, tax_resident=None, document_type=None,\n document_number=None, document_country_of_issuance=None,\n document_front_attachment_id=None,\n document_back_attachment_id=None, date_of_birth=None,\n place_of_birth=None, country_of_birth=None, nationality=None,\n language=None, region=None, gender=None, status=None,\n sub_status=None, legal_guardian_alias=None, session_timeout=None,\n card_ids=None, card_limits=None,\n daily_limit_without_confirmation_login=None,\n notification_filters=None, display_name=None,\n custom_headers=None):\n if custom_headers is None:\n custom_headers = {}\n api_client = client.ApiClient(cls._get_api_context())\n request_map = {\n cls.FIELD_FIRST_NAME: first_name,\n cls.FIELD_MIDDLE_NAME: middle_name,\n cls.FIELD_LAST_NAME: last_name,\n cls.FIELD_PUBLIC_NICK_NAME: public_nick_name,\n cls.FIELD_ADDRESS_MAIN: address_main,\n cls.FIELD_ADDRESS_POSTAL: address_postal,\n cls.FIELD_AVATAR_UUID: avatar_uuid,\n cls.FIELD_TAX_RESIDENT: tax_resident,\n cls.FIELD_DOCUMENT_TYPE: document_type,\n cls.FIELD_DOCUMENT_NUMBER: document_number,\n cls.FIELD_DOCUMENT_COUNTRY_OF_ISSUANCE: document_country_of_issuance,\n cls.FIELD_DOCUMENT_FRONT_ATTACHMENT_ID: document_front_attachment_id,\n cls.FIELD_DOCUMENT_BACK_ATTACHMENT_ID: document_back_attachment_id,\n cls.FIELD_DATE_OF_BIRTH: date_of_birth,\n cls.FIELD_PLACE_OF_BIRTH: place_of_birth,\n cls.FIELD_COUNTRY_OF_BIRTH: country_of_birth,\n cls.FIELD_NATIONALITY: nationality,\n cls.FIELD_LANGUAGE: language,\n cls.FIELD_REGION: region,\n cls.FIELD_GENDER: gender,\n cls.FIELD_STATUS: status,\n cls.FIELD_SUB_STATUS: sub_status,\n cls.FIELD_LEGAL_GUARDIAN_ALIAS: legal_guardian_alias,\n cls.FIELD_SESSION_TIMEOUT: session_timeout,\n cls.FIELD_CARD_IDS: card_ids,\n cls.FIELD_CARD_LIMITS: card_limits,\n cls.FIELD_DAILY_LIMIT_WITHOUT_CONFIRMATION_LOGIN: daily_limit_without_confirmation_login,\n cls.FIELD_NOTIFICATION_FILTERS: notification_filters,\n cls.FIELD_DISPLAY_NAME: display_name\n }\n request_map_string = converter.class_to_json(request_map)\n request_map_string = cls._remove_field_for_request(request_map_string)\n request_bytes = request_map_string.encode()\n endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id())\n response_raw = api_client.put(endpoint_url, request_bytes,\n custom_headers)\n return BunqResponseInt.cast_from_bunq_response(\n cls._process_for_id(response_raw)\n )", - "docstring": "Modify a specific person object's data.\n\n :type user_person_id: int\n :param first_name: The person's first name.\n :type first_name: str\n :param middle_name: The person's middle name.\n :type middle_name: str\n :param last_name: The person's last name.\n :type last_name: str\n :param public_nick_name: The person's public nick name.\n :type public_nick_name: str\n :param address_main: The user's main address.\n :type address_main: object_.Address\n :param address_postal: The person's postal address.\n :type address_postal: object_.Address\n :param avatar_uuid: The public UUID of the user's avatar.\n :type avatar_uuid: str\n :param tax_resident: The user's tax residence numbers for different\n countries.\n :type tax_resident: list[object_.TaxResident]\n :param document_type: The type of identification document the person\n registered with.\n :type document_type: str\n :param document_number: The identification document number the person\n registered with.\n 
:type document_number: str\n :param document_country_of_issuance: The country which issued the\n identification document the person registered with.\n :type document_country_of_issuance: str\n :param document_front_attachment_id: The reference to the uploaded\n picture/scan of the front side of the identification document.\n :type document_front_attachment_id: int\n :param document_back_attachment_id: The reference to the uploaded\n picture/scan of the back side of the identification document.\n :type document_back_attachment_id: int\n :param date_of_birth: The person's date of birth. Accepts ISO8601 date\n formats.\n :type date_of_birth: str\n :param place_of_birth: The person's place of birth.\n :type place_of_birth: str\n :param country_of_birth: The person's country of birth. Formatted as a\n SO 3166-1 alpha-2 country code.\n :type country_of_birth: str\n :param nationality: The person's nationality. Formatted as a SO 3166-1\n alpha-2 country code.\n :type nationality: str\n :param language: The person's preferred language. Formatted as a ISO\n 639-1 language code plus a ISO 3166-1 alpha-2 country code, seperated by\n an underscore.\n :type language: str\n :param region: The person's preferred region. Formatted as a ISO 639-1\n language code plus a ISO 3166-1 alpha-2 country code, seperated by an\n underscore.\n :type region: str\n :param gender: The person's gender. Can be: MALE, FEMALE and UNKNOWN.\n :type gender: str\n :param status: The user status. You are not allowed to update the status\n via PUT.\n :type status: str\n :param sub_status: The user sub-status. Can be updated to SUBMIT if\n status is RECOVERY.\n :type sub_status: str\n :param legal_guardian_alias: The legal guardian of the user. Required\n for minors.\n :type legal_guardian_alias: object_.Pointer\n :param session_timeout: The setting for the session timeout of the user\n in seconds.\n :type session_timeout: int\n :param card_ids: Card ids used for centralized card limits.\n :type card_ids: list[object_.BunqId]\n :param card_limits: The centralized limits for user's cards.\n :type card_limits: list[object_.CardLimit]\n :param daily_limit_without_confirmation_login: The amount the user can\n pay in the session without asking for credentials.\n :type daily_limit_without_confirmation_login: object_.Amount\n :param notification_filters: The types of notifications that will result\n in a push notification or URL callback for this UserPerson.\n :type notification_filters: list[object_.NotificationFilter]\n :param display_name: The person's legal name. 
Available legal names can\n be listed via the 'user/{user_id}/legal-name' endpoint.\n :type display_name: str\n :type custom_headers: dict[str, str]|None\n\n :rtype: BunqResponseInt" - }, - { - "code": "def Deserializer(stream_or_string, **options):\n def process_item(item):\n m = _LIST_RE.match(item)\n if m:\n contents = m.group(1)\n if not contents:\n item = []\n else:\n item = process_m2m(contents)\n else:\n if item == 'TRUE':\n item = True\n elif item == 'FALSE':\n item = False\n elif item == 'NULL':\n item = None\n elif (item in _QUOTED_BOOL_NULL or\n _QUOTED_LIST_RE.match(item)):\n item = item.strip('\\'\"')\n return item\n def process_m2m(contents):\n li = []\n if _NK_LIST_RE.match(contents):\n for item in _NK_SPLIT_RE.split(contents):\n li.append(process_item(item))\n else:\n li = _SPLIT_RE.split(contents)\n return li\n if isinstance(stream_or_string, six.string_types):\n stream = StringIO(stream_or_string)\n else:\n stream = stream_or_string\n reader = UnicodeReader(stream)\n header = next(reader)\n data = []\n for row in reader:\n if row[:2] == ['pk', 'model']:\n header = row\n continue\n d = dict(zip(header[:2], row[:2]))\n d['fields'] = dict(zip(header[2:], map(process_item, row[2:])))\n data.append(d)\n for obj in PythonDeserializer(data, **options):\n yield obj", - "docstring": "Deserialize a stream or string of CSV data." - }, - { - "code": "def handle_tabbed_response(self, tab_group, context):\n if self.request.is_ajax():\n if tab_group.selected:\n return http.HttpResponse(tab_group.selected.render())\n else:\n return http.HttpResponse(tab_group.render())\n return self.render_to_response(context)", - "docstring": "Sends back an AJAX-appropriate response for the tab group if needed.\n\n Otherwise renders the response as normal." - }, - { - "code": "def extract_arc (archive, compression, cmd, verbosity, interactive, outdir):\n cmdlist = [cmd, 'x', os.path.abspath(archive)]\n return (cmdlist, {'cwd': outdir})", - "docstring": "Extract a ARC archive." - }, - { - "code": "def crashlog_clean(name, timestamp, size, **kwargs):\n ctx = Context(**kwargs)\n ctx.execute_action('crashlog:clean', **{\n 'storage': ctx.repo.create_secure_service('storage'),\n 'name': name,\n 'size': size,\n 'timestamp': timestamp,\n })", - "docstring": "For application NAME leave SIZE crashlogs or remove all crashlogs with timestamp > TIMESTAMP." - }, - { - "code": "def report(self, msg, do_reset=False, file=sys.stdout):\n print >> file, \"%s (%s s)\" % (msg, time.time() - self.start)\n if do_reset:\n self.start = time.time()", - "docstring": "Print to stdout msg followed by the runtime.\n\n When true, do_reset will result in a reset of start time." - }, - { - "code": "def display(self):\n data = []\n first_row = ['Stat']\n first_row.extend(self._names)\n data.append(first_row)\n stats = self._stats()\n for stat in stats:\n k, n, f = stat\n if k is None:\n row = [''] * len(data[0])\n data.append(row)\n continue\n row = [n]\n for key in self._names:\n raw = getattr(self[key], k)\n if k == 'rf' and not type(raw) == float:\n row.append(np.nan)\n elif f is None:\n row.append(raw)\n elif f == 'p':\n row.append(fmtp(raw))\n elif f == 'n':\n row.append(fmtn(raw))\n elif f == 'dt':\n row.append(raw.strftime('%Y-%m-%d'))\n else:\n raise NotImplementedError('unsupported format %s' % f)\n data.append(row)\n print(tabulate(data, headers='firstrow'))", - "docstring": "Display summary stats table." 
- }, - { - "code": "def generate_psk(self, identity):\n def process_result(result):\n return result[ATTR_PSK]\n return Command('post', [ROOT_GATEWAY, ATTR_AUTH], {\n ATTR_IDENTITY: identity\n }, process_result=process_result)", - "docstring": "Generates the PRE_SHARED_KEY from the gateway.\n\n Returns a Command." - }, - { - "code": "def _call_fitter(fitter, psf, x, y, data, weights):\n if np.all(weights == 1.):\n return fitter(psf, x, y, data)\n else:\n return fitter(psf, x, y, data, weights=weights)", - "docstring": "Not all fitters have to support a weight array. This function\n includes the weight in the fitter call only if really needed." - }, - { - "code": "def delete(self, indexes):\n indexes = [indexes] if not isinstance(indexes, (list, blist)) else indexes\n if all([isinstance(i, bool) for i in indexes]):\n if len(indexes) != len(self._index):\n raise ValueError('boolean indexes list must be same size of existing indexes')\n indexes = [i for i, x in enumerate(indexes) if x]\n else:\n indexes = [sorted_index(self._index, x) for x in indexes] if self._sort \\\n else [self._index.index(x) for x in indexes]\n indexes = sorted(indexes, reverse=True)\n for i in indexes:\n del self._data[i]\n for i in indexes:\n del self._index[i]", - "docstring": "Delete rows from the DataFrame\n\n :param indexes: either a list of values or list of booleans for the rows to delete\n :return: nothing" - }, - { - "code": "def get_allowed_permissions_for(brain_or_object, user=None):\n allowed = []\n user = get_user(user)\n obj = api.get_object(brain_or_object)\n for permission in get_mapped_permissions_for(brain_or_object):\n if user.has_permission(permission, obj):\n allowed.append(permission)\n return allowed", - "docstring": "Get the allowed permissions for the given object\n\n Code extracted from `IRoleManager.manage_getUserRolesAndPermissions`\n\n :param brain_or_object: Catalog brain or object\n :param user: A user ID, user object or None (for the current user)\n :returns: List of allowed permissions" - }, - { - "code": "def _get_basic_term(self, C, rup, dists):\n if rup.mag > 5.:\n c4m = C['c4']\n elif rup.mag > 4.:\n c4m = C['c4'] - (C['c4']-1.) * (5. - rup.mag)\n else:\n c4m = 1.\n R = np.sqrt(dists.rrup**2. + c4m**2.)\n base_term = C['a1'] * np.ones_like(dists.rrup) + C['a17'] * dists.rrup\n if rup.mag >= C['m1']:\n base_term += (C['a5'] * (rup.mag - C['m1']) +\n C['a8'] * (8.5 - rup.mag)**2. +\n (C['a2'] + C['a3'] * (rup.mag - C['m1'])) *\n np.log(R))\n elif rup.mag >= self.CONSTS['m2']:\n base_term += (C['a4'] * (rup.mag - C['m1']) +\n C['a8'] * (8.5 - rup.mag)**2. +\n (C['a2'] + C['a3'] * (rup.mag - C['m1'])) *\n np.log(R))\n else:\n base_term += (C['a4'] * (self.CONSTS['m2'] - C['m1']) +\n C['a8'] * (8.5 - self.CONSTS['m2'])**2. +\n C['a6'] * (rup.mag - self.CONSTS['m2']) +\n C['a7'] * (rup.mag - self.CONSTS['m2'])**2. +\n (C['a2'] + C['a3'] * (self.CONSTS['m2'] - C['m1'])) *\n np.log(R))\n return base_term", - "docstring": "Compute and return basic form, see page 1030." 
- }, - { - "code": "def get_service_types(self):\n resp = self._get_resource_root().get(self._path() + '/serviceTypes')\n return resp[ApiList.LIST_KEY]", - "docstring": "Get all service types supported by this cluster.\n\n @return: A list of service types (strings)" - }, - { - "code": "def get_or_create_bucket(self, bucket_name):\n start_time = time.time()\n gs_buckets = self.gs.buckets()\n try:\n request = gs_buckets.get(bucket=bucket_name)\n self._retry_on_reset(request, request.execute)\n self.log.debug(\"Bucket: %r already exists, took: %.3fs\", bucket_name, time.time() - start_time)\n except HttpError as ex:\n if ex.resp[\"status\"] == \"404\":\n pass\n elif ex.resp[\"status\"] == \"403\":\n raise InvalidConfigurationError(\"Bucket {0!r} exists but isn't accessible\".format(bucket_name))\n else:\n raise\n else:\n return bucket_name\n try:\n req = gs_buckets.insert(project=self.project_id, body={\"name\": bucket_name})\n self._retry_on_reset(req, req.execute)\n self.log.debug(\"Created bucket: %r successfully, took: %.3fs\", bucket_name, time.time() - start_time)\n except HttpError as ex:\n error = json.loads(ex.content.decode(\"utf-8\"))[\"error\"]\n if error[\"message\"].startswith(\"You already own this bucket\"):\n self.log.debug(\"Bucket: %r already exists, took: %.3fs\", bucket_name, time.time() - start_time)\n elif error[\"message\"] == \"Invalid argument.\":\n raise InvalidConfigurationError(\"Invalid project id {0!r}\".format(self.project_id))\n elif error[\"message\"].startswith(\"Invalid bucket name\"):\n raise InvalidConfigurationError(\"Invalid bucket name {0!r}\".format(bucket_name))\n else:\n raise\n return bucket_name", - "docstring": "Look up the bucket if it already exists and try to create the\n bucket in case it doesn't. Note that we can't just always try to\n unconditionally create the bucket as Google imposes a strict rate\n limit on bucket creation operations, even if it doesn't result in a\n new bucket.\n\n Quietly handle the case where the bucket already exists to avoid\n race conditions. Note that we'll get a 400 Bad Request response for\n invalid bucket names (\"Invalid bucket name\") as well as for invalid\n project (\"Invalid argument\"), try to handle both gracefully." - }, - { - "code": "def worker_stop(obj, worker_ids):\n if len(worker_ids) == 0:\n msg = 'Would you like to stop all workers?'\n else:\n msg = '\\n{}\\n\\n{}'.format('\\n'.join(worker_ids),\n 'Would you like to stop these workers?')\n if click.confirm(msg, default=True, abort=True):\n stop_worker(obj['config'],\n worker_ids=list(worker_ids) if len(worker_ids) > 0 else None)", - "docstring": "Stop running workers.\n\n \\b\n WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all." - }, - { - "code": "def expect(p, prefixes, confidential=False):\n resp = p.stdout.readline()\n log.debug('%s -> %r', p.args, resp if not confidential else '********')\n for prefix in prefixes:\n if resp.startswith(prefix):\n return resp[len(prefix):]\n raise UnexpectedError(resp)", - "docstring": "Read a line and return it without required prefix." 
- }, - { - "code": "def collect_local(self, path):\n for f in os.listdir(path):\n lpath = os.path.join(path, f)\n if not os.path.isfile(lpath):\n continue\n Artifact(self, lpath)", - "docstring": "Collect artifacts from a local directory possibly previously\n collected from s3" - }, - { - "code": "def program_rtr_default_gw(self, tenant_id, rout_id, gw):\n args = ['route', 'add', 'default', 'gw', gw]\n ret = self.program_rtr(args, rout_id)\n if not ret:\n LOG.error(\"Program router returned error for %s\", rout_id)\n return False\n return True", - "docstring": "Program the default gateway of a router." - }, - { - "code": "def _adjusted_rand_index(reference_indices, estimated_indices):\n n_samples = len(reference_indices)\n ref_classes = np.unique(reference_indices)\n est_classes = np.unique(estimated_indices)\n if (ref_classes.shape[0] == est_classes.shape[0] == 1 or\n ref_classes.shape[0] == est_classes.shape[0] == 0 or\n (ref_classes.shape[0] == est_classes.shape[0] ==\n len(reference_indices))):\n return 1.0\n contingency = _contingency_matrix(reference_indices, estimated_indices)\n sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in\n contingency.sum(axis=1))\n sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in\n contingency.sum(axis=0))\n sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in\n contingency.flatten()))\n prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples,\n 2))\n mean_comb = (sum_comb_k + sum_comb_c)/2.\n return (sum_comb - prod_comb)/(mean_comb - prod_comb)", - "docstring": "Compute the Rand index, adjusted for change.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n ari : float\n Adjusted Rand index\n\n .. note:: Based on sklearn.metrics.cluster.adjusted_rand_score" - }, - { - "code": "def result_to_dict(raw_result):\n result = {}\n for channel_index, channel in enumerate(raw_result):\n channel_id, channel_name = channel[0], channel[1]\n channel_result = {\n 'id': channel_id,\n 'name': channel_name,\n 'movies': []\n }\n for movie in channel[2]:\n channel_result['movies'].append({\n 'title': movie[1],\n 'start_time': datetime.fromtimestamp(movie[2]),\n 'end_time': datetime.fromtimestamp(movie[2] + movie[3]),\n 'inf': True if movie[3] else False,\n })\n result[channel_id] = channel_result\n return result", - "docstring": "Parse raw result from fetcher into readable dictionary\n\n Args:\n raw_result (dict) - raw data from `fetcher`\n\n Returns:\n dict - readable dictionary" - }, - { - "code": "def _get_minmax_edges(self, edge):\n if isinstance(edge, Line):\n depth_vals = np.array([node.depth for node in edge.points])\n else:\n depth_vals = edge[:, 2]\n temp_upper_depth = np.min(depth_vals)\n if not self.upper_depth:\n self.upper_depth = temp_upper_depth\n else:\n if temp_upper_depth < self.upper_depth:\n self.upper_depth = temp_upper_depth\n temp_lower_depth = np.max(depth_vals)\n if not self.lower_depth:\n self.lower_depth = temp_lower_depth\n else:\n if temp_lower_depth > self.lower_depth:\n self.lower_depth = temp_lower_depth", - "docstring": "Updates the upper and lower depths based on the input edges" - }, - { - "code": "def _is_big_endian(self):\n if self.endian == DataTypeMixIn.ENDIAN_NATIVE:\n return SYSTEM_ENDIAN == DataTypeMixIn.ENDIAN_BIG\n return self.endian in (DataTypeMixIn.ENDIAN_BIG, DataTypeMixIn.ENDIAN_NETWORK)", - "docstring": "Whether the current endian is big endian." 
- }, - { - "code": "def restrict_to(self, restriction):\n if type(restriction) != list:\n restriction = [ restriction ]\n self._restriction = restriction", - "docstring": "Restrict list operations to the hosts given in restriction. This is used\n to exclude failed hosts in main playbook code, don't use this for other\n reasons." - }, - { - "code": "def _organize_calls(out_file, hla_base, data):\n hla_truth = get_hla_truthset(data)\n sample = dd.get_sample_name(data)\n with file_transaction(data, out_file) as tx_out_file:\n with open(tx_out_file, \"w\") as out_handle:\n writer = csv.writer(out_handle)\n writer.writerow([\"sample\", \"locus\", \"mismatches\", \"options\", \"alleles\", \"p-groups\", \"expected\",\n \"validates\"])\n for genotype_file in glob.glob(\"%s.HLA-*.gt\" % (hla_base)):\n hla_locus = os.path.basename(genotype_file).replace(\n \"%s.HLA-\" % os.path.basename(hla_base), \"\").replace(\".gt\", \"\")\n with open(genotype_file) as in_handle:\n total_options = set([])\n for i, line in enumerate(in_handle):\n _, aone, atwo, m = line.split(\"\\t\")[:4]\n pgroups = (hla_groups.hla_protein(aone, data), hla_groups.hla_protein(atwo, data))\n if i == 0:\n call_alleles = [aone, atwo]\n call_pgroups = pgroups\n mismatches = m\n total_options.add(pgroups)\n if len(total_options) > 0:\n truth_alleles = tz.get_in([sample, hla_locus], hla_truth, [])\n writer.writerow([sample, hla_locus, mismatches, len(total_options),\n \";\".join(call_alleles), \";\".join(call_pgroups),\n \";\".join(truth_alleles), matches_truth(call_alleles, truth_alleles, data)])\n return out_file", - "docstring": "Prepare genotype calls, reporting best call along with quality metrics." - }, - { - "code": "def dict_unicodeize(some_dict):\n if isinstance(some_dict, (\"\".__class__, u\"\".__class__)):\n if sys.version_info >= (3, 0):\n return some_dict\n return some_dict.decode('utf-8')\n elif isinstance(some_dict, collections.Mapping):\n return dict(map(dict_unicodeize, iteritems(some_dict)))\n elif isinstance(some_dict, collections.Iterable):\n return type(some_dict)(map(dict_unicodeize, some_dict))\n return some_dict", - "docstring": "Ensure that every string in a dict is properly represented\n by unicode strings" - }, - { - "code": "def update_offset(self, new_offset):\n self.offset = new_offset\n self.data_points = self._data_points[self.offset:]\n self.timestamps = self._timestamps[self.offset:]", - "docstring": "Updates how many data points to skip in caculations.\n\n Always use this function to update offset instead of directly setting\n self.offset.\n\n Args:\n new_offset: The new offset." 
- }, - { - "code": "def any(self, axis=0, *args, **kwargs):\n nv.validate_any(args, kwargs)\n values = self.sp_values\n if len(values) != len(self) and np.any(self.fill_value):\n return True\n return values.any().item()", - "docstring": "Tests whether at least one of elements evaluate True\n\n Returns\n -------\n any : bool\n\n See Also\n --------\n numpy.any" - }, - { - "code": "def get_data_port(self, state_id, port_id):\n if state_id == self.state_id:\n return self.get_data_port_by_id(port_id)\n for child_state_id, child_state in self.states.items():\n if state_id != child_state_id:\n continue\n port = child_state.get_data_port_by_id(port_id)\n if port:\n return port\n return None", - "docstring": "Searches for a data port\n\n The data port specified by the state id and data port id is searched in the state itself and in its children.\n\n :param str state_id: The id of the state the port is in\n :param int port_id: The id of the port\n :return: The searched port or None if it is not found" - }, - { - "code": "def disable_all_breakpoints(cls):\n for bp in cls.breakpoints_by_number:\n if bp:\n bp.enabled = False\n cls.update_active_breakpoint_flag()\n return", - "docstring": "Disable all breakpoints and udate `active_breakpoint_flag`." - }, - { - "code": "def rotate_view(self, axis_ind=0, angle=0):\n camera = self.ren.GetActiveCamera()\n if axis_ind == 0:\n camera.Roll(angle)\n elif axis_ind == 1:\n camera.Azimuth(angle)\n else:\n camera.Pitch(angle)\n self.ren_win.Render()", - "docstring": "Rotate the camera view.\n\n Args:\n axis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.\n angle: Angle to rotate by. Defaults to 0." - }, - { - "code": "def DeserializeUnsignedWithoutType(self, reader):\n self.Version = reader.ReadByte()\n self.DeserializeExclusiveData(reader)\n self.Attributes = reader.ReadSerializableArray('neo.Core.TX.TransactionAttribute.TransactionAttribute',\n max=self.MAX_TX_ATTRIBUTES)\n self.inputs = reader.ReadSerializableArray('neo.Core.CoinReference.CoinReference')\n self.outputs = reader.ReadSerializableArray('neo.Core.TX.Transaction.TransactionOutput')", - "docstring": "Deserialize object without reading transaction type data.\n\n Args:\n reader (neo.IO.BinaryReader):" - }, - { - "code": "def add_deploy_key(self, auth, username, repo_name, title, key_content):\n data = {\n \"title\": title,\n \"key\": key_content\n }\n response = self.post(\"/repos/{u}/{r}/keys\".format(u=username, r=repo_name), auth=auth, data=data)\n return GogsRepo.DeployKey.from_json(response.json())", - "docstring": "Add a deploy key to the specified repo.\n\n :param auth.Authentication auth: authentication object\n :param str username: username of owner of repository\n :param str repo_name: the name of the repo\n :param str title: title of the key to add\n :param str key_content: content of the key to add\n :return: a representation of the added deploy key\n :rtype: GogsRepo.DeployKey\n :raises NetworkFailure: if there is an error communicating with the server\n :raises ApiFailure: if the request cannot be serviced" - }, - { - "code": "def load_maf(\n path,\n optional_cols=[],\n sort_key=variant_ascending_position_sort_key,\n distinct=True,\n raise_on_error=True,\n encoding=None):\n maf_df = load_maf_dataframe(path, raise_on_error=raise_on_error, encoding=encoding)\n if len(maf_df) == 0 and raise_on_error:\n raise ValueError(\"Empty MAF file %s\" % path)\n ensembl_objects = {}\n variants = []\n metadata = {}\n for _, x in maf_df.iterrows():\n contig = x.Chromosome\n if isnull(contig):\n 
error_message = \"Invalid contig name: %s\" % (contig,)\n if raise_on_error:\n raise ValueError(error_message)\n else:\n logging.warn(error_message)\n continue\n start_pos = x.Start_Position\n ref = x.Reference_Allele\n ncbi_build = x.NCBI_Build\n if ncbi_build in ensembl_objects:\n ensembl = ensembl_objects[ncbi_build]\n else:\n if isinstance(ncbi_build, int):\n reference_name = \"B%d\" % ncbi_build\n else:\n reference_name = str(ncbi_build)\n ensembl = infer_genome(reference_name)\n ensembl_objects[ncbi_build] = ensembl\n if x.Tumor_Seq_Allele1 != ref:\n alt = x.Tumor_Seq_Allele1\n else:\n if x.Tumor_Seq_Allele2 == ref:\n error_message = (\n \"Both tumor alleles agree with reference %s: %s\" % (\n ref, x,))\n if raise_on_error:\n raise ValueError(error_message)\n else:\n logging.warn(error_message)\n continue\n alt = x.Tumor_Seq_Allele2\n variant = Variant(\n contig,\n start_pos,\n str(ref),\n str(alt),\n ensembl=ensembl)\n metadata[variant] = {\n 'Hugo_Symbol': x.Hugo_Symbol,\n 'Center': x.Center,\n 'Strand': x.Strand,\n 'Variant_Classification': x.Variant_Classification,\n 'Variant_Type': x.Variant_Type,\n 'dbSNP_RS': x.dbSNP_RS,\n 'dbSNP_Val_Status': x.dbSNP_Val_Status,\n 'Tumor_Sample_Barcode': x.Tumor_Sample_Barcode,\n 'Matched_Norm_Sample_Barcode': x.Matched_Norm_Sample_Barcode,\n }\n for optional_col in optional_cols:\n if optional_col in x:\n metadata[variant][optional_col] = x[optional_col]\n variants.append(variant)\n return VariantCollection(\n variants=variants,\n source_to_metadata_dict={path: metadata},\n sort_key=sort_key,\n distinct=distinct)", - "docstring": "Load reference name and Variant objects from MAF filename.\n\n Parameters\n ----------\n\n path : str\n Path to MAF (*.maf).\n\n optional_cols : list, optional\n A list of MAF columns to include as metadata if they are present in the MAF.\n Does not result in an error if those columns are not present.\n\n sort_key : fn\n Function which maps each element to a sorting criterion.\n Set to None to not to sort the variants.\n\n distinct : bool\n Don't keep repeated variants\n\n raise_on_error : bool\n Raise an exception upon encountering an error or just log a warning.\n\n encoding : str, optional\n Encoding to use for UTF when reading MAF file." - }, - { - "code": "def create_item(self, **kwargs):\n item, created = self.queryset.model.objects.get_or_create(**kwargs)\n return item", - "docstring": "Return a model instance created from kwargs." - }, - { - "code": "def poke(library, session, address, width, data):\n if width == 8:\n return poke_8(library, session, address, data)\n elif width == 16:\n return poke_16(library, session, address, data)\n elif width == 32:\n return poke_32(library, session, address, data)\n raise ValueError('%s is not a valid size. 
Valid values are 8, 16 or 32' % width)", - "docstring": "Writes an 8, 16 or 32-bit value from the specified address.\n\n Corresponds to viPoke* functions of the VISA library.\n\n :param library: the visa library wrapped by ctypes.\n :param session: Unique logical identifier to a session.\n :param address: Source address to read the value.\n :param width: Number of bits to read.\n :param data: Data to be written to the bus.\n :return: return value of the library call.\n :rtype: :class:`pyvisa.constants.StatusCode`" - }, - { - "code": "def _colorize(self, msg, color=None, encode=False):\n colors = {\n 'red': '31',\n 'green': '32',\n 'yellow': '33'\n }\n if not color or not color in colors:\n return msg\n if encode:\n return u'\\x1b[1;{}m{}\\x1b[0m'.format(colors[color], msg)\n return '\\x1b[1;{}m{}\\x1b[0m'.format(colors[color], msg)", - "docstring": "Colorize a string." - }, - { - "code": "def pix2sky(self, pixel):\n pixbox = numpy.array([pixel, pixel])\n skybox = self.wcs.all_pix2world(pixbox, 1)\n return [float(skybox[0][0]), float(skybox[0][1])]", - "docstring": "Get the sky coordinates for a given image pixel.\n\n Parameters\n ----------\n pixel : (float, float)\n Image coordinates.\n\n Returns\n -------\n ra,dec : float\n Sky coordinates (degrees)" - }, - { - "code": "def peek_char(self, lpBaseAddress):\n char = self.peek(lpBaseAddress, 1)\n if char:\n return ord(char)\n return 0", - "docstring": "Reads a single character from the memory of the process.\n\n @see: L{read_char}\n\n @type lpBaseAddress: int\n @param lpBaseAddress: Memory address to begin reading.\n\n @rtype: int\n @return: Character read from the process memory.\n Returns zero on error." - }, - { - "code": "def remove_image(self, image, force=False, noprune=False):\n params = {'force': force, 'noprune': noprune}\n res = self._delete(self._url(\"/images/{0}\", image), params=params)\n return self._result(res, True)", - "docstring": "Remove an image. Similar to the ``docker rmi`` command.\n\n Args:\n image (str): The image to remove\n force (bool): Force removal of the image\n noprune (bool): Do not delete untagged parents" - }, - { - "code": "def local_qvm() -> Iterator[Tuple[subprocess.Popen, subprocess.Popen]]:\n qvm = subprocess.Popen(['qvm', '-S'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n quilc = subprocess.Popen(['quilc', '-RP'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n try:\n yield (qvm, quilc)\n finally:\n qvm.terminate()\n quilc.terminate()", - "docstring": "A context manager for the Rigetti local QVM and QUIL compiler.\n\n You must first have installed the `qvm` and `quilc` executables from\n the forest SDK. [https://www.rigetti.com/forest]\n\n This context manager will start up external processes for both the\n compiler and virtual machine, and then terminate them when the context\n is exited.\n\n If `qvm` (or `quilc`) is already running, then the existing process will\n be used, and will not terminated at exit.\n\n >>> from pyquil import get_qc, Program\n >>> from pyquil.gates import CNOT, Z\n >>> from pyquil.api import local_qvm\n >>>\n >>> qvm = get_qc('9q-square-qvm')\n >>> prog = Program(Z(0), CNOT(0, 1))\n >>>\n >>> with local_qvm():\n >>> results = qvm.run_and_measure(prog, trials=10)\n\n :raises: FileNotFoundError: If either executable is not installed." - }, - { - "code": "def selectnone(table, field, complement=False):\n return select(table, field, lambda v: v is None, complement=complement)", - "docstring": "Select rows where the given field is `None`." 
- }, - { - "code": "def renderer_doc(*args):\n renderers_ = salt.loader.render(__opts__, [])\n docs = {}\n if not args:\n for func in six.iterkeys(renderers_):\n docs[func] = renderers_[func].__doc__\n return _strip_rst(docs)\n for module in args:\n if '*' in module or '.' in module:\n for func in fnmatch.filter(renderers_, module):\n docs[func] = renderers_[func].__doc__\n else:\n moduledot = module + '.'\n for func in six.iterkeys(renderers_):\n if func.startswith(moduledot):\n docs[func] = renderers_[func].__doc__\n return _strip_rst(docs)", - "docstring": "Return the docstrings for all renderers. Optionally, specify a renderer or a\n function to narrow the selection.\n\n The strings are aggregated into a single document on the master for easy\n reading.\n\n Multiple renderers can be specified.\n\n .. versionadded:: 2015.5.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' sys.renderer_doc\n salt '*' sys.renderer_doc cheetah\n salt '*' sys.renderer_doc jinja json\n\n Renderer names can be specified as globs.\n\n .. code-block:: bash\n\n salt '*' sys.renderer_doc 'c*' 'j*'" - }, - { - "code": "def get_items(self, paginator, current_page):\n fields = self.get_model_config().get_list_fields()\n page = paginator.page(current_page)\n items = []\n for item in page:\n items.append({\n 'id': item.id,\n 'url': item.get_absolute_url(),\n 'row_data': [\n fields[field]['renderer'](item, field)\n for field in self.get_current_fields()\n ]\n })\n return items", - "docstring": "Get list items for current page" - }, - { - "code": "def use_value(self, value):\n if self.check_value(value):\n return value\n return self.convert_value(value)", - "docstring": "Converts value to field type or use original" - }, - { - "code": "def export(self, pid, context=None, format=None, encoding=None,\n stream=False):\n http_args = {}\n if context:\n http_args['context'] = context\n if format:\n http_args['format'] = format\n if encoding:\n http_args['encoding'] = encoding\n uri = 'objects/%s/export' % pid\n return self.get(uri, params=http_args, stream=stream)", - "docstring": "Export an object to be migrated or archived.\n\n :param pid: object pid\n :param context: export context, one of: public, migrate, archive\n (default: public)\n :param format: export format (Fedora default is foxml 1.1)\n :param encoding: encoding (Fedora default is UTF-8)\n :param stream: if True, request a streaming response to be\n read in chunks\n :rtype: :class:`requests.models.Response`" - }, - { - "code": "def make_script_directory(cls, config):\n temporary_dir = config.get_main_option(\"temporary_dir\")\n migrations_dir = config.get_main_option(\"migrations_dir\")\n return cls(\n dir=temporary_dir,\n version_locations=[migrations_dir],\n )", - "docstring": "Alembic uses a \"script directory\" to encapsulate its `env.py` file, its migrations\n directory, and its `script.py.mako` revision template.\n\n We'd rather not have such a directory at all as the default `env.py` rarely works\n without manipulation, migrations are better saved in a location within the source tree,\n and revision templates shouldn't vary between projects.\n\n Instead, generate a `ScriptDirectory` object, injecting values from the config." 
- }, - { - "code": "def re_initiate_model_list(self, model_list_or_dict, core_objects_dict, model_name, model_class, model_key):\n if model_name == \"income\":\n if self.income.income != self.state.income:\n self._add_model(self.income, self.state.income, IncomeModel)\n return\n for _ in range(len(model_list_or_dict)):\n self.remove_additional_model(model_list_or_dict, core_objects_dict, model_name, model_key)\n if core_objects_dict:\n for _ in core_objects_dict:\n self.add_missing_model(model_list_or_dict, core_objects_dict, model_name, model_class, model_key)", - "docstring": "Recreate model list\n\n The method re-initiate a handed list or dictionary of models with the new dictionary of core-objects.\n\n :param model_list_or_dict: could be a list or dictionary of one model type\n :param core_objects_dict: new dictionary of one type of core-elements (rafcon.core)\n :param model_name: prop_name for the core-element hold by the model, this core-element is covered by the model\n :param model_class: model-class of the elements that should be insert\n :param model_key: if model_list_or_dict is a dictionary the key is the id of the respective element\n (e.g. 'state_id')\n :return:" - }, - { - "code": "def validate_totalflux(totalflux):\n if totalflux <= 0.0:\n raise exceptions.SynphotError('Integrated flux is <= 0')\n elif np.isnan(totalflux):\n raise exceptions.SynphotError('Integrated flux is NaN')\n elif np.isinf(totalflux):\n raise exceptions.SynphotError('Integrated flux is infinite')", - "docstring": "Check integrated flux for invalid values.\n\n Parameters\n ----------\n totalflux : float\n Integrated flux.\n\n Raises\n ------\n synphot.exceptions.SynphotError\n Input is zero, negative, or not a number." - }, - { - "code": "def _remove_pidfile(self):\n LOGGER.debug('Removing pidfile: %s', self.pidfile_path)\n try:\n os.unlink(self.pidfile_path)\n except OSError:\n pass", - "docstring": "Remove the pid file from the filesystem" - }, - { - "code": "def convert_unit(self, column, value):\n \"If the user has provided a unit in the query, convert it into the column unit, if present.\"\n if column not in self.units:\n return value\n value = self.ureg(value)\n if isinstance(value, numbers.Number):\n return value\n column_unit = self.ureg(self.units[column])\n return value.to(column_unit).magnitude", - "docstring": "If the user has provided a unit in the query, convert it into the column unit, if present." 
- }, - { - "code": "async def get_creds(self, proof_req_json: str, filt: dict = None, filt_dflt_incl: bool = False) -> (Set[str], str):\n LOGGER.debug('HolderProver.get_creds >>> proof_req_json: %s, filt: %s', proof_req_json, filt)\n if filt is None:\n filt = {}\n rv = None\n creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json)\n creds = json.loads(creds_json)\n cred_ids = set()\n if filt:\n for cd_id in filt:\n try:\n json.loads(await self.get_cred_def(cd_id))\n except AbsentCredDef:\n LOGGER.warning('HolderProver.get_creds: ignoring filter criterion, no cred def on %s', cd_id)\n filt.pop(cd_id)\n for inner_creds in {**creds['attrs'], **creds['predicates']}.values():\n for cred in inner_creds:\n cred_info = cred['cred_info']\n if filt:\n cred_cd_id = cred_info['cred_def_id']\n if cred_cd_id not in filt:\n if filt_dflt_incl:\n cred_ids.add(cred_info['referent'])\n continue\n if 'attr-match' in (filt[cred_cd_id] or {}):\n if not {k: str(filt[cred_cd_id].get('attr-match', {})[k])\n for k in filt[cred_cd_id].get('attr-match', {})}.items() <= cred_info['attrs'].items():\n continue\n if 'minima' in (filt[cred_cd_id] or {}):\n minima = filt[cred_cd_id].get('minima', {})\n try:\n if any((attr not in cred_info['attrs'])\n or (int(cred_info['attrs'][attr]) < int(minima[attr]))\n for attr in minima):\n continue\n except ValueError:\n continue\n cred_ids.add(cred_info['referent'])\n else:\n cred_ids.add(cred_info['referent'])\n if filt:\n creds = json.loads(prune_creds_json(creds, cred_ids))\n rv = (cred_ids, json.dumps(creds))\n LOGGER.debug('HolderProver.get_creds <<< %s', rv)\n return rv", - "docstring": "Get credentials from HolderProver wallet corresponding to proof request and\n filter criteria; return credential identifiers from wallet and credentials json.\n Return empty set and empty production for no such credentials.\n\n :param proof_req_json: proof request json as Verifier creates; has entries for proof request's\n nonce, name, and version; plus credential's requested attributes, requested predicates. I.e.,\n\n ::\n\n {\n 'nonce': string, # indy-sdk makes no semantic specification on this value\n 'name': string, # indy-sdk makes no semantic specification on this value\n 'version': numeric-string, # indy-sdk makes no semantic specification on this value\n 'requested_attributes': {\n '': { # aka attr_referent, a proof-request local identifier\n 'name': string, # attribute name (matches case- and space-insensitively)\n 'restrictions' [ # optional\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n },\n {\n ... 
# if more than one restriction given, combined disjunctively (i.e., via OR)\n }\n ],\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': int, # optional, epoch seconds\n 'to': int # optional, epoch seconds\n }\n },\n ...\n },\n 'requested_predicates': {\n '': { # aka predicate_referent, a proof-request local predicate identifier\n 'name': string, # attribute name (matches case- and space-insensitively)\n 'p_type': '>=',\n 'p_value': int, # predicate value\n 'restrictions': [ # optional\n {\n \"schema_id\": string, # optional\n \"schema_issuer_did\": string, # optional\n \"schema_name\": string, # optional\n \"schema_version\": string, # optional\n \"issuer_did\": string, # optional\n \"cred_def_id\": string # optional\n },\n {\n ... # if more than one restriction given, combined disjunctively (i.e., via OR)\n }\n ],\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': int, # optional, epoch seconds\n 'to': int # optional, epoch seconds\n }\n },\n ...\n },\n 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet\n 'from': Optional,\n 'to': Optional\n }\n }\n\n :param filt: filter for matching attribute-value pairs and predicates; dict mapping each\n cred def id to dict (specify empty dict or none for no filter, matching all)\n mapping attributes to values to match or compare. E.g.,\n\n ::\n\n {\n 'Vx4E82R17q...:3:CL:16:0': {\n 'attr-match': {\n 'name': 'Alex',\n 'sex': 'M',\n 'favouriteDrink': None\n },\n 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)\n 'favouriteNumber' : 10,\n 'score': '100' # nicety: implementation converts to int for caller\n },\n },\n 'R17v42T4pk...:3:CL:19:0': {\n 'attr-match': {\n 'height': 175,\n 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)\n }\n },\n 'Z9ccax812j...:3:CL:27:0': {\n 'attr-match': {} # match all attributes on this cred def\n }\n ...\n }\n\n :param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not\n identify by cred def, or to exclude (False) all such credentials\n :return: tuple with (set of referents, creds json for input proof request);\n empty set and empty production for no such credential" - }, - { - "code": "def group_by(function):\n def _group_by(seq):\n result = {}\n for item in seq:\n result.setdefault(function(item), []).append(item)\n return dict_items(result)\n return _group_by", - "docstring": "Groups input sequence by `function`.\n\n Returns an iterator over a sequence of tuples where the first item is a\n result of `function` and the second one a list of items matching this\n result.\n\n Ordering of the resulting iterator is undefined, but ordering of the items\n in the groups is preserved.\n\n >>> [1, 2, 3, 4, 5, 6] > group_by(X % 2) | list\n [(0, [2, 4, 6]), (1, [1, 3, 5])]" - }, - { - "code": "def _validate_ssh_minion_opts(opts):\n ssh_minion_opts = opts.get('ssh_minion_opts', {})\n if not isinstance(ssh_minion_opts, dict):\n log.error('Invalidly-formatted ssh_minion_opts')\n opts.pop('ssh_minion_opts')\n for opt_name in list(ssh_minion_opts):\n if re.match('^[a-z0-9]+fs_', opt_name, flags=re.IGNORECASE) \\\n or ('pillar' in opt_name and not 'ssh_merge_pillar' == opt_name) \\\n or opt_name in ('fileserver_backend',):\n log.warning(\n '\\'%s\\' is not a valid ssh_minion_opts parameter, ignoring',\n opt_name\n )\n ssh_minion_opts.pop(opt_name)", - "docstring": "Ensure we're not using any invalid ssh_minion_opts. 
We want to make sure\n that the ssh_minion_opts does not override any pillar or fileserver options\n inherited from the master config. To add other items, modify the if\n statement in the for loop below." - }, - { - "code": "def put(self, rownr, value, matchingfields=True):\n self._put(rownr, value, matchingfields)", - "docstring": "Put the values into the given row.\n\n The value should be a dict (as returned by method :func:`get`.\n The names of the fields in the dict should match the names of the\n columns used in the `tablerow` object.\n\n `matchingfields=True` means that the value may contain more fields\n and only fields matching a column name will be used." - }, - { - "code": "def eqCoords(self, zerolat=False):\n lat = 0.0 if zerolat else self.lat\n return utils.eqCoords(self.lon, lat)", - "docstring": "Returns the Equatorial Coordinates of this object. \n Receives a boolean parameter to consider a zero latitude." - }, - { - "code": "def delete_contacts(self, ids):\n str_ids = self._return_comma_list(ids)\n self.request('ContactAction', {'action': {'op': 'delete',\n 'id': str_ids}})", - "docstring": "Delete selected contacts for the current user\n\n :param ids: list of ids" - }, - { - "code": "def to_html(self):\n if self.align is 'left':\n if self.style_class is None:\n self.style_class = 'text-left'\n else:\n self.style_class += ' text-left'\n elif self.align is 'right':\n if self.style_class is None:\n self.style_class = 'text-right'\n else:\n self.style_class += ' text-right'\n elif self.align is 'center':\n if self.style_class is None:\n self.style_class = 'text-center'\n else:\n self.style_class += ' text-center'\n class_name = self.content.__class__.__name__\n if class_name in ['BulletedList', 'Table', 'Image', 'Message']:\n html = self.content.to_html()\n else:\n html = self.content.to_html(wrap_slash=self.wrap_slash)\n if self.header_flag is True:\n return '%s\\n' % (\n self.html_attributes(), self.span, html)\n else:\n return '%s\\n' % (\n self.html_attributes(), self.span, html)", - "docstring": "Render a Cell MessageElement as html\n\n :returns: The html representation of the Cell MessageElement\n :rtype: basestring" - }, - { - "code": "def _read_parquet_columns(path, columns, num_splits, kwargs):\n import pyarrow.parquet as pq\n df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas()\n return _split_result_for_readers(0, num_splits, df) + [len(df.index)]", - "docstring": "Use a Ray task to read columns from Parquet into a Pandas DataFrame.\n\n Note: Ray functions are not detected by codecov (thus pragma: no cover)\n\n Args:\n path: The path of the Parquet file.\n columns: The list of column names to read.\n num_splits: The number of partitions to split the column into.\n\n Returns:\n A list containing the split Pandas DataFrames and the Index as the last\n element. If there is not `index_col` set, then we just return the length.\n This is used to determine the total length of the DataFrame to build a\n default Index." 
- }, - { - "code": "def addCategory(self, categoryUri, weight):\n assert isinstance(weight, (float, int)), \"weight value has to be a positive or negative integer\"\n self.topicPage[\"categories\"].append({\"uri\": categoryUri, \"wgt\": weight})", - "docstring": "add a relevant category to the topic page\n @param categoryUri: uri of the category to be added\n @param weight: importance of the provided category (typically in range 1 - 50)" - }, - { - "code": "def _do_select(self, start_bindex, end_bindex):\n self.select(QItemSelection(), QItemSelectionModel.Clear)\n if start_bindex > end_bindex:\n start_bindex, end_bindex = end_bindex, start_bindex\n selection = QItemSelection()\n if row_number(end_bindex) - row_number(start_bindex) == 0:\n self._bselect(selection, start_bindex, end_bindex)\n elif row_number(end_bindex) - row_number(start_bindex) == 1:\n self._bselect(selection, start_bindex, row_end_index(start_bindex))\n self._bselect(selection, row_start_index(end_bindex), end_bindex)\n else:\n self._bselect(selection, start_bindex, row_end_index(start_bindex))\n self._bselect(selection, row_start_index(start_bindex) + 0x10, row_end_index(end_bindex) - 0x10)\n self._bselect(selection, row_start_index(end_bindex), end_bindex)\n self.select(selection, QItemSelectionModel.SelectCurrent)\n self.start = start_bindex\n self.end = end_bindex\n self.selectionRangeChanged.emit(end_bindex)", - "docstring": "select the given range by buffer indices\n\n selects items like this:\n\n ..................\n ......xxxxxxxxxxxx\n xxxxxxxxxxxxxxxxxx\n xxxxxxxxxxxxxxxxxx\n xxxxxxxxxxxx......\n ..................\n\n *not* like this:\n\n ..................\n ......xxxxxx......\n ......xxxxxx......\n ......xxxxxx......\n ......xxxxxx......\n .................." - }, - { - "code": "def _local_times_from_hours_since_midnight(times, hours):\n tz_info = times.tz\n naive_times = times.tz_localize(None)\n return pd.DatetimeIndex(\n (naive_times.normalize().astype(np.int64) +\n (hours * NS_PER_HR).astype(np.int64)).astype('datetime64[ns]'),\n tz=tz_info)", - "docstring": "converts hours since midnight from an array of floats to localized times" - }, - { - "code": "def related_objects(self, related, objs):\n from versions.models import Versionable\n related_model = related.related_model\n if issubclass(related_model, Versionable):\n qs = related_model.objects.current\n else:\n qs = related_model._base_manager.all()\n return qs.using(self.using).filter(\n **{\"%s__in\" % related.field.name: objs}\n )", - "docstring": "Gets a QuerySet of current objects related to ``objs`` via the\n relation ``related``." - }, - { - "code": "def execute_all_rules(self, matches, context):\n ret = []\n for priority, priority_rules in groupby(sorted(self), lambda rule: rule.priority):\n sorted_rules = toposort_rules(list(priority_rules))\n for rules_group in sorted_rules:\n rules_group = list(sorted(rules_group, key=self.index))\n group_log_level = None\n for rule in rules_group:\n if group_log_level is None or group_log_level < rule.log_level:\n group_log_level = rule.log_level\n log(group_log_level, \"%s independent rule(s) at priority %s.\", len(rules_group), priority)\n for rule in rules_group:\n when_response = execute_rule(rule, matches, context)\n if when_response is not None:\n ret.append((rule, when_response))\n return ret", - "docstring": "Execute all rules from this rules list. 
All when condition with same priority will be performed before\n calling then actions.\n\n :param matches:\n :type matches:\n :param context:\n :type context:\n :return:\n :rtype:" - }, - { - "code": "def __parse_organizations(self, json):\n try:\n for company in json['companies']:\n name = self.__encode(company['company_name'])\n org = self._organizations.get(name, None)\n if not org:\n org = Organization(name=name)\n self._organizations[name] = org\n for domain in company['domains']:\n if not domain:\n continue\n dom = Domain(domain=domain)\n org.domains.append(dom)\n except KeyError as e:\n msg = \"invalid json format. Attribute %s not found\" % e.args\n raise InvalidFormatError(cause=msg)", - "docstring": "Parse Stackalytics organizations.\n\n The Stackalytics organizations format is a JSON document stored under the\n \"companies\" key. The next JSON shows the structure of the\n document:\n\n {\n \"companies\" : [\n {\n \"domains\": [\"alcatel-lucent.com\"],\n \"company_name\": \"Alcatel-Lucent\",\n \"aliases\": [\"Alcatel Lucent\", \"Alcatel-Lcuent\"]\n },\n {\n \"domains\": [\"allegrogroup.com\", \"allegro.pl\"],\n \"company_name\": \"Allegro\",\n \"aliases\": [\"Allegro Group\", \"Grupa Allegro\", \"Grupa Allegro Sp. z o.o.\"]\n },\n {\n \"domains\": [\"altiscale.com\"],\n \"company_name\": \"Altiscale\"\n },\n ]\n }\n\n :param json: JSON object to parse\n\n :raises InvalidFormatError: raised when the format of the JSON is\n not valid." - }, - { - "code": "def qdict_get_list(qdict, k):\n pks = qdict.getlist(k)\n return [e for e in pks if e]", - "docstring": "get list from QueryDict and remove blank date from list." - }, - { - "code": "def partial_declaration_path(decl):\n if not decl:\n return []\n if not decl.cache.partial_declaration_path:\n result = [decl.partial_name]\n parent = decl.parent\n while parent:\n if parent.cache.partial_declaration_path:\n result.reverse()\n decl.cache.partial_declaration_path \\\n = parent.cache.partial_declaration_path + result\n return decl.cache.partial_declaration_path\n else:\n result.append(parent.partial_name)\n parent = parent.parent\n result.reverse()\n decl.cache.partial_declaration_path = result\n return result\n return decl.cache.partial_declaration_path", - "docstring": "Returns a list of parent declarations names without template arguments that\n have default value.\n\n Args:\n decl (declaration_t): declaration for which the partial declaration\n path should be calculated.\n\n Returns:\n list[(str | basestring)]: list of names, where first item is the top\n parent name and last item the inputted\n declaration name." - }, - { - "code": "def config(config_dict: typing.Mapping) -> Config:\n logger.debug(f\"Updating with {config_dict}\")\n _cfg.update(config_dict)\n return _cfg", - "docstring": "Configures the konch shell. This function should be called in a\n .konchrc file.\n\n :param dict config_dict: Dict that may contain 'context', 'banner', and/or\n 'shell' (default shell class to use)." 
- }, - { - "code": "def wait_until_running(self, callback=None):\n status = self.machine.scheduler.wait_until_running(\n self.job, self.worker_config.time_out)\n if status.running:\n self.online = True\n if callback:\n callback(self)\n else:\n raise TimeoutError(\"Timeout while waiting for worker to run: \" +\n self.worker_config.name)", - "docstring": "Waits until the remote worker is running, then calls the callback.\n Usually, this method is passed to a different thread; the callback\n is then a function patching results through to the result queue." - }, - { - "code": "def _trig_auth_check(self, useriden):\n if not self.user.admin and useriden != self.user.iden:\n raise s_exc.AuthDeny(user=self.user.name, mesg='As non-admin, may only manipulate triggers created by you')", - "docstring": "Check that, as a non-admin, may only manipulate resources created by you." - }, - { - "code": "def from_celery(cls, name, worker_dict, queues):\n return WorkerStats(\n name=name,\n broker=BrokerStats.from_celery(worker_dict['broker']),\n pid=worker_dict['pid'],\n process_pids=worker_dict['pool']['processes'],\n concurrency=worker_dict['pool']['max-concurrency'],\n job_count=worker_dict['pool']['writes']['total'],\n queues=queues\n )", - "docstring": "Create a WorkerStats object from the dictionary returned by celery.\n\n Args:\n name (str): The name of the worker.\n worker_dict (dict): The dictionary as returned by celery.\n queues (list): A list of QueueStats objects that represent the queues this\n worker is listening on.\n\n Returns:\n WorkerStats: A fully initialized WorkerStats object." - }, - { - "code": "def write_float(self, value, little_endian=True):\n if little_endian:\n endian = \"<\"\n else:\n endian = \">\"\n return self.pack('%sf' % endian, value)", - "docstring": "Pack the value as a float and write 4 bytes to the stream.\n\n Args:\n value (number): the value to write to the stream.\n little_endian (bool): specify the endianness. (Default) Little endian.\n\n Returns:\n int: the number of bytes written." - }, - { - "code": "def dremove(self, **kwds):\n filtered_dr = self.dfilter(**kwds)\n for item in filtered_dr:\n self.remove(item)\n return filtered_dr", - "docstring": "Removes from the object any element that matches the\n given specification." 
- }, - { - "code": "def SetIndicated(self, node):\n self.indicated_node = node\n self.indicated = self.NodeToIndex(node)\n self.Refresh(False)\n return self.indicated", - "docstring": "Set this node to indicated status" - }, - { - "code": "def write(self, string):\n\t\tif (\"\" == string or '\\n' == string or '\\r' == string):\n\t\t\treturn\n\t\tstring = string.replace(\"\\r\", \"\")\n\t\tif self.printData:\n\t\t\tprint >> sys.__stdout__, string\n\t\tif self.logData:\n\t\t\tself.logWrite(string)", - "docstring": "The write method for a CalcpkgOutput object- print the string" - }, - { - "code": "def _property(methode, zone, key, value):\n ret = {'status': True}\n cfg_file = None\n if methode not in ['set', 'clear']:\n ret['status'] = False\n ret['message'] = 'unkown methode {0}!'.format(methode)\n else:\n cfg_file = salt.utils.files.mkstemp()\n with salt.utils.files.fpopen(cfg_file, 'w+', mode=0o600) as fp_:\n if methode == 'set':\n if isinstance(value, dict) or isinstance(value, list):\n value = _sanitize_value(value)\n value = six.text_type(value).lower() if isinstance(value, bool) else six.text_type(value)\n fp_.write(\"{0} {1}={2}\\n\".format(methode, key, _sanitize_value(value)))\n elif methode == 'clear':\n fp_.write(\"{0} {1}\\n\".format(methode, key))\n if cfg_file:\n _dump_cfg(cfg_file)\n res = __salt__['cmd.run_all']('zonecfg -z {zone} -f {path}'.format(\n zone=zone,\n path=cfg_file,\n ))\n ret['status'] = res['retcode'] == 0\n ret['message'] = res['stdout'] if ret['status'] else res['stderr']\n if ret['message'] == '':\n del ret['message']\n else:\n ret['message'] = _clean_message(ret['message'])\n if __salt__['file.file_exists'](cfg_file):\n __salt__['file.remove'](cfg_file)\n return ret", - "docstring": "internal handler for set and clear_property\n\n methode : string\n either set, add, or clear\n zone : string\n name of zone\n key : string\n name of property\n value : string\n value of property" - }, - { - "code": "def get(self, report_id):\n return Report(\n self._app,\n self._swimlane.request('get', \"reports/{0}\".format(report_id)).json()\n )", - "docstring": "Retrieve report by ID\n\n Args:\n report_id (str): Full report ID\n\n Returns:\n Report: Corresponding Report instance" - }, - { - "code": "def nonlinear_system(model, initial_dr=None, maxit=10, tol=1e-8, grid={}, distribution={}, verbose=True):\n if verbose:\n headline = '|{0:^4} | {1:10} | {2:8} |'\n headline = headline.format('N', ' Error', 'Time')\n stars = '-'*len(headline)\n print(stars)\n print(headline)\n print(stars)\n fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} |'\n f = model.functions['arbitrage']\n g = model.functions['transition']\n p = model.calibration['parameters']\n distrib = model.get_distribution(**distribution)\n nodes, weights = distrib.discretize()\n approx = model.get_grid(**grid)\n ms = create_interpolator(approx, approx.interpolation)\n grid = ms.grid\n if initial_dr is None:\n dr = approximate_controls(model)\n else:\n dr = initial_dr\n ms.set_values(dr(grid))\n x = dr(grid)\n x0 = x.copy()\n it = 0\n err = 10\n a0 = x0.copy().reshape((x0.shape[0]*x0.shape[1],))\n a = a0.copy()\n while err > tol and it < maxit:\n it += 1\n t1 = time.time()\n r, da = residuals(f, g, grid, a.reshape(x0.shape), ms, nodes, weights, p, diff=True)[:2]\n r = r.flatten()\n err = abs(r).max()\n t2 = time.time()\n if verbose:\n print(fmt_str.format(it, err, t2-t1))\n if err > tol:\n a -= scipy.sparse.linalg.spsolve(da, r)\n if verbose:\n print(stars)\n return ms", - "docstring": "Finds a global solution for ``model`` by 
solving one large system of equations\n using a simple newton algorithm.\n\n Parameters\n ----------\n model: NumericModel\n \"dtcscc\" model to be solved\n verbose: boolean\n if True, display iterations\n initial_dr: decision rule\n initial guess for the decision rule\n maxit: int\n maximum number of iterationsd\n tol: tolerance criterium for successive approximations\n grid: grid options\n distribution: distribution options\n\n Returns\n -------\n decision rule :\n approximated solution" - }, - { - "code": "def add_time_variables(df, reindex = True):\n if not \"datetime\" in df.columns:\n log.error(\"field datetime not found in DataFrame\")\n return False\n df[\"datetime\"] = pd.to_datetime(df[\"datetime\"])\n df[\"month\"] = df[\"datetime\"].dt.month\n df[\"month_name\"] = df[\"datetime\"].dt.strftime(\"%B\")\n df[\"weekday\"] = df[\"datetime\"].dt.weekday\n df[\"weekday_name\"] = df[\"datetime\"].dt.weekday_name\n df[\"time_through_day\"] = df[\"datetime\"].map(\n lambda x: x - datetime.datetime.combine(\n x.date(),\n datetime.time()\n )\n )\n df[\"fraction_through_day\"] = df[\"time_through_day\"].map(\n lambda x: x / datetime.timedelta(hours = 24)\n )\n df[\"hour\"] = df[\"datetime\"].dt.hour\n df[\"hours_through_day\"] = df[\"fraction_through_day\"] * 24\n df[\"days_through_week\"] = df.apply(\n lambda row: row[\"weekday\"] + row[\"fraction_through_day\"],\n axis = 1\n )\n df[\"days_through_year\"] = df[\"datetime\"].dt.dayofyear\n df.index = df[\"datetime\"]\n return df", - "docstring": "Return a DataFrame with variables for weekday index, weekday name, timedelta\n through day, fraction through day, hours through day and days through week\n added, optionally with the index set to datetime and the variable `datetime`\n removed. It is assumed that the variable `datetime` exists." - }, - { - "code": "def load_policy_config(filters=None,\n prepend=True,\n pillar_key='acl',\n pillarenv=None,\n saltenv=None,\n merge_pillar=True,\n only_lower_merge=False,\n revision_id=None,\n revision_no=None,\n revision_date=True,\n revision_date_format='%Y/%m/%d',\n test=False,\n commit=True,\n debug=False,\n **kwargs):\n if not filters:\n filters = []\n platform = _get_capirca_platform()\n policy_config = __salt__['capirca.get_policy_config'](platform,\n filters=filters,\n prepend=prepend,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n only_lower_merge=only_lower_merge,\n revision_id=revision_id,\n revision_no=revision_no,\n revision_date=revision_date,\n revision_date_format=revision_date_format)\n return __salt__['net.load_config'](text=policy_config,\n test=test,\n commit=commit,\n debug=debug,\n inherit_napalm_device=napalm_device)", - "docstring": "Generate and load the configuration of the whole policy.\n\n .. 
note::\n\n The order of the filters and their terms is very important.\n The configuration loaded on the device respects the order\n defined in the ``filters`` and/or inside the pillar.\n\n When merging the ``filters`` with the pillar data, consider the\n ``prepend`` argument to make sure the order is correct!\n\n filters\n List of filters for this policy.\n If not specified or empty, will try to load the configuration from the pillar,\n unless ``merge_pillar`` is set as ``False``.\n\n prepend: ``True``\n When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging\n the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended\n at the beginning, while existing ones will preserve the position. To add the new filters\n at the end of the list, set this argument to ``False``.\n\n pillar_key: ``acl``\n The key in the pillar containing the default attributes values. Default: ``acl``.\n\n pillarenv\n Query the master to generate fresh pillar data on the fly,\n specifically from the requested pillar environment.\n\n saltenv\n Included only for compatibility with\n :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.\n\n merge_pillar: ``True``\n Merge the CLI variables with the pillar. Default: ``True``.\n\n The merge logic depends on the ``prepend`` argument and\n the CLI has higher priority than the pillar.\n\n only_lower_merge: ``False``\n Specify if it should merge only the filters and terms fields. Otherwise it will try\n to merge everything at the policy level. Default: ``False``.\n This option requires ``merge_pillar``, otherwise it is ignored.\n\n revision_id\n Add a comment in the policy config having the description for the changes applied.\n\n revision_no\n The revision count.\n\n revision_date: ``True``\n Boolean flag: display the date when the policy configuration was generated. Default: ``True``.\n\n revision_date_format: ``%Y/%m/%d``\n The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (//).\n\n test: ``False``\n Dry run? If set as ``True``, will apply the config, discard and return the changes.\n Default: ``False`` and will commit the changes on the device.\n\n commit: ``True``\n Commit? Default: ``True``.\n\n debug: ``False``\n Debug mode. Will insert a new key under the output dictionary,\n as ``loaded_config`` containing the raw configuration loaded on the device.\n\n The output is a dictionary having the same form as :mod:`net.load_config `.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'edge01.flw01' netacl.load_policy_config debug=True\n\n Output Example:\n\n .. code-block:: text\n\n edge01.flw01:\n ----------\n already_configured:\n False\n comment:\n diff:\n ---\n +++\n @@ -1228,9 +1228,24 @@\n !\n +ipv4 access-list my-filter\n + 10 remark my-term\n + 20 deny tcp host 1.2.3.4 eq 1234 any\n + 30 deny udp host 1.2.3.4 eq 1234 any\n + 40 deny tcp host 1.2.3.4 eq 1235 any\n + 50 deny udp host 1.2.3.4 eq 1235 any\n + 60 remark my-other-term\n + 70 permit tcp any range 5678 5680 any\n +!\n +!\n +ipv4 access-list block-icmp\n + 10 remark first-term\n + 20 deny icmp any any\n !\n loaded_config:\n ! 
$Date: 2017/03/22 $\n no ipv4 access-list my-filter\n ipv4 access-list my-filter\n remark my-term\n deny tcp host 1.2.3.4 eq 1234 any\n deny udp host 1.2.3.4 eq 1234 any\n deny tcp host 1.2.3.4 eq 1235 any\n deny udp host 1.2.3.4 eq 1235 any\n remark my-other-term\n permit tcp any range 5678 5680 any\n exit\n no ipv4 access-list block-icmp\n ipv4 access-list block-icmp\n remark first-term\n deny icmp any any\n exit\n result:\n True\n\n The policy configuration has been loaded from the pillar, having the following structure:\n\n .. code-block:: yaml\n\n acl:\n - my-filter:\n terms:\n - my-term:\n source_port:\n - 1234\n - 1235\n protocol:\n - tcp\n - udp\n source_address: 1.2.3.4\n action: reject\n - my-other-term:\n source_port:\n - [5678, 5680]\n protocol: tcp\n action: accept\n - block-icmp:\n terms:\n - first-term:\n protocol:\n - icmp\n action: reject" - }, - { - "code": "def read(self, config_dir=None, config_name=None, clear=False):\n if not config_dir:\n config_dir = self._defaults.get('config_dir', None)\n if not config_dir:\n raise KeyError('config_dir not set')\n if not config_name:\n config_name = self._defaults.get('config_name', None)\n if not config_name:\n raise KeyError('config_name not set')\n conf_path = os.path.expanduser(config_dir)\n if not os.path.exists(conf_path):\n raise IOError(\"config dir not found at %s\" % (conf_path,))\n config = munge.load_datafile(config_name, conf_path, default=None)\n if not config:\n raise IOError(\"config file not found in %s\" % (conf_path,))\n if clear:\n self.clear()\n munge.util.recursive_update(self.data, config)\n self._meta_config_dir = conf_path\n return self", - "docstring": "read config from config_dir\n if config_dir is None, clear to default config\n clear will clear to default before reading new file" - }, - { - "code": "def count(self):\n return functools.reduce(lambda x, y: x * y, (x.count for x in self.bounds))", - "docstring": "Total number of array cells" - }, - { - "code": "def create(cls, session, web_hook):\n cls(\n '/hooks.json',\n data=web_hook.to_api(),\n request_type=RequestPaginator.POST,\n session=session,\n )\n return True", - "docstring": "Create a web hook.\n\n Note that creating a new web hook will overwrite the web hook that is\n already configured for this company. There is also no way to\n programmatically determine if a web hook already exists for the\n company. This is a limitation of the HelpScout API and cannot be\n circumvented.\n\n Args:\n session (requests.sessions.Session): Authenticated session.\n web_hook (helpscout.models.WebHook): The web hook to be created.\n\n Returns:\n bool: ``True`` if the creation was a success. Errors otherwise." - }, - { - "code": "def set_popup_menu(self, menu):\n self.popup_menu = menu\n self.in_queue.put(MPImagePopupMenu(menu))", - "docstring": "set a popup menu on the frame" - }, - { - "code": "def read(self, prompt='', clean=lambda x: x):\n ans = read(prompt + ' ')\n return clean(ans)", - "docstring": "Display a prompt and ask user for input\n\n A function to clean the user input can be passed as ``clean`` argument.\n This function takes a single value, which is the string user entered,\n and returns a cleaned value. 
Default is a pass-through function, which\n is an equivalent of::\n\n def clean(val):\n return val" - }, - { - "code": "def _precompile_substitution(self, kind, pattern):\n if pattern not in self._regexc[kind]:\n qm = re.escape(pattern)\n self._regexc[kind][pattern] = {\n \"qm\": qm,\n \"sub1\": re.compile(r'^' + qm + r'$'),\n \"sub2\": re.compile(r'^' + qm + r'(\\W+)'),\n \"sub3\": re.compile(r'(\\W+)' + qm + r'(\\W+)'),\n \"sub4\": re.compile(r'(\\W+)' + qm + r'$'),\n }", - "docstring": "Pre-compile the regexp for a substitution pattern.\n\n This will speed up the substitutions that happen at the beginning of\n the reply fetching process. With the default brain, this took the\n time for _substitute down from 0.08s to 0.02s\n\n :param str kind: One of ``sub``, ``person``.\n :param str pattern: The substitution pattern." - }, - { - "code": "def to_bytes(instance, encoding='utf-8', error='strict'):\n if isinstance(instance, bytes):\n return instance\n elif hasattr(instance, 'encode'):\n return instance.encode(encoding, error)\n elif isinstance(instance, list):\n return list([to_bytes(item, encoding, error) for item in instance])\n elif isinstance(instance, tuple):\n return tuple([to_bytes(item, encoding, error) for item in instance])\n elif isinstance(instance, dict):\n return dict(\n [(to_bytes(key, encoding, error), to_bytes(value, encoding, error))\n for key, value in instance.items()])\n else:\n return instance", - "docstring": "Convert an instance recursively to bytes." - }, - { - "code": "def read_hierarchy(self, fid):\n lin = self.read_line(fid)\n while lin != 'end':\n parts = lin.split()\n if lin != 'begin':\n ind = self.get_index_by_name(parts[0])\n for i in range(1, len(parts)):\n self.vertices[ind].children.append(self.get_index_by_name(parts[i]))\n lin = self.read_line(fid)\n lin = self.read_line(fid)\n return lin", - "docstring": "Read hierarchy information from acclaim skeleton file stream." - }, - { - "code": "def geometric_partitions(iterable, floor=1, ceiling=32768):\r\n partition_size = floor\r\n run_length = multiprocessing.cpu_count()\r\n run_count = 0\r\n try:\r\n while True:\r\n partition, iterable = itertools.tee(iterable)\r\n yield Queryable(partition).take(partition_size)\r\n for i in range(partition_size):\r\n next(iterable)\r\n run_count += 1\r\n if run_count >= run_length:\r\n partition_size *= 2\r\n run_count = 0\r\n if partition_size > ceiling:\r\n partition_size = ceiling\r\n except StopIteration:\r\n pass", - "docstring": "Partition an iterable into chunks. Returns an iterator over partitions." - }, - { - "code": "def add_rect(self, width, height, rid=None):\n assert(width > 0 and height > 0)\n if width > max(self.width, self.height) or\\\n height > max(self.height, self.width):\n return None\n rect = None\n if self._waste_management:\n rect = self._waste.add_rect(width, height, rid)\n if not rect:\n rect, _ = self._select_position(width, height)\n if rect:\n self._add_skyline(rect)\n if rect is None:\n return None\n rect.rid = rid\n self.rectangles.append(rect)\n return rect", - "docstring": "Add new rectangle" - }, - { - "code": "def _upload_folder_in_background(self, folder_path, container, ignore,\n upload_key, ttl=None):\n uploader = FolderUploader(folder_path, container, ignore, upload_key,\n self, ttl=ttl)\n uploader.start()", - "docstring": "Runs the folder upload in the background." 
- }, - { - "code": "def clean_source_files(self):\n self.build_source_files.file(File.BSFILE.SOURCESCHEMA).remove()\n self.build_source_files.file(File.BSFILE.SCHEMA).remove()\n self.commit()", - "docstring": "Remove the schema.csv and source_schema.csv files" - }, - { - "code": "def upload_file_mp(self, container, src_file_path, dst_name=None,\n content_type=None):\n if not os.path.exists(src_file_path):\n raise RuntimeError('file not found: ' + src_file_path)\n if not dst_name:\n dst_name = os.path.basename(src_file_path)\n if not content_type:\n content_type = \"application/octet.stream\"\n url = self.make_url(container, None, None)\n headers = self._base_headers\n with open(src_file_path, 'rb') as up_file:\n files = {'file': (dst_name, up_file, content_type)}\n try:\n rsp = requests.post(url, headers=headers, files=files,\n timeout=self._timeout)\n except requests.exceptions.ConnectionError as e:\n RestHttp._raise_conn_error(e)\n return self._handle_response(rsp)", - "docstring": "Upload a file using multi-part encoding." - }, - { - "code": "def run_function_on_all_workers(self, function,\n run_on_other_drivers=False):\n if self.mode is None:\n self.cached_functions_to_run.append(function)\n else:\n pickled_function = pickle.dumps(function)\n function_to_run_id = hashlib.sha1(pickled_function).digest()\n key = b\"FunctionsToRun:\" + function_to_run_id\n function({\"worker\": self})\n function_exported = self.redis_client.setnx(b\"Lock:\" + key, 1)\n if not function_exported:\n return\n check_oversized_pickle(pickled_function, function.__name__,\n \"function\", self)\n self.redis_client.hmset(\n key, {\n \"driver_id\": self.task_driver_id.binary(),\n \"function_id\": function_to_run_id,\n \"function\": pickled_function,\n \"run_on_other_drivers\": str(run_on_other_drivers)\n })\n self.redis_client.rpush(\"Exports\", key)", - "docstring": "Run arbitrary code on all of the workers.\n\n This function will first be run on the driver, and then it will be\n exported to all of the workers to be run. It will also be run on any\n new workers that register later. If ray.init has not been called yet,\n then cache the function and export it later.\n\n Args:\n function (Callable): The function to run on all of the workers. It\n takes only one argument, a worker info dict. If it returns\n anything, its return values will not be used.\n run_on_other_drivers: The boolean that indicates whether we want to\n run this function on other drivers. One case is we may need to\n share objects across drivers." - }, - { - "code": "def normalizeToTag(val):\n try:\n val = val.upper()\n except AttributeError:\n raise KeyError(\"{} is not a tag or name string\".format(val))\n if val not in tagsAndNameSetUpper:\n raise KeyError(\"{} is not a tag or name string\".format(val))\n else:\n try:\n return fullToTagDictUpper[val]\n except KeyError:\n return val", - "docstring": "Converts tags or full names to 2 character tags, case insensitive\n\n # Parameters\n\n _val_: `str`\n\n > A two character string giving the tag or its full name\n\n # Returns\n\n `str`\n\n > The short name of _val_" - }, - { - "code": "def compile(self, options=[]):\n try:\n self._interface.nvrtcCompileProgram(self._program, options)\n ptx = self._interface.nvrtcGetPTX(self._program)\n return ptx\n except NVRTCException as e:\n log = self._interface.nvrtcGetProgramLog(self._program)\n raise ProgramException(log)", - "docstring": "Compiles the program object to PTX using the compiler options\n specified in `options`." 
- }, - { - "code": "def _update_rr_ce_entry(self, rec):\n if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None:\n celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area\n added_block, block, offset = self.pvd.add_rr_ce_entry(celen)\n rec.rock_ridge.update_ce_block(block)\n rec.rock_ridge.dr_entries.ce_record.update_offset(offset)\n if added_block:\n return self.pvd.logical_block_size()\n return 0", - "docstring": "An internal method to update the Rock Ridge CE entry for the given\n record.\n\n Parameters:\n rec - The record to update the Rock Ridge CE entry for (if it exists).\n Returns:\n The number of additional bytes needed for this Rock Ridge CE entry." - }, - { - "code": "def gen_mu(K, delta, c):\n S = c * log(K/delta) * sqrt(K) \n tau = gen_tau(S, K, delta)\n rho = gen_rho(K)\n normalizer = sum(rho) + sum(tau)\n return [(rho[d] + tau[d])/normalizer for d in range(K)]", - "docstring": "The Robust Soliton Distribution on the degree of \n transmitted blocks" - }, - { - "code": "def has_gis(wrapped, instance, args, kwargs):\n if gis:\n return wrapped(*args, **kwargs)\n else:\n warn(MISSING_GIS)", - "docstring": "Skip function execution if there are no presamples" - }, - { - "code": "def publish_workflow_status(self, workflow_uuid, status,\n logs='', message=None):\n msg = {\n \"workflow_uuid\": workflow_uuid,\n \"logs\": logs,\n \"status\": status,\n \"message\": message\n }\n self._publish(msg)", - "docstring": "Publish workflow status using the configured.\n\n :param workflow_uudid: String which represents the workflow UUID.\n :param status: Integer which represents the status of the workflow,\n this is defined in the `reana-db` `Workflow` models.\n :param logs: String which represents the logs which the workflow\n has produced as output.\n :param message: Dictionary which includes additional information\n can be attached such as the overall progress of the workflow." - }, - { - "code": "def send_to_room(self, message, room_name):\r\n room = self.get_room(room_name)\r\n if room is not None:\r\n room.send_message(message)", - "docstring": "Sends a given message to a given room" - }, - { - "code": "def _stieltjes_analytical(dist, order, normed):\n dimensions = len(dist)\n mom_order = numpy.arange(order+1).repeat(dimensions)\n mom_order = mom_order.reshape(order+1, dimensions).T\n coeff1, coeff2 = dist.ttr(mom_order)\n coeff2[:, 0] = 1.\n poly = chaospy.poly.collection.core.variable(dimensions)\n if normed:\n orth = [\n poly**0*numpy.ones(dimensions),\n (poly-coeff1[:, 0])/numpy.sqrt(coeff2[:, 1]),\n ]\n for order_ in range(1, order):\n orth.append(\n (orth[-1]*(poly-coeff1[:, order_])\n -orth[-2]*numpy.sqrt(coeff2[:, order_]))\n /numpy.sqrt(coeff2[:, order_+1])\n )\n norms = numpy.ones(coeff2.shape)\n else:\n orth = [poly-poly, poly**0*numpy.ones(dimensions)]\n for order_ in range(order):\n orth.append(\n orth[-1]*(poly-coeff1[:, order_])\n - orth[-2]*coeff2[:, order_]\n )\n orth = orth[1:]\n norms = numpy.cumprod(coeff2, 1)\n return orth, norms, coeff1, coeff2", - "docstring": "Stieltjes' method with analytical recurrence coefficients." 
- }, - { - "code": "def gravatar(self, size):\n hash = md5(self.email.encode('utf-8')).hexdigest()\n url = 'http://www.gravatar.com/avatar/{}?d=mm&s={}'\n return url.format(hash, size)", - "docstring": "Get url to gravatar" - }, - { - "code": "def get_pointlist(self):\n try:\n pointlist = json.loads(self.raw_data_json)\n except Exception as inst:\n logging.debug(\"pointStrokeList: strokelistP\")\n logging.debug(self.raw_data_json)\n logging.debug(\"didn't work\")\n raise inst\n if len(pointlist) == 0:\n logging.warning(\"Pointlist was empty. Search for '\" +\n self.raw_data_json + \"' in `wm_raw_draw_data`.\")\n return pointlist", - "docstring": "Get a list of lists of tuples from JSON raw data string. Those lists\n represent strokes with control points.\n\n Returns\n -------\n list :\n A list of strokes. Each stroke is a list of dictionaries\n {'x': 123, 'y': 42, 'time': 1337}" - }, - { - "code": "def resetSelection(self):\n cursor = self.textCursor()\n cursor.setPosition(cursor.position())\n self.setTextCursor(cursor)", - "docstring": "Reset selection. Nothing will be selected." - }, - { - "code": "def delete_fallbackserver(self, serverid, data):\n return self.api_call(\n ENDPOINTS['fallbackservers']['delete'],\n dict(serverid=serverid),\n body=data)", - "docstring": "Delete Fallback server" - }, - { - "code": "def tangent_curve_single_list(obj, param_list, normalize):\n ret_vector = []\n for param in param_list:\n temp = tangent_curve_single(obj, param, normalize)\n ret_vector.append(temp)\n return tuple(ret_vector)", - "docstring": "Evaluates the curve tangent vectors at the given list of parameter values.\n\n :param obj: input curve\n :type obj: abstract.Curve\n :param param_list: parameter list\n :type param_list: list or tuple\n :param normalize: if True, the returned vector is converted to a unit vector\n :type normalize: bool\n :return: a list containing \"point\" and \"vector\" pairs\n :rtype: tuple" - }, - { - "code": "def periodic_callback(self):\n if self.stopped:\n return\n if not self.scanning and len(self._connections) == 0 and self.connecting_count == 0:\n self._logger.info(\"Restarting scan for devices\")\n self.start_scan(self._active_scan)\n self._logger.info(\"Finished restarting scan for devices\")", - "docstring": "Periodic cleanup tasks to maintain this adapter, should be called every second" - }, - { - "code": "def Run(self, unused_arg):\n out = self.out_rdfvalues[0]()\n for descriptor in config.CONFIG.type_infos:\n if descriptor.name in self.BLOCKED_PARAMETERS:\n value = \"[Redacted]\"\n else:\n try:\n value = config.CONFIG.Get(descriptor.name, default=None)\n except (config_lib.Error, KeyError, AttributeError, ValueError) as e:\n logging.info(\"Config reading error: %s\", e)\n continue\n if value is not None:\n out[descriptor.name] = value\n self.SendReply(out)", - "docstring": "Retrieve the configuration except for the blocked parameters." - }, - { - "code": "def confirm_or_abort(prompt, exitcode=os.EX_TEMPFAIL, msg=None, **extra_args):\n if click.confirm(prompt, **extra_args):\n return True\n else:\n if msg:\n sys.stderr.write(msg)\n sys.stderr.write('\\n')\n sys.exit(exitcode)", - "docstring": "Prompt user for confirmation and exit on negative reply.\n\n Arguments `prompt` and `extra_args` will be passed unchanged to\n `click.confirm`:func: (which is used for actual prompting).\n\n :param str prompt: Prompt string to display.\n :param int exitcode: Program exit code if negative reply given.\n :param str msg: Message to display before exiting." 
- }, - { - "code": "def is_hide(self, value, header=\"\"):\n return not all(j is None for j in [re.match(i, value.lower()) for i in self.get_conf_value('hide', header=header)])", - "docstring": "Return True if the value is in the hide configuration list.\n\n The hide configuration list is defined in the glances.conf file.\n It is a comma separed list of regexp.\n Example for diskio:\n hide=sda2,sda5,loop.*" - }, - { - "code": "def get(self, key, default=NoDefault):\n key = normalize_key(key)\n if default is NoDefault:\n defaults = []\n else:\n defaults = [default]\n for options in self.options:\n try:\n value = options[key]\n except KeyError:\n continue\n if isinstance(value, Default):\n defaults.append(value.value)\n continue\n else:\n return value\n if defaults:\n return defaults[0]\n return NoDefault", - "docstring": "Retrieve a value from its key.\n\n Retrieval steps are:\n 1) Normalize the key\n 2) For each option group:\n a) Retrieve the value at that key\n b) If no value exists, continue\n c) If the value is an instance of 'Default', continue\n d) Otherwise, return the value\n 3) If no option had a non-default value for the key, return the\n first Default() option for the key (or :arg:`default`)." - }, - { - "code": "def unicode_is_ascii(u_string):\n assert isinstance(u_string, str)\n try:\n u_string.encode('ascii')\n return True\n except UnicodeEncodeError:\n return False", - "docstring": "Determine if unicode string only contains ASCII characters.\n\n :param str u_string: unicode string to check. Must be unicode\n and not Python 2 `str`.\n :rtype: bool" - }, - { - "code": "def _check_instance(url, insecure):\n res = grimoire_con(insecure).get(url)\n if res.status_code != 200:\n logger.error(\"Didn't get 200 OK from url %s\", url)\n raise ElasticConnectException\n else:\n try:\n version_str = res.json()['version']['number']\n version_major = version_str.split('.')[0]\n return version_major\n except Exception:\n logger.error(\"Could not read proper welcome message from url %s\",\n ElasticSearch.anonymize_url(url))\n logger.error(\"Message read: %s\", res.text)\n raise ElasticConnectException", - "docstring": "Checks if there is an instance of Elasticsearch in url.\n\n Actually, it checks if GET on the url returns a JSON document\n with a field tagline \"You know, for search\",\n and a field version.number.\n\n :value url: url of the instance to check\n :value insecure: don't verify ssl connection (boolean)\n :returns: major version of Ellasticsearch, as string." - }, - { - "code": "def store_dcnm_net_dict(self, net_dict, direc):\n if direc == 'in':\n self.in_dcnm_net_dict = net_dict\n else:\n self.out_dcnm_net_dict = net_dict", - "docstring": "Storing the DCNM net dict." 
- }, - { - "code": "def link_bus(self, bus_idx):\n ret = []\n if not self._config['is_series']:\n self.log(\n 'link_bus function is not valid for non-series model <{}>'.\n format(self.name))\n return []\n if isinstance(bus_idx, (int, float, str)):\n bus_idx = [bus_idx]\n fkey = list(self._ac.keys())\n if 'bus' in fkey:\n fkey.remove('bus')\n nfkey = len(fkey)\n fkey_val = [self.__dict__[i] for i in fkey]\n for item in bus_idx:\n idx = []\n key = []\n for i in range(self.n):\n for j in range(nfkey):\n if fkey_val[j][i] == item:\n idx.append(self.idx[i])\n key.append(fkey[j])\n break\n if len(idx) == 0:\n idx = None\n if len(key) == 0:\n key = None\n ret.append((idx, key))\n return ret", - "docstring": "Return the indices of elements linking the given buses\n\n :param bus_idx:\n :return:" - }, - { - "code": "def list_extensions():\n import nnabla_ext.cpu\n from os.path import dirname, join, realpath\n from os import listdir\n ext_dir = realpath((join(dirname(nnabla_ext.cpu.__file__), '..')))\n return listdir(ext_dir)", - "docstring": "List up available extensions.\n\n Note:\n It may not work on some platforms/environments since it depends\n on the directory structure of the namespace packages.\n\n Returns: list of str\n Names of available extensions." - }, - { - "code": "def matrix_rank(model):\n s_matrix, _, _ = con_helpers.stoichiometry_matrix(\n model.metabolites, model.reactions\n )\n return con_helpers.rank(s_matrix)", - "docstring": "Return the rank of the model's stoichiometric matrix.\n\n Parameters\n ----------\n model : cobra.Model\n The metabolic model under investigation." - }, - { - "code": "def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n self.__dict__.update(state_dict)", - "docstring": "Load the schedulers state.\n\n Parameters\n ----------\n state_dict : ``Dict[str, Any]``\n Scheduler state. Should be an object returned from a call to ``state_dict``." - }, - { - "code": "def create(self, file_or_path, **kwargs):\n opened = False\n if isinstance(file_or_path, str_type()):\n file_or_path = open(file_or_path, 'rb')\n opened = True\n elif not getattr(file_or_path, 'read', False):\n raise Exception(\"A file or path to a file is required for this operation.\")\n try:\n return self.client._post(\n self._url(),\n file_or_path,\n headers=self._resource_class.create_headers({}),\n file_upload=True\n )\n finally:\n if opened:\n file_or_path.close()", - "docstring": "Creates an upload for the given file or path." 
- }, - { - "code": "def parser(self, type, **meta):\n def decorator(f):\n self.register_parser(type, f)\n if meta:\n self.register_meta(type, **meta)\n return f\n return decorator", - "docstring": "Registers the decorated method as the parser of a format.\n\n :param type: The unique name of the format\n :param meta: The extra information associated with the format" - }, - { - "code": "def _determine_weights(self, other, settings):\n first_is_used = settings['first']['required'] or \\\n self.first and other.first\n first_weight = settings['first']['weight'] if first_is_used else 0\n middle_is_used = settings['middle']['required'] or \\\n self.middle and other.middle\n middle_weight = settings['middle']['weight'] if middle_is_used else 0\n last_is_used = settings['last']['required'] or \\\n self.last and other.last\n last_weight = settings['last']['weight'] if last_is_used else 0\n return first_weight, middle_weight, last_weight", - "docstring": "Return weights of name components based on whether or not they were\n omitted" - }, - { - "code": "def generate(env):\n global PSAction\n if PSAction is None:\n PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')\n global DVIPSAction\n if DVIPSAction is None:\n DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)\n global PSBuilder\n if PSBuilder is None:\n PSBuilder = SCons.Builder.Builder(action = PSAction,\n prefix = '$PSPREFIX',\n suffix = '$PSSUFFIX',\n src_suffix = '.dvi',\n src_builder = 'DVI',\n single_source=True)\n env['BUILDERS']['PostScript'] = PSBuilder\n env['DVIPS'] = 'dvips'\n env['DVIPSFLAGS'] = SCons.Util.CLVar('')\n env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'\n env['PSPREFIX'] = ''\n env['PSSUFFIX'] = '.ps'", - "docstring": "Add Builders and construction variables for dvips to an Environment." - }, - { - "code": "def add(self, finalidade_txt, cliente_txt, ambiente_p44_txt, description):\n environmentvip_map = dict()\n environmentvip_map['finalidade_txt'] = finalidade_txt\n environmentvip_map['cliente_txt'] = cliente_txt\n environmentvip_map['ambiente_p44_txt'] = ambiente_p44_txt\n environmentvip_map['description'] = description\n code, xml = self.submit(\n {'environment_vip': environmentvip_map}, 'POST', 'environmentvip/')\n return self.response(code, xml)", - "docstring": "Inserts a new Environment VIP and returns its identifier.\n\n :param finalidade_txt: Finality. String with a maximum of 50 characters and respect [a-zA-Z\\_-]\n :param cliente_txt: ID Client. String with a maximum of 50 characters and respect [a-zA-Z\\_-]\n :param ambiente_p44_txt: Environment P44. String with a maximum of 50 characters and respect [a-zA-Z\\_-]\n\n :return: Following dictionary:\n\n ::\n\n {'environment_vip': {'id': < id >}}\n\n :raise InvalidParameterError: The value of finalidade_txt, cliente_txt or ambiente_p44_txt is invalid.\n :raise DataBaseError: Networkapi failed to access the database.\n :raise XMLError: Networkapi failed to generate the XML response." - }, - { - "code": "def get_page_children_dict(self, page_qs=None):\n children_dict = defaultdict(list)\n for page in page_qs or self.pages_for_display:\n children_dict[page.path[:-page.steplen]].append(page)\n return children_dict", - "docstring": "Returns a dictionary of lists, where the keys are 'path' values for\n pages, and the value is a list of children pages for that page." 
- }, - { - "code": "def distance_to_edge(labels):\n colors = color_labels(labels)\n max_color = np.max(colors)\n result = np.zeros(labels.shape)\n if max_color == 0:\n return result\n for i in range(1, max_color+1):\n mask = (colors==i)\n result[mask] = scind.distance_transform_edt(mask)[mask]\n return result", - "docstring": "Compute the distance of a pixel to the edge of its object\n \n labels - a labels matrix\n \n returns a matrix of distances" - }, - { - "code": "def validate(self,\n asset,\n amount,\n portfolio,\n algo_datetime,\n algo_current_data):\n if portfolio.positions[asset].amount + amount < 0:\n self.handle_violation(asset, amount, algo_datetime)", - "docstring": "Fail if we would hold negative shares of asset after completing this\n order." - }, - { - "code": "def InstallGRR(self, path):\n cmd64 = [self.pip64, \"install\"]\n cmd32 = [self.pip32, \"install\"]\n if args.wheel_dir:\n cmd64 += [\"--no-index\", r\"--find-links=file:///%s\" % args.wheel_dir]\n cmd32 += [\"--no-index\", r\"--find-links=file:///%s\" % args.wheel_dir]\n cmd64.append(path)\n cmd32.append(path)\n subprocess.check_call(cmd64)\n if args.build_32:\n subprocess.check_call(cmd32)", - "docstring": "Installs GRR." - }, - { - "code": "def to_hierarchical(self, n_repeat, n_shuffle=1):\n levels = self.levels\n codes = [np.repeat(level_codes, n_repeat) for\n level_codes in self.codes]\n codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes]\n names = self.names\n warnings.warn(\"Method .to_hierarchical is deprecated and will \"\n \"be removed in a future version\",\n FutureWarning, stacklevel=2)\n return MultiIndex(levels=levels, codes=codes, names=names)", - "docstring": "Return a MultiIndex reshaped to conform to the\n shapes given by n_repeat and n_shuffle.\n\n .. deprecated:: 0.24.0\n\n Useful to replicate and rearrange a MultiIndex for combination\n with another Index with n_repeat items.\n\n Parameters\n ----------\n n_repeat : int\n Number of times to repeat the labels on self\n n_shuffle : int\n Controls the reordering of the labels. If the result is going\n to be an inner level in a MultiIndex, n_shuffle will need to be\n greater than one. 
The size of each label must divisible by\n n_shuffle.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),\n (2, 'one'), (2, 'two')])\n >>> idx.to_hierarchical(3)\n MultiIndex(levels=[[1, 2], ['one', 'two']],\n codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])" - }, - { - "code": "def get_coh_PTF_files(cp, ifos, run_dir, bank_veto=False, summary_files=False):\n if os.getenv(\"LAL_SRC\") is None:\n raise ValueError(\"The environment variable LAL_SRC must be set to a \"\n \"location containing the file lalsuite.git\")\n else:\n lalDir = os.getenv(\"LAL_SRC\")\n sci_seg = segments.segment(int(cp.get(\"workflow\", \"start-time\")),\n int(cp.get(\"workflow\", \"end-time\")))\n file_list = FileList([])\n if bank_veto:\n shutil.copy(\"%s/lalapps/src/ring/coh_PTF_config_files/\" \\\n \"bank_veto_bank.xml\" % lalDir, \"%s\" % run_dir)\n bank_veto_url = \"file://localhost%s/bank_veto_bank.xml\" % run_dir\n bank_veto = File(ifos, \"bank_veto_bank\", sci_seg,\n file_url=bank_veto_url)\n bank_veto.PFN(bank_veto.cache_entry.path, site=\"local\")\n file_list.extend(FileList([bank_veto]))\n if summary_files:\n shutil.copy(\"%s/lalapps/src/ring/coh_PTF_config_files/\" \\\n \"coh_PTF_html_summary.js\" % lalDir, \"%s\" % run_dir)\n summary_js_url = \"file://localhost%s/coh_PTF_html_summary.js\" \\\n % run_dir\n summary_js = File(ifos, \"coh_PTF_html_summary_js\", sci_seg,\n file_url=summary_js_url)\n summary_js.PFN(summary_js.cache_entry.path, site=\"local\")\n file_list.extend(FileList([summary_js]))\n shutil.copy(\"%s/lalapps/src/ring/coh_PTF_config_files/\" \\\n \"coh_PTF_html_summary.css\" % lalDir, \"%s\" % run_dir)\n summary_css_url = \"file://localhost%s/coh_PTF_html_summary.css\" \\\n % run_dir\n summary_css = File(ifos, \"coh_PTF_html_summary_css\", sci_seg,\n file_url=summary_css_url)\n summary_css.PFN(summary_css.cache_entry.path, site=\"local\")\n file_list.extend(FileList([summary_css]))\n return file_list", - "docstring": "Retrieve files needed to run coh_PTF jobs within a PyGRB workflow\n\n Parameters\n ----------\n cp : pycbc.workflow.configuration.WorkflowConfigParser object\n The parsed configuration options of a pycbc.workflow.core.Workflow.\n\n ifos : str\n String containing the analysis interferometer IDs.\n\n run_dir : str\n The run directory, destination for retrieved files.\n\n bank_veto : Boolean\n If true, will retrieve the bank_veto_bank.xml file.\n\n summary_files : Boolean\n If true, will retrieve the summary page style files.\n\n Returns\n -------\n file_list : pycbc.workflow.FileList object\n A FileList containing the retrieved files." - }, - { - "code": "def right(self):\n return self.source.directory[self.right_sibling_id] \\\n if self.right_sibling_id != NOSTREAM else None", - "docstring": "Entry is right sibling of current directory entry" - }, - { - "code": "def GetKey(self, path, cycle=9999, rootpy=True, **kwargs):\n key = super(_DirectoryBase, self).GetKey(path, cycle)\n if not key:\n raise DoesNotExist\n if rootpy:\n return asrootpy(key, **kwargs)\n return key", - "docstring": "Override TDirectory's GetKey and also handle accessing keys nested\n arbitrarily deep in subdirectories." 
- }, - { - "code": "def response(self, model=None, code=HTTPStatus.OK, description=None, **kwargs):\n code = HTTPStatus(code)\n if code is HTTPStatus.NO_CONTENT:\n assert model is None\n if model is None and code not in {HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT}:\n if code.value not in http_exceptions.default_exceptions:\n raise ValueError(\"`model` parameter is required for code %d\" % code)\n model = self.model(\n name='HTTPError%d' % code,\n model=DefaultHTTPErrorSchema(http_code=code)\n )\n if description is None:\n description = code.description\n def response_serializer_decorator(func):\n def dump_wrapper(*args, **kwargs):\n response = func(*args, **kwargs)\n extra_headers = None\n if response is None:\n if model is not None:\n raise ValueError(\"Response cannot not be None with HTTP status %d\" % code)\n return flask.Response(status=code)\n elif isinstance(response, flask.Response) or model is None:\n return response\n elif isinstance(response, tuple):\n response, _code, extra_headers = unpack(response)\n else:\n _code = code\n if HTTPStatus(_code) is code:\n response = model.dump(response).data\n return response, _code, extra_headers\n return dump_wrapper\n def decorator(func_or_class):\n if code.value in http_exceptions.default_exceptions:\n decorated_func_or_class = func_or_class\n elif isinstance(func_or_class, type):\n func_or_class._apply_decorator_to_methods(response_serializer_decorator)\n decorated_func_or_class = func_or_class\n else:\n decorated_func_or_class = wraps(func_or_class)(\n response_serializer_decorator(func_or_class)\n )\n if model is None:\n api_model = None\n else:\n if isinstance(model, Model):\n api_model = model\n else:\n api_model = self.model(model=model)\n if getattr(model, 'many', False):\n api_model = [api_model]\n doc_decorator = self.doc(\n responses={\n code.value: (description, api_model)\n }\n )\n return doc_decorator(decorated_func_or_class)\n return decorator", - "docstring": "Endpoint response OpenAPI documentation decorator.\n\n It automatically documents HTTPError%(code)d responses with relevant\n schemas.\n\n Arguments:\n model (flask_marshmallow.Schema) - it can be a class or an instance\n of the class, which will be used for OpenAPI documentation\n purposes. It can be omitted if ``code`` argument is set to an\n error HTTP status code.\n code (int) - HTTP status code which is documented.\n description (str)\n\n Example:\n >>> @namespace.response(BaseTeamSchema(many=True))\n ... @namespace.response(code=HTTPStatus.FORBIDDEN)\n ... def get_teams():\n ... if not user.is_admin:\n ... abort(HTTPStatus.FORBIDDEN)\n ... return Team.query.all()" - }, - { - "code": "async def process_response(self, request, response):\n await super().process_response(request, response)\n if COOKIE_AUTH_KEY in request:\n if response.started:\n raise RuntimeError(\"Cannot save cookie into started response\")\n cookie = request[COOKIE_AUTH_KEY]\n if cookie == '':\n response.del_cookie(self.cookie_name)\n else:\n response.set_cookie(self.cookie_name, cookie)", - "docstring": "Called to perform any processing of the response required.\n\n This function stores any cookie data in the COOKIE_AUTH_KEY as a\n cookie in the response object. 
If the value is a empty string, the\n associated cookie is deleted instead.\n\n This function requires the response to be a aiohttp Response object,\n and assumes that the response has not started if the remember or\n forget functions are called during the request.\n\n Args:\n request: aiohttp Request object.\n response: response object returned from the handled view\n\n Raises:\n RuntimeError: Raised if response has already started." - }, - { - "code": "def NCBISequenceLink(title, default=None):\n url = NCBISequenceLinkURL(title)\n if url is None:\n return default\n else:\n return '%s' % (url, title)", - "docstring": "Given a sequence title, like \"gi|42768646|gb|AY516849.1| Homo sapiens\",\n return an HTML A tag dispalying a link to the info page at NCBI.\n\n title: the sequence title to produce an HTML link for.\n default: the value to return if the title cannot be parsed." - }, - { - "code": "def new(self, size, fill):\n return Image(PIL.Image.new(\"RGB\", size, fill))", - "docstring": "Return a new Image instance filled with a color." - }, - { - "code": "def SetFileContext(self, file_name, row_num, row, headers):\n self._context = (file_name, row_num, row, headers)", - "docstring": "Save the current context to be output with any errors.\n\n Args:\n file_name: string\n row_num: int\n row: list of strings\n headers: list of column headers, its order corresponding to row's" - }, - { - "code": "def profile(self, profile):\n self._staging_data = None\n lang = profile.get('install_json', {}).get('programLanguage', 'PYTHON')\n profile_args = ArgBuilder(lang, self.profile_args(profile.get('args')))\n self._profile = profile\n self._profile['profile_args'] = profile_args\n self.load_tcex()\n self.reports.profile(profile.get('profile_name'))\n self._create_tc_dirs()", - "docstring": "Set the current profile.\n\n Args:\n profile (dict): The profile data." - }, - { - "code": "def RegisterValue(self, value):\n if self.bins:\n for b in self.bins:\n if b.range_max_value > value:\n b.num += 1\n return\n self.bins[-1].num += 1", - "docstring": "Puts a given value into an appropriate bin." - }, - { - "code": "def generate_route(self, route):\n self.emit('')\n self.emit('route %s (%s, %s, %s)' % (\n route.name,\n self.format_data_type(route.arg_data_type),\n self.format_data_type(route.result_data_type),\n self.format_data_type(route.error_data_type)\n ))\n with self.indent():\n if route.doc is not None:\n self.emit(self.format_string(route.doc))", - "docstring": "Output a route definition." - }, - { - "code": "def local(self):\n assert self.name in CFG[\"container\"][\"images\"].value\n tmp_dir = local.path(str(CFG[\"tmp_dir\"]))\n target_dir = tmp_dir / self.name\n if not target_dir.exists() or not is_valid(self, target_dir):\n unpack(self, target_dir)\n return target_dir", - "docstring": "Finds the current location of a container.\n Also unpacks the project if necessary.\n\n Returns:\n target: The path, where the container lies in the end." 
- }, - { - "code": "def remove_files():\n logger.info(\"Removing local track files that were not downloaded...\")\n files = [f for f in os.listdir('.') if os.path.isfile(f)]\n for f in files:\n if f not in fileToKeep:\n os.remove(f)", - "docstring": "Removes any pre-existing tracks that were not just downloaded" - }, - { - "code": "def print_params(self):\n print(\"Parameters\")\n print(\"--------------------\")\n for param in PARAM_ORDER:\n print(' {:>11s} = {}'.format(param, self.parameter[param]))", - "docstring": "Print the current best set of parameters" - }, - { - "code": "def modify_classes():\n import copy\n from django.conf import settings\n from django.contrib.admin.sites import site\n from django.utils.importlib import import_module\n from django.utils.module_loading import module_has_submodule\n for app in settings.INSTALLED_APPS:\n mod = import_module(app)\n try:\n before_import_registry = copy.copy(site._registry)\n import_module('%s.class_modifiers' % app)\n except:\n site._registry = before_import_registry\n if module_has_submodule(mod, 'class_modifiers'):\n raise", - "docstring": "Auto-discover INSTALLED_APPS class_modifiers.py modules and fail silently when\n not present. This forces an import on them to modify any classes they\n may want." - }, - { - "code": "def disown(cmd):\n subprocess.Popen(cmd,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)", - "docstring": "Call a system command in the background,\n disown it and hide it's output." - }, - { - "code": "def commit_token_operation(self, token_op, current_block_number):\n if self.disposition != DISPOSITION_RW:\n log.error(\"FATAL: borrowing violation: not a read-write connection\")\n traceback.print_stack()\n os.abort()\n cur = self.db.cursor()\n opcode = token_op.get('opcode', None)\n clean_token_op = self.sanitize_op(token_op)\n try:\n assert token_operation_is_valid(token_op), 'Invalid token operation'\n assert opcode is not None, 'No opcode given'\n assert 'txid' in token_op, 'No txid'\n assert 'vtxindex' in token_op, 'No vtxindex'\n except Exception as e:\n log.exception(e)\n log.error('FATAL: failed to commit token operation')\n self.db.rollback()\n os.abort()\n table = token_operation_get_table(token_op)\n account_payment_info = token_operation_get_account_payment_info(token_op)\n account_credit_info = token_operation_get_account_credit_info(token_op)\n try:\n for key in account_payment_info:\n assert account_payment_info[key] is not None, 'BUG: payment info key {} is None'.format(key)\n for key in account_credit_info:\n assert account_credit_info[key] is not None, 'BUG: credit info key {} is not None'.format(key)\n except Exception as e:\n log.exception(e)\n log.error(\"FATAL: invalid token debit or credit info\")\n os.abort()\n self.log_accept(current_block_number, token_op['vtxindex'], token_op['op'], token_op)\n self.commit_account_debit(token_op, account_payment_info, current_block_number, token_op['vtxindex'], token_op['txid'])\n self.commit_account_credit(token_op, account_credit_info, current_block_number, token_op['vtxindex'], token_op['txid'])\n namedb_history_save(cur, opcode, token_op['address'], None, None, current_block_number, token_op['vtxindex'], token_op['txid'], clean_token_op)\n return clean_token_op", - "docstring": "Commit a token operation that debits one account and credits another\n\n Returns the new canonicalized record (with all compatibility quirks preserved)\n\n DO NOT CALL THIS DIRECTLY" - }, - { - "code": "def _list(self):\n if 
get_order_args().get(self.__class__.__name__):\n order_column, order_direction = get_order_args().get(\n self.__class__.__name__\n )\n else:\n order_column, order_direction = \"\", \"\"\n page = get_page_args().get(self.__class__.__name__)\n page_size = get_page_size_args().get(self.__class__.__name__)\n get_filter_args(self._filters)\n widgets = self._get_list_widget(\n filters=self._filters,\n order_column=order_column,\n order_direction=order_direction,\n page=page,\n page_size=page_size,\n )\n form = self.search_form.refresh()\n self.update_redirect()\n return self._get_search_widget(form=form, widgets=widgets)", - "docstring": "list function logic, override to implement different logic\n returns list and search widget" - }, - { - "code": "def _record(self, entries):\n outlist = []\n if entries.sp_record is not None:\n outlist.append(entries.sp_record.record())\n if entries.rr_record is not None:\n outlist.append(entries.rr_record.record())\n for nm_record in entries.nm_records:\n outlist.append(nm_record.record())\n if entries.px_record is not None:\n outlist.append(entries.px_record.record(self.rr_version))\n for sl_record in entries.sl_records:\n outlist.append(sl_record.record())\n if entries.tf_record is not None:\n outlist.append(entries.tf_record.record())\n if entries.cl_record is not None:\n outlist.append(entries.cl_record.record())\n if entries.pl_record is not None:\n outlist.append(entries.pl_record.record())\n if entries.re_record is not None:\n outlist.append(entries.re_record.record())\n for es_record in entries.es_records:\n outlist.append(es_record.record())\n if entries.er_record is not None:\n outlist.append(entries.er_record.record())\n if entries.ce_record is not None:\n outlist.append(entries.ce_record.record())\n for pd_record in entries.pd_records:\n outlist.append(pd_record.record())\n if entries.st_record is not None:\n outlist.append(entries.st_record.record())\n if entries.sf_record is not None:\n outlist.append(entries.sf_record.record())\n return b''.join(outlist)", - "docstring": "Return a string representing the Rock Ridge entry.\n\n Parameters:\n entries - The dr_entries or ce_entries to generate a record for.\n Returns:\n A string representing the Rock Ridge entry." - }, - { - "code": "def goto_position(self, position_for_motors, duration, control=None, wait=False):\n for i, (motor_name, position) in enumerate(position_for_motors.iteritems()):\n w = False if i < len(position_for_motors) - 1 else wait\n m = getattr(self, motor_name)\n m.goto_position(position, duration, control, wait=w)", - "docstring": "Moves a subset of the motors to a position within a specific duration.\n\n :param dict position_for_motors: which motors you want to move {motor_name: pos, motor_name: pos,...}\n :param float duration: duration of the move\n :param str control: control type ('dummy', 'minjerk')\n :param bool wait: whether or not to wait for the end of the move\n\n .. note::In case of dynamixel motors, the speed is automatically adjusted so the goal position is reached after the chosen duration." 
- }, - { - "code": "def getGradient(self,j):\n i = int(self.indicator['term'][j])\n r = int(self.indicator['row'][j])\n c = int(self.indicator['col'][j])\n rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:])\n return rv", - "docstring": "get rotated gradient for fixed effect i" - }, - { - "code": "def do_gh(self, arg):\n if self.cmdprefix:\n raise CmdError(\"prefix not allowed\")\n if arg:\n raise CmdError(\"too many arguments\")\n if self.lastEvent:\n self.lastEvent.continueStatus = win32.DBG_EXCEPTION_HANDLED\n return self.do_go(arg)", - "docstring": "gh - go with exception handled" - }, - { - "code": "def ansible_perform_operation(self,\n host_list=None,\n remote_user=None,\n remote_pass=None,\n module=None,\n complex_args=None,\n module_args='',\n environment=None,\n check=False,\n sudo=False,\n sudo_user=None,\n sudo_pass=None,\n forks=20):\n (host_list, remote_user) = \\\n self.validate_host_parameters(host_list, remote_user)\n if (host_list, remote_user) is (None, None):\n return None\n if module is None:\n print \"ANSIBLE Perform operation: No module specified\"\n return None\n runner = ansible.runner.Runner(\n module_name=module,\n host_list=host_list,\n remote_user=remote_user,\n remote_pass=remote_pass,\n module_args=module_args,\n complex_args=complex_args,\n environment=environment,\n check=check,\n become=sudo,\n become_user=sudo_user,\n become_pass=sudo_pass,\n forks=forks)\n results = runner.run()\n results, failed_hosts = self.validate_results(results)\n if results['status'] != 'PASS':\n print \"ANSIBLE: [%s] operation failed [%s] [hosts: %s]\" % \\\n (module, complex_args, failed_hosts)\n return results, failed_hosts", - "docstring": "Perform any ansible operation." - }, - { - "code": "def log_env(self, level, env):\n self.log(level, \"ENVIRONMENT:\")\n for k, v in env.items():\n self.log(level, \" {} = {}\".format(k, pformat(v)))", - "docstring": "dump env into debug logger in readable format" - }, - { - "code": "def retrieve_product(self, product_id):\n response = self.request(E.retrieveProductSslCertRequest(\n E.id(product_id)\n ))\n return response.as_model(SSLProduct)", - "docstring": "Retrieve details on a single product." - }, - { - "code": "def get_socket(self, all_credentials, checkout=False):\n sock_info = self._get_socket_no_auth()\n try:\n sock_info.check_auth(all_credentials)\n yield sock_info\n except:\n self.return_socket(sock_info)\n raise\n else:\n if not checkout:\n self.return_socket(sock_info)", - "docstring": "Get a socket from the pool. Use with a \"with\" statement.\n\n Returns a :class:`SocketInfo` object wrapping a connected\n :class:`socket.socket`.\n\n This method should always be used in a with-statement::\n\n with pool.get_socket(credentials, checkout) as socket_info:\n socket_info.send_message(msg)\n data = socket_info.receive_message(op_code, request_id)\n\n The socket is logged in or out as needed to match ``all_credentials``\n using the correct authentication mechanism for the server's wire\n protocol version.\n\n Can raise ConnectionFailure or OperationFailure.\n\n :Parameters:\n - `all_credentials`: dict, maps auth source to MongoCredential.\n - `checkout` (optional): keep socket checked out." - }, - { - "code": "def send_command(self, command):\n logger.debug(\"Connecting to socket %s\", self.socket_file_path)\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n sock.connect(self.socket_file_path)\n except IOError as e:\n if e.errno == errno.ECONNREFUSED:\n logger.error(\"Connection refused. 
Is HAProxy running?\")\n return\n else:\n raise\n sock.sendall((command + \"\\n\").encode())\n response = b\"\"\n while True:\n try:\n chunk = sock.recv(SOCKET_BUFFER_SIZE)\n if chunk:\n response += chunk\n else:\n break\n except IOError as e:\n if e.errno not in (errno.EAGAIN, errno.EINTR):\n raise\n sock.close()\n return self.process_command_response(command, response)", - "docstring": "Sends a given command to the HAProxy control socket.\n\n Returns the response from the socket as a string.\n\n If a known error response (e.g. \"Permission denied.\") is given then\n the appropriate exception is raised." - }, - { - "code": "def progress(self, loaded, total, msg=''):\n self.fire('progress', {\n 'loaded': loaded,\n 'total': total,\n 'msg': msg\n })", - "docstring": "Notify on a progress change" - }, - { - "code": "def badRequestMethod(self, environ, start_response):\n response = \"400 Bad Request\\n\\nTo access this PyAMF gateway you \" \\\n \"must use POST requests (%s received)\" % environ['REQUEST_METHOD']\n start_response('400 Bad Request', [\n ('Content-Type', 'text/plain'),\n ('Content-Length', str(len(response))),\n ('Server', gateway.SERVER_NAME),\n ])\n return [response]", - "docstring": "Return HTTP 400 Bad Request." - }, - { - "code": "def find_vcs_root(cls, path):\n if cls.search_parents_for_root():\n valid_dirs = walk_up_dirs(path)\n else:\n valid_dirs = [path]\n for i, current_path in enumerate(valid_dirs):\n if cls.is_valid_root(current_path):\n return current_path, i\n return None", - "docstring": "Try to find a version control root directory of this type for the\n given path.\n\n If successful, returns (vcs_root, levels_up), where vcs_root is the\n path to the version control root directory it found, and levels_up is an\n integer indicating how many parent directories it had to search through\n to find it, where 0 means it was found in the indicated path, 1 means it\n was found in that path's parent, etc. If not sucessful, returns None" - }, - { - "code": "def taropen(cls, name, mode=\"r\", fileobj=None, **kwargs):\n if len(mode) > 1 or mode not in \"raw\":\n raise ValueError(\"mode must be 'r', 'a' or 'w'\")\n return cls(name, mode, fileobj, **kwargs)", - "docstring": "Open uncompressed tar archive name for reading or writing." - }, - { - "code": "def _prepare_graph_terms(self, default_screen):\n columns = self.columns.copy()\n screen = self.screen\n if screen is None:\n screen = default_screen\n columns[SCREEN_NAME] = screen\n return columns", - "docstring": "Helper for to_graph and to_execution_plan." - }, - { - "code": "def set_all_pwm(self, on, off):\n self.i2c.write8(ALL_LED_ON_L, on & 0xFF)\n self.i2c.write8(ALL_LED_ON_H, on >> 8)\n self.i2c.write8(ALL_LED_OFF_L, off & 0xFF)\n self.i2c.write8(ALL_LED_OFF_H, off >> 8)", - "docstring": "Sets all PWM channels." 
- }, - { - "code": "def get_assign_annotation(node):\n annotation = None\n annotation_node = None\n try:\n annotation_node = node.annotation\n except AttributeError:\n annotation_node = getattr(node, \"type_annotation\", None)\n if annotation_node:\n if isinstance(annotation_node, astroid.nodes.Const):\n annotation = node.value\n else:\n annotation = annotation_node.as_string()\n return annotation", - "docstring": "Get the type annotation of the assignment of the given node.\n\n :param node: The node to get the annotation for.\n :type node: astroid.nodes.Assign or astroid.nodes.AnnAssign\n\n :returns: The type annotation as a string, or None if one does not exist.\n :type: str or None" - }, - { - "code": "async def search_and_download(album, artist, format, size, out_filepath, *, size_tolerance_prct, amazon_tlds, no_lq_sources,\n async_loop):\n source_args = (size, size_tolerance_prct)\n cover_sources = [sources.LastFmCoverSource(*source_args),\n sources.AmazonCdCoverSource(*source_args),\n sources.AmazonDigitalCoverSource(*source_args)]\n for tld in amazon_tlds:\n cover_sources.append(sources.AmazonCdCoverSource(*source_args, tld=tld))\n if not no_lq_sources:\n cover_sources.append(sources.GoogleImagesWebScrapeCoverSource(*source_args))\n search_futures = []\n for cover_source in cover_sources:\n coroutine = cover_source.search(album, artist)\n future = asyncio.ensure_future(coroutine, loop=async_loop)\n search_futures.append(future)\n await asyncio.wait(search_futures, loop=async_loop)\n results = []\n for future in search_futures:\n source_results = future.result()\n results.extend(source_results)\n results = await CoverSourceResult.preProcessForComparison(results, size, size_tolerance_prct)\n results.sort(reverse=True,\n key=functools.cmp_to_key(functools.partial(CoverSourceResult.compare,\n target_size=size,\n size_tolerance_prct=size_tolerance_prct)))\n if not results:\n logging.getLogger(\"Main\").info(\"No results\")\n for result in results:\n try:\n await result.get(format, size, size_tolerance_prct, out_filepath)\n except Exception as e:\n logging.getLogger(\"Main\").warning(\"Download of %s failed: %s %s\" % (result,\n e.__class__.__qualname__,\n e))\n continue\n else:\n return True\n return False", - "docstring": "Search and download a cover, return True if success, False instead." - }, - { - "code": "def update(self):\n for s in self.sensors:\n s.colliding = self.io.get_collision_state(collision_name=s.name)", - "docstring": "Update the state of the collision detectors." - }, - { - "code": "def meta(self):\n if not self._pv.meta_data_property or not self._meta_target:\n return {}\n return getattr(self._meta_target, self._pv.meta_data_property)", - "docstring": "Value of the bound meta-property on the target." 
- }, - { - "code": "def find_and_read_ebins(hdulist):\n from fermipy import utils\n ebins = None\n if 'ENERGIES' in hdulist:\n hdu = hdulist['ENERGIES']\n ectr = hdu.data.field(hdu.columns[0].name)\n ebins = np.exp(utils.center_to_edge(np.log(ectr)))\n elif 'EBOUNDS' in hdulist:\n hdu = hdulist['EBOUNDS']\n emin = hdu.data.field('E_MIN') / 1E3\n emax = hdu.data.field('E_MAX') / 1E3\n ebins = np.append(emin, emax[-1])\n return ebins", - "docstring": "Reads and returns the energy bin edges.\n\n This works for both the CASE where the energies are in the ENERGIES HDU\n and the case where they are in the EBOUND HDU" - }, - { - "code": "def read_raster(raster_file):\n ds = gdal_Open(raster_file)\n band = ds.GetRasterBand(1)\n data = band.ReadAsArray()\n xsize = band.XSize\n ysize = band.YSize\n nodata_value = band.GetNoDataValue()\n geotrans = ds.GetGeoTransform()\n dttype = band.DataType\n srs = osr_SpatialReference()\n srs.ImportFromWkt(ds.GetProjection())\n if nodata_value is None:\n nodata_value = DEFAULT_NODATA\n band = None\n ds = None\n return Raster(ysize, xsize, data, nodata_value, geotrans, srs, dttype)", - "docstring": "Read raster by GDAL.\n\n Args:\n raster_file: raster file path.\n\n Returns:\n Raster object." - }, - { - "code": "def retrieve_pt(cls, request, service):\n try:\n pgt = cls.objects.get(user=request.user, session_key=request.session.session_key).pgt\n except cls.DoesNotExist:\n raise ProxyError(\n \"INVALID_TICKET\",\n \"No proxy ticket found for this HttpRequest object\"\n )\n else:\n client = get_cas_client(service_url=service, request=request)\n try:\n return client.get_proxy_ticket(pgt)\n except CASError as error:\n raise ProxyError(*error.args)\n except Exception as e:\n raise ProxyError(e)", - "docstring": "`request` should be the current HttpRequest object\n `service` a string representing the service for witch we want to\n retrieve a ticket.\n The function return a Proxy Ticket or raise `ProxyError`" - }, - { - "code": "def shade(renderbuffer, bufferbbox, img3d, bbox):\n if not Bbox.intersects(bufferbbox, bbox):\n return\n spt = max2(bbox.minpt, bufferbbox.minpt)\n ept = min2(bbox.maxpt, bufferbbox.maxpt)\n ZERO3 = Vec(0,0,0)\n istart = max2(spt - bbox.minpt, ZERO3)\n iend = min2(ept - bbox.maxpt, ZERO3) + img3d.shape[:3]\n rbox = Bbox(spt, ept) - bufferbbox.minpt\n if len(img3d.shape) == 3:\n img3d = img3d[ :, :, :, np.newaxis]\n renderbuffer[ rbox.to_slices() ] = img3d[ istart.x:iend.x, istart.y:iend.y, istart.z:iend.z, : ]", - "docstring": "Shade a renderbuffer with a downloaded chunk. \n The buffer will only be painted in the overlapping\n region of the content." - }, - { - "code": "def firstChild(hot_map, index):\n children = hot_map[index][2]\n if children:\n return children[0][1]\n else:\n return hot_map[index][1]", - "docstring": "Return the first child of the node indicated by index." - }, - { - "code": "def unsubscribe(self, tag, match_type=None):\n if tag is None:\n return\n match_func = self._get_match_func(match_type)\n self.pending_tags.remove([tag, match_func])\n old_events = self.pending_events\n self.pending_events = []\n for evt in old_events:\n if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags):\n self.pending_events.append(evt)", - "docstring": "Un-subscribe to events matching the passed tag." 
- }, - { - "code": "def reset(self):\n if not self.request_list.conflict:\n phase = _ResolvePhase(self.request_list.requirements, solver=self)\n self.pr(\"resetting...\")\n self._init()\n self._push_phase(phase)", - "docstring": "Reset the solver, removing any current solve." - }, - { - "code": "def csv(self):\n lines = self._parsecsv(self.raw)\n keys = next(lines)\n for line in lines:\n yield dict(zip(keys, line))", - "docstring": "Parse raw response as csv and return row object list." - }, - { - "code": "def qteInsertKey(self, keysequence: QtmacsKeysequence, macroName: str):\n keyMap = self\n keysequence = keysequence.toQtKeylist()\n for key in keysequence[:-1]:\n if key not in keyMap:\n keyMap[key] = {}\n if not isinstance(keyMap[key], dict):\n keyMap[key] = {}\n keyMap = keyMap[key]\n keyMap[keysequence[-1]] = macroName", - "docstring": "Insert a new key into the key map and associate it with a\n macro.\n\n If the key sequence is already associated with a macro then it\n will be overwritten.\n\n |Args|\n\n * ``keysequence`` (**QtmacsKeysequence**): associate a macro with\n a key sequence in this key map.\n * ``macroName`` (**str**): macro name.\n\n |Returns|\n\n **None**\n\n |Raises|\n\n * **QtmacsArgumentError** if at least one argument has an invalid type." - }, - { - "code": "def fetch_vest_scores(vest_dict,\n ref_aa, somatic_aa, codon_pos,\n default_vest=0.0):\n vest_score_list = []\n for i in range(len(somatic_aa)):\n if codon_pos[i] is not None:\n tmp_score = vest_dict.get(codon_pos[i]+1, {}).get(ref_aa[i], {}).get(somatic_aa[i], default_vest)\n else:\n tmp_score = 0.0\n vest_score_list.append(tmp_score)\n return vest_score_list", - "docstring": "Get VEST scores from pre-computed scores in dictionary.\n\n Note: either all mutations should be missense or non-missense intended\n to have value equal to default.\n\n Parameters\n ----------\n vest_dict : dict\n dictionary containing vest scores across the gene of interest\n ref_aa: list of str\n list of reference amino acids\n somatic_aa: list of str\n somatic mutation aa\n codon_pos: list of int\n position of codon in protein sequence\n default_vest: float, default=0.0\n value to use if VEST score not available for a given mutation\n\n Returns\n -------\n vest_score_list: list\n score results for mutations" - }, - { - "code": "def get_duplicates(self):\n warnings.warn(\"'get_duplicates' is deprecated and will be removed in \"\n \"a future release. You can use \"\n \"idx[idx.duplicated()].unique() instead\",\n FutureWarning, stacklevel=2)\n return self[self.duplicated()].unique()", - "docstring": "Extract duplicated index elements.\n\n .. deprecated:: 0.23.0\n Use idx[idx.duplicated()].unique() instead\n\n Returns a sorted list of index elements which appear more than once in\n the index.\n\n Returns\n -------\n array-like\n List of duplicated indexes.\n\n See Also\n --------\n Index.duplicated : Return boolean array denoting duplicates.\n Index.drop_duplicates : Return Index with duplicates removed.\n\n Examples\n --------\n\n Works on different Index of types.\n\n >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates() # doctest: +SKIP\n [2, 3]\n\n Note that for a DatetimeIndex, it does not return a list but a new\n DatetimeIndex:\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',\n ... '2018-01-03', '2018-01-04', '2018-01-04'],\n ... 
format='%Y-%m-%d')\n >>> pd.Index(dates).get_duplicates() # doctest: +SKIP\n DatetimeIndex(['2018-01-03', '2018-01-04'],\n dtype='datetime64[ns]', freq=None)\n\n Sorts duplicated elements even when indexes are unordered.\n\n >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates() # doctest: +SKIP\n [2, 3]\n\n Return empty array-like structure when all elements are unique.\n\n >>> pd.Index([1, 2, 3, 4]).get_duplicates() # doctest: +SKIP\n []\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],\n ... format='%Y-%m-%d')\n >>> pd.Index(dates).get_duplicates() # doctest: +SKIP\n DatetimeIndex([], dtype='datetime64[ns]', freq=None)" - }, - { - "code": "def npoints_between(lon1, lat1, depth1, lon2, lat2, depth2, npoints):\n hdist = geodetic_distance(lon1, lat1, lon2, lat2)\n vdist = depth2 - depth1\n rlons, rlats, rdepths = npoints_towards(\n lon1, lat1, depth1, azimuth(lon1, lat1, lon2, lat2),\n hdist, vdist, npoints\n )\n rlons[-1] = lon2\n rlats[-1] = lat2\n rdepths[-1] = depth2\n return rlons, rlats, rdepths", - "docstring": "Find a list of specified number of points between two given ones that are\n equally spaced along the great circle arc connecting given points.\n\n :param float lon1, lat1, depth1:\n Coordinates of a point to start from. The first point in a resulting\n list has these coordinates.\n :param float lon2, lat2, depth2:\n Coordinates of a point to finish at. The last point in a resulting\n list has these coordinates.\n :param npoints:\n Integer number of points to return. First and last points count,\n so if there have to be two intervals, ``npoints`` should be 3.\n :returns:\n Tuple of three 1d numpy arrays: longitudes, latitudes and depths\n of resulting points respectively.\n\n Finds distance between two reference points and calls\n :func:`npoints_towards`." 
- }, - { - "code": "def GetDatabaseAccount(self, url_connection=None):\n if url_connection is None:\n url_connection = self.url_connection\n initial_headers = dict(self.default_headers)\n headers = base.GetHeaders(self,\n initial_headers,\n 'get',\n '',\n '',\n '',\n {})\n request = request_object._RequestObject('databaseaccount', documents._OperationType.Read, url_connection)\n result, self.last_response_headers = self.__Get('',\n request,\n headers)\n database_account = documents.DatabaseAccount()\n database_account.DatabasesLink = '/dbs/'\n database_account.MediaLink = '/media/'\n if (http_constants.HttpHeaders.MaxMediaStorageUsageInMB in\n self.last_response_headers):\n database_account.MaxMediaStorageUsageInMB = (\n self.last_response_headers[\n http_constants.HttpHeaders.MaxMediaStorageUsageInMB])\n if (http_constants.HttpHeaders.CurrentMediaStorageUsageInMB in\n self.last_response_headers):\n database_account.CurrentMediaStorageUsageInMB = (\n self.last_response_headers[\n http_constants.HttpHeaders.CurrentMediaStorageUsageInMB])\n database_account.ConsistencyPolicy = result.get(constants._Constants.UserConsistencyPolicy)\n if constants._Constants.WritableLocations in result:\n database_account._WritableLocations = result[constants._Constants.WritableLocations]\n if constants._Constants.ReadableLocations in result:\n database_account._ReadableLocations = result[constants._Constants.ReadableLocations]\n if constants._Constants.EnableMultipleWritableLocations in result:\n database_account._EnableMultipleWritableLocations = result[constants._Constants.EnableMultipleWritableLocations]\n self._useMultipleWriteLocations = self.connection_policy.UseMultipleWriteLocations and database_account._EnableMultipleWritableLocations\n return database_account", - "docstring": "Gets database account info.\n\n :return:\n The Database Account.\n :rtype:\n documents.DatabaseAccount" - }, - { - "code": "def django_api(\n self,\n name,\n introduced_at,\n undocumented=False,\n deprecated_at=None,\n title=None,\n **options):\n from acceptable.djangoutil import DjangoAPI\n location = get_callsite_location()\n api = DjangoAPI(\n self,\n name,\n introduced_at,\n options,\n location=location,\n undocumented=undocumented,\n deprecated_at=deprecated_at,\n title=title,\n )\n self.metadata.register_api(self.name, self.group, api)\n return api", - "docstring": "Add a django API handler to the service.\n\n :param name: This is the name of the django url to use.\n\n The 'methods' paramater can be supplied as normal, you can also user\n the @api.handler decorator to link this API to its handler." - }, - { - "code": "def draw(self, X, y, **kwargs):\n nan_col_counts = self.get_nan_col_counts()\n self.ind = np.arange(len(self.features_))\n if y is None:\n self.ax.barh(self.ind - self.width / 2, nan_col_counts, self.width,\n color=self.color, label=None)\n else:\n self.draw_stacked_bar(nan_col_counts)", - "docstring": "Called from the fit method, this method generated a horizontal bar plot.\n\n If y is none, then draws a simple horizontal bar chart.\n If y is not none, then draws a stacked horizontal bar chart for each nan count per\n target values." 
- }, - { - "code": "def _get_enrollments_list_page(self, params=None):\n req_url = urljoin(self.base_url, self.enrollment_list_url)\n resp = self.requester.get(req_url, params=params)\n resp.raise_for_status()\n resp_json = resp.json()\n results = resp_json['results']\n next_url_str = resp_json.get('next')\n cursor = None\n qstr_cursor = None\n if next_url_str:\n next_url = urlparse(next_url_str)\n qstr = parse_qs(next_url.query)\n qstr_cursor = qstr.get('cursor')\n if qstr_cursor and isinstance(qstr_cursor, list):\n cursor = qstr_cursor[0]\n return results, cursor", - "docstring": "Submit request to retrieve enrollments list.\n\n Args:\n params (dict): Query parameters to use in the request. Valid parameters are:\n * course_id: Filters the result to course enrollments for the course\n corresponding to the given course ID. The value must be URL encoded.\n Optional.\n * username: username: List of comma-separated usernames. Filters the result to the\n course enrollments of the given users. Optional." - }, - { - "code": "def _ReadMemberHeader(self, file_object):\n file_offset = file_object.get_offset()\n member_header = self._ReadStructure(\n file_object, file_offset, self._MEMBER_HEADER_SIZE,\n self._MEMBER_HEADER, 'member header')\n if member_header.signature != self._GZIP_SIGNATURE:\n raise errors.FileFormatError(\n 'Unsupported signature: 0x{0:04x}.'.format(member_header.signature))\n if member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE:\n raise errors.FileFormatError(\n 'Unsupported compression method: {0:d}.'.format(\n member_header.compression_method))\n self.modification_time = member_header.modification_time\n self.operating_system = member_header.operating_system\n if member_header.flags & self._FLAG_FEXTRA:\n file_offset = file_object.get_offset()\n extra_field_data_size = self._ReadStructure(\n file_object, file_offset, self._UINT16LE_SIZE,\n self._UINT16LE, 'extra field data size')\n file_object.seek(extra_field_data_size, os.SEEK_CUR)\n if member_header.flags & self._FLAG_FNAME:\n file_offset = file_object.get_offset()\n string_value = self._ReadString(\n file_object, file_offset, self._CSTRING, 'original filename')\n self.original_filename = string_value.rstrip('\\x00')\n if member_header.flags & self._FLAG_FCOMMENT:\n file_offset = file_object.get_offset()\n string_value = self._ReadString(\n file_object, file_offset, self._CSTRING, 'comment')\n self.comment = string_value.rstrip('\\x00')\n if member_header.flags & self._FLAG_FHCRC:\n file_object.read(2)", - "docstring": "Reads a member header.\n\n Args:\n file_object (FileIO): file-like object to read from.\n\n Raises:\n FileFormatError: if the member header cannot be read." - }, - { - "code": "def _getImageSize(filename):\n result = None\n file = open(filename, 'rb')\n if file.read(8) == b'\\x89PNG\\r\\n\\x1a\\n':\n while 1:\n length, = _struct.unpack('>i', file.read(4))\n chunkID = file.read(4)\n if chunkID == '':\n break\n if chunkID == b'IHDR':\n result = _struct.unpack('>ii', file.read(8))\n break\n file.seek(4 + length, 1)\n file.close()\n return result\n file.seek(0)\n if file.read(8) == b'BM':\n file.seek(18, 0)\n result = _struct.unpack(' AtomFeed:\n root = parse_xml(filename).getroot()\n return _parse_atom(root)", - "docstring": "Parse an Atom feed from a local XML file." 
- }, - { - "code": "def main(handwriting_datasets_file, analyze_features):\n logging.info(\"Start loading data '%s' ...\", handwriting_datasets_file)\n loaded = pickle.load(open(handwriting_datasets_file))\n raw_datasets = loaded['handwriting_datasets']\n logging.info(\"%i datasets loaded.\", len(raw_datasets))\n logging.info(\"Start analyzing...\")\n if analyze_features:\n featurelist = [(features.AspectRatio(), \"aspect_ratio.csv\"),\n (features.ReCurvature(1), \"re_curvature.csv\"),\n (features.Height(), \"height.csv\"),\n (features.Width(), \"width.csv\"),\n (features.Time(), \"time.csv\"),\n (features.Ink(), \"ink.csv\"),\n (features.StrokeCount(), \"stroke-count.csv\")]\n for feat, filename in featurelist:\n logging.info(\"create %s...\", filename)\n analyze_feature(raw_datasets, feat, filename)\n cfg = utils.get_project_configuration()\n if 'data_analyzation_queue' in cfg:\n metrics = dam.get_metrics(cfg['data_analyzation_queue'])\n for metric in metrics:\n logging.info(\"Start metric %s...\", str(metric))\n metric(raw_datasets)\n else:\n logging.info(\"No 'data_analyzation_queue' in ~/.hwrtrc\")", - "docstring": "Start the creation of the wanted metric." - }, - { - "code": "def run(self, fetch_image=True, **kwargs):\n self.create(fetch_image=fetch_image, **kwargs)\n self.start()", - "docstring": "Create the container and start it. Similar to ``docker run``.\n\n :param fetch_image:\n Whether to try pull the image if it's not found. The behaviour here\n is similar to ``docker run`` and this parameter defaults to\n ``True``.\n :param **kwargs: Keyword arguments passed to :meth:`.create`." - }, - { - "code": "def _try_coerce_args(self, values, other):\n values = values.view('i8')\n if isinstance(other, bool):\n raise TypeError\n elif is_null_datetimelike(other):\n other = tslibs.iNaT\n elif isinstance(other, (datetime, np.datetime64, date)):\n other = self._box_func(other)\n if getattr(other, 'tz') is not None:\n raise TypeError(\"cannot coerce a Timestamp with a tz on a \"\n \"naive Block\")\n other = other.asm8.view('i8')\n elif hasattr(other, 'dtype') and is_datetime64_dtype(other):\n other = other.astype('i8', copy=False).view('i8')\n else:\n raise TypeError(other)\n return values, other", - "docstring": "Coerce values and other to dtype 'i8'. NaN and NaT convert to\n the smallest i8, and will correctly round-trip to NaT if converted\n back in _try_coerce_result. values is always ndarray-like, other\n may not be\n\n Parameters\n ----------\n values : ndarray-like\n other : ndarray-like or scalar\n\n Returns\n -------\n base-type values, base-type other" - }, - { - "code": "def delete_role(self, name, mount_point=DEFAULT_MOUNT_POINT):\n api_path = '/v1/{mount_point}/roles/{name}'.format(\n mount_point=mount_point,\n name=name,\n )\n return self._adapter.delete(\n url=api_path,\n )", - "docstring": "Delete an existing role by the given name.\n\n If the role does not exist, a 404 is returned.\n\n Supported methods:\n DELETE: /{mount_point}/roles/{name}. Produces: 204 (empty body)\n\n :param name: the name of the role to delete. 
This\n is part of the request URL.\n :type name: str | unicode\n :param mount_point: The \"path\" the method/backend was mounted on.\n :type mount_point: str | unicode\n :return: The response of the request.\n :rtype: requests.Response" - }, - { - "code": "async def get_config(self):\n config_facade = client.ModelConfigFacade.from_connection(\n self.connection()\n )\n result = await config_facade.ModelGet()\n config = result.config\n for key, value in config.items():\n config[key] = ConfigValue.from_json(value)\n return config", - "docstring": "Return the configuration settings for this model.\n\n :returns: A ``dict`` mapping keys to `ConfigValue` instances,\n which have `source` and `value` attributes." - }, - { - "code": "def from_dict(cls, ctx):\n 'Instance a new structure from a Python native type.'\n ctx = Context(ctx)\n s = cls()\n ContextFlags = ctx['ContextFlags']\n s.ContextFlags = ContextFlags\n for key in cls._others:\n if key != 'VectorRegister':\n setattr(s, key, ctx[key])\n else:\n w = ctx[key]\n v = (M128A * len(w))()\n i = 0\n for x in w:\n y = M128A()\n y.High = x >> 64\n y.Low = x - (x >> 64)\n v[i] = y\n i += 1\n setattr(s, key, v)\n if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:\n for key in cls._control:\n setattr(s, key, ctx[key])\n if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:\n for key in cls._integer:\n setattr(s, key, ctx[key])\n if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:\n for key in cls._segments:\n setattr(s, key, ctx[key])\n if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:\n for key in cls._debug:\n setattr(s, key, ctx[key])\n if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:\n xmm = s.FltSave.xmm\n for key in cls._mmx:\n y = M128A()\n y.High = x >> 64\n y.Low = x - (x >> 64)\n setattr(xmm, key, y)\n return s", - "docstring": "Instance a new structure from a Python native type." - }, - { - "code": "def load_json_from_file(file_path):\n try:\n with open(file_path) as f:\n json_data = json.load(f)\n except ValueError as e:\n raise ValueError('Given file {} is not a valid JSON file: {}'.format(file_path, e))\n else:\n return json_data", - "docstring": "Load schema from a JSON file" - }, - { - "code": "def modpath_pkg_resources(module, entry_point):\n result = []\n try:\n path = resource_filename_mod_entry_point(module.__name__, entry_point)\n except ImportError:\n logger.warning(\"module '%s' could not be imported\", module.__name__)\n except Exception:\n logger.warning(\"%r does not appear to be a valid module\", module)\n else:\n if path:\n result.append(path)\n return result", - "docstring": "Goes through pkg_resources for compliance with various PEPs.\n\n This one accepts a module as argument." 
- }, - { - "code": "def convert_npdist(self, node):\n with context(self.fname, node):\n npdist = []\n for np in node.nodalPlaneDist:\n prob, strike, dip, rake = (\n np['probability'], np['strike'], np['dip'], np['rake'])\n npdist.append((prob, geo.NodalPlane(strike, dip, rake)))\n if not self.spinning_floating:\n npdist = [(1, npdist[0][1])]\n return pmf.PMF(npdist)", - "docstring": "Convert the given node into a Nodal Plane Distribution.\n\n :param node: a nodalPlaneDist node\n :returns: a :class:`openquake.hazardlib.geo.NodalPlane` instance" - }, - { - "code": "def _defines(prefix, defs, suffix, env, c=_concat_ixes):\n return c(prefix, env.subst_path(processDefines(defs)), suffix, env)", - "docstring": "A wrapper around _concat_ixes that turns a list or string\n into a list of C preprocessor command-line definitions." - }, - { - "code": "def to_vcards(self):\n self._update()\n return [self._to_vcard(self._book[entry]) for entry in self._book.sections()]", - "docstring": "Return a list of vCards" - }, - { - "code": "def copy(self):\n df = super(Ensemble,self).copy()\n return type(self).from_dataframe(df=df)", - "docstring": "make a deep copy of self\n\n Returns\n -------\n Ensemble : Ensemble" - }, - { - "code": "def _print(self, force_flush=False):\n self._stream_flush()\n next_perc = self._calc_percent()\n if self.update_interval:\n do_update = time.time() - self.last_time >= self.update_interval\n elif force_flush:\n do_update = True\n else:\n do_update = next_perc > self.last_progress\n if do_update and self.active:\n self.last_progress = next_perc\n self._cache_percent_indicator(self.last_progress)\n if self.track:\n self._cached_output += ' Time elapsed: ' + \\\n self._get_time(self._elapsed())\n self._cache_eta()\n if self.item_id:\n self._cache_item_id()\n self._stream_out('\\r%s' % self._cached_output)\n self._stream_flush()\n self._cached_output = ''", - "docstring": "Prints formatted percentage and tracked time to the screen." - }, - { - "code": "def uninstall(**kwargs):\n force = kwargs.get('force')\n restore_legacy = kwargs.get('restore_legacy')\n colorama.init(strip=kwargs.get('no_color'))\n git_dir = current_git_dir()\n if git_dir is None:\n output(NOT_GIT_REPO_MSG)\n exit(1)\n hook_path = os.path.join(git_dir, 'hooks', 'pre-commit')\n if not os.path.isfile(hook_path):\n output(NO_HOOK_INSTALLED_MSG)\n exit(0)\n hook_hash = identify_hook(hook_path)\n if hook_hash:\n if not force:\n if not click.confirm(CONFIRM_UNINSTALL_HOOK_MSG, default=False):\n output(UNINSTALL_ABORTED_MSG)\n exit(1)\n else:\n output(CURRENT_HOOK_NOT_THERAPIST_MSG)\n exit(1)\n legacy_hook_path = os.path.join(git_dir, 'hooks', 'pre-commit.legacy')\n if os.path.isfile(legacy_hook_path):\n if not force and not restore_legacy:\n output(LEGACY_HOOK_EXISTS_MSG)\n restore_legacy = click.confirm(CONFIRM_RESTORE_LEGACY_HOOK_MSG, default=True)\n if restore_legacy:\n output(COPYING_LEGACY_HOOK_MSG, end='')\n shutil.copy2(legacy_hook_path, hook_path)\n os.remove(legacy_hook_path)\n output(DONE_COPYING_LEGACY_HOOK_MSG)\n exit(0)\n else:\n if force or click.confirm('Would you like to remove the legacy hook?', default=False):\n output(REMOVING_LEGACY_HOOK_MSG, end='')\n os.remove(legacy_hook_path)\n output(DONE_REMOVING_LEGACY_HOOK_MSG)\n output(UNINSTALLING_HOOK_MSG, end='')\n os.remove(hook_path)\n output(DONE_UNINSTALLING_HOOK_MSG)", - "docstring": "Uninstall the current pre-commit hook." 
- }, - { - "code": "def get_nbt(self,x,z):\n rx,cx = divmod(x,32)\n rz,cz = divmod(z,32)\n if (rx,rz) not in self.regions and (rx,rz) not in self.regionfiles:\n raise InconceivedChunk(\"Chunk %s,%s is not present in world\" % (x,z))\n nbt = self.get_region(rx,rz).get_nbt(cx,cz)\n assert nbt != None\n return nbt", - "docstring": "Return a NBT specified by the chunk coordinates x,z. Raise InconceivedChunk\n if the NBT file is not yet generated. To get a Chunk object, use get_chunk." - }, - { - "code": "def stream(self, f, blank_solutions=False, include_unspents=False, include_witness_data=True):\n include_witnesses = include_witness_data and self.has_witness_data()\n stream_struct(\"L\", f, self.version)\n if include_witnesses:\n f.write(b'\\0\\1')\n stream_struct(\"I\", f, len(self.txs_in))\n for t in self.txs_in:\n t.stream(f, blank_solutions=blank_solutions)\n stream_struct(\"I\", f, len(self.txs_out))\n for t in self.txs_out:\n t.stream(f)\n if include_witnesses:\n for tx_in in self.txs_in:\n witness = tx_in.witness\n stream_struct(\"I\", f, len(witness))\n for w in witness:\n stream_satoshi_string(f, w)\n stream_struct(\"L\", f, self.lock_time)\n if include_unspents and not self.missing_unspents():\n self.stream_unspents(f)", - "docstring": "Stream a Bitcoin transaction Tx to the file-like object f.\n\n :param f: writable file-like object to stream binary data of transaction\n :param blank_solutions: (optional) clear out the solutions scripts, effectively \"unsigning\" the\n transaction before writing it. Defaults to False\n :param include_unspents: (optional) stread out the Spendable objects after streaming the transaction.\n This is a pycoin-specific extension. Defaults to False.\n :param include_witness_data: (optional) stream segwit transactions including the witness data if the\n transaction has any witness data. Defaults to True." - }, - { - "code": "def set_return_val(self, state, val, is_fp=None, size=None, stack_base=None):\n ty = self.func_ty.returnty if self.func_ty is not None else None\n try:\n betterval = self._standardize_value(val, ty, state, None)\n except AttributeError:\n raise ValueError(\"Can't fit value %s into a return value\" % repr(val))\n if self.ret_val is not None:\n loc = self.ret_val\n elif is_fp is not None:\n loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL\n elif ty is not None:\n loc = self.FP_RETURN_VAL if isinstance(ty, SimTypeFloat) else self.RETURN_VAL\n else:\n loc = self.FP_RETURN_VAL if self.is_fp_value(val) else self.RETURN_VAL\n if loc is None:\n raise NotImplementedError(\"This SimCC doesn't know how to store this value - should be implemented\")\n loc.set_value(state, betterval, endness='Iend_BE', stack_base=stack_base)", - "docstring": "Set the return value into the given state" - }, - { - "code": "def _extrapolation(self, extrapolate):\n modes = ['extrapolate',\n 'raise',\n 'const',\n 'border']\n if extrapolate not in modes:\n msg = 'invalid extrapolation mode {}'.format(extrapolate)\n raise ValueError(msg)\n if extrapolate == 'raise':\n self.bounds_error = True\n self.extrapolate = False\n else:\n self.extrapolate = True\n self.bounds_error = False\n self.extrapolate_mode = extrapolate", - "docstring": "Check permited values of extrapolation." 
- }, - { - "code": "def matchToString(aaMatch, read1, read2, indent='', offsets=None):\n match = aaMatch['match']\n matchCount = match['matchCount']\n gapMismatchCount = match['gapMismatchCount']\n gapGapMismatchCount = match['gapGapMismatchCount']\n nonGapMismatchCount = match['nonGapMismatchCount']\n if offsets:\n len1 = len2 = len(offsets)\n else:\n len1, len2 = map(len, (read1, read2))\n result = []\n append = result.append\n append(countPrint('%sMatches' % indent, matchCount, len1, len2))\n mismatchCount = (gapMismatchCount + gapGapMismatchCount +\n nonGapMismatchCount)\n append(countPrint('%sMismatches' % indent, mismatchCount, len1, len2))\n append(countPrint('%s Not involving gaps (i.e., conflicts)' % (indent),\n nonGapMismatchCount, len1, len2))\n append(countPrint('%s Involving a gap in one sequence' % indent,\n gapMismatchCount, len1, len2))\n append(countPrint('%s Involving a gap in both sequences' % indent,\n gapGapMismatchCount, len1, len2))\n for read, key in zip((read1, read2), ('read1', 'read2')):\n append('%s Id: %s' % (indent, read.id))\n length = len(read)\n append('%s Length: %d' % (indent, length))\n gapCount = len(aaMatch[key]['gapOffsets'])\n append(countPrint('%s Gaps' % indent, gapCount, length))\n if gapCount:\n append(\n '%s Gap locations (1-based): %s' %\n (indent,\n ', '.join(map(lambda offset: str(offset + 1),\n sorted(aaMatch[key]['gapOffsets'])))))\n extraCount = aaMatch[key]['extraCount']\n if extraCount:\n append(countPrint('%s Extra nucleotides at end' % indent,\n extraCount, length))\n return '\\n'.join(result)", - "docstring": "Format amino acid sequence match as a string.\n\n @param aaMatch: A C{dict} returned by C{compareAaReads}.\n @param read1: A C{Read} instance or an instance of one of its subclasses.\n @param read2: A C{Read} instance or an instance of one of its subclasses.\n @param indent: A C{str} to indent all returned lines with.\n @param offsets: If not C{None}, a C{set} of offsets of interest that were\n only considered when making C{match}.\n @return: A C{str} describing the match." - }, - { - "code": "def fetch(self, method, url, data=None, expected_status_code=None):\n kwargs = self.prepare_request(method, url, data)\n log.debug(json.dumps(kwargs))\n response = getattr(requests, method.lower())(url, **kwargs)\n log.debug(json.dumps(response.content))\n if response.status_code >= 400:\n response.raise_for_status()\n if (expected_status_code\n and response.status_code != expected_status_code):\n raise NotExpectedStatusCode(self._get_error_reason(response))\n return response", - "docstring": "Prepare the headers, encode data, call API and provide\n data it returns" - }, - { - "code": "def dev_moments(self):\n return numpy.sum(numpy.abs(self.moments-self.ma.moments))", - "docstring": "Sum of the absolute deviations between the central moments of the\n instantaneous unit hydrograph and the ARMA approximation." - }, - { - "code": "def _format_playlist_line(self, lineNum, pad, station):\n line = \"{0}. 
{1}\".format(str(lineNum + self.startPos + 1).rjust(pad), station[0])\n f_data = ' [{0}, {1}]'.format(station[2], station[1])\n if version_info < (3, 0):\n if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX -2:\n f_data = ' [{0}]'.format(station[1])\n if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 2:\n while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) > self.bodyMaxX - 3:\n f_data = f_data[:-1]\n f_data += ']'\n if len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:\n while len(line.decode('utf-8', 'replace')) + len(f_data.decode('utf-8', 'replace')) < self.maxX - 2:\n line += ' '\n else:\n if len(line) + len(f_data) > self.bodyMaxX -2:\n f_data = ' [{0}]'.format(station[1])\n if len(line) + len(f_data) > self.bodyMaxX - 2:\n while len(line) + len(f_data) > self.bodyMaxX - 3:\n f_data = f_data[:-1]\n f_data += ']'\n if len(line) + len(f_data) < self.maxX - 2:\n while len(line) + len(f_data) < self.maxX - 2:\n line += ' '\n line += f_data\n return line", - "docstring": "format playlist line so that if fills self.maxX" - }, - { - "code": "def _generate_default_grp_constraints(roles, network_constraints):\n default_delay = network_constraints.get('default_delay')\n default_rate = network_constraints.get('default_rate')\n default_loss = network_constraints.get('default_loss', 0)\n except_groups = network_constraints.get('except', [])\n grps = network_constraints.get('groups', roles.keys())\n grps = [expand_groups(g) for g in grps]\n grps = [x for expanded_group in grps for x in expanded_group]\n return [{'src': grp1,\n 'dst': grp2,\n 'delay': default_delay,\n 'rate': default_rate,\n 'loss': default_loss}\n for grp1 in grps for grp2 in grps\n if ((grp1 != grp2\n or _src_equals_dst_in_constraints(network_constraints, grp1))\n and grp1 not in except_groups and grp2 not in except_groups)]", - "docstring": "Generate default symetric grp constraints." - }, - { - "code": "def ektnam(n, lenout=_default_len_out):\n n = ctypes.c_int(n)\n lenout = ctypes.c_int(lenout)\n table = stypes.stringToCharP(lenout)\n libspice.ektnam_c(n, lenout, table)\n return stypes.toPythonString(table)", - "docstring": "Return the name of a specified, loaded table.\n\n http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ektnam_c.html\n\n :param n: Index of table.\n :type n: int\n :param lenout: Maximum table name length.\n :type lenout: int\n :return: Name of table.\n :rtype: str" - }, - { - "code": "def _forwardrefload(l: Loader, value: Any, type_: type) -> Any:\n if l.frefs is None:\n raise TypedloadException('ForwardRef resolving is disabled for the loader', value=value, type_=type_)\n tname = type_.__forward_arg__\n t = l.frefs.get(tname)\n if t is None:\n raise TypedloadValueError(\n \"ForwardRef '%s' unknown\" % tname,\n value=value,\n type_=type_\n )\n return l.load(value, t, annotation=Annotation(AnnotationType.FORWARDREF, tname))", - "docstring": "This resolves a ForwardRef.\n\n It just looks up the type in the dictionary of known types\n and loads the value using that." - }, - { - "code": "def set_level(self, level):\n for handler in self.__coloredlogs_handlers:\n handler.setLevel(level=level)\n self.logger.setLevel(level=level)", - "docstring": "Set the logging level of this logger.\n\n :param level: must be an int or a str." 
- }, - { - "code": "def delete_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None):\n client = _get_client(region, key, keyid, profile)\n r = {}\n try:\n client.delete_pipeline(pipelineId=pipeline_id)\n r['result'] = True\n except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:\n r['error'] = six.text_type(e)\n return r", - "docstring": "Delete a pipeline, its pipeline definition, and its run history. This function is idempotent.\n\n CLI example:\n\n .. code-block:: bash\n\n salt myminion boto_datapipeline.delete_pipeline my_pipeline_id" - }, - { - "code": "def confusion_matrix(targets, predictions):\n r\n _supervised_evaluation_error_checking(targets, predictions)\n _check_same_type_not_float(targets, predictions)\n return _turicreate.extensions._supervised_streaming_evaluator(targets,\n predictions, \"confusion_matrix_no_map\", {})", - "docstring": "r\"\"\"\n Compute the confusion matrix for classifier predictions.\n\n Parameters\n ----------\n targets : SArray\n Ground truth class labels (cannot be of type float).\n\n predictions : SArray\n The prediction that corresponds to each target value.\n This vector must have the same length as ``targets``. The predictions\n SArray cannot be of type float.\n\n Returns\n -------\n out : SFrame\n An SFrame containing counts for 'target_label', 'predicted_label' and\n 'count' corresponding to each pair of true and predicted labels.\n\n See Also\n --------\n accuracy\n\n Examples\n --------\n >>> targets = turicreate.SArray([0, 1, 1, 0])\n >>> predictions = turicreate.SArray([1, 0, 1, 0])\n\n >>> turicreate.evaluation.confusion_matrix(targets, predictions)" - }, - { - "code": "def get_assets_by_parent_genus_type(self, asset_genus_type=None):\n return AssetList(self._provider_session.get_assets_by_parent_genus_type(asset_genus_type),\n self._config_map)", - "docstring": "Gets an ``AssetList`` corresponding to the given asset genus ``Type``\n and include any additional assets with genus types derived from the specified\n ``Type``.\n\n In plenary mode, the returned list contains all known assets or\n an error results. Otherwise, the returned list may contain only\n those assets that are accessible through this session.\n\n arg: asset_genus_type (osid.type.Type): an asset genus type\n return: (osid.repository.AssetList) - the returned ``Asset\n list``\n raise: NullArgument - ``asset_genus_type`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def on_delete(resc, req, resp, rid):\n signals.pre_req.send(resc.model)\n signals.pre_req_delete.send(resc.model)\n model = find(resc.model, rid)\n goldman.sess.store.delete(model)\n resp.status = falcon.HTTP_204\n signals.post_req.send(resc.model)\n signals.post_req_delete.send(resc.model)", - "docstring": "Delete the single item\n\n Upon a successful deletion an empty bodied 204\n is returned." - }, - { - "code": "def bakery_client_for_controller(self, controller_name):\n bakery_client = self.bakery_client\n if bakery_client:\n bakery_client = copy.copy(bakery_client)\n else:\n bakery_client = httpbakery.Client()\n bakery_client.cookies = self.jujudata.cookies_for_controller(\n controller_name)\n return bakery_client", - "docstring": "Make a copy of the bakery client with a the appropriate controller's\n cookiejar in it." 
- }, - { - "code": "def CacheObject(self, identifier, vfs_object):\n if identifier in self._values:\n raise KeyError('Object already cached for identifier: {0:s}'.format(\n identifier))\n if len(self._values) == self._maximum_number_of_cached_values:\n raise errors.CacheFullError('Maximum number of cached values reached.')\n self._values[identifier] = ObjectsCacheValue(vfs_object)", - "docstring": "Caches a VFS object.\n\n This method ignores the cache value reference count.\n\n Args:\n identifier (str): VFS object identifier.\n vfs_object (object): VFS object to cache.\n\n Raises:\n CacheFullError: if he maximum number of cached values is reached.\n KeyError: if the VFS object already is cached." - }, - { - "code": "def _sentence_context(match, language='latin', case_insensitive=True):\n language_punct = {'greek': r'\\.|;',\n 'latin': r'\\.|\\?|!'}\n assert language in language_punct.keys(), \\\n 'Available punctuation schemes: {}'.format(language_punct.keys())\n start = match.start()\n end = match.end()\n window = 1000\n snippet_left = match.string[start - window:start + 1]\n snippet_right = match.string[end:end + window]\n re_match = match.string[match.start():match.end()]\n comp_sent_boundary = regex.compile(language_punct[language], flags=regex.VERSION1)\n left_punct = []\n for punct in comp_sent_boundary.finditer(snippet_left):\n end = punct.end()\n left_punct.append(end)\n try:\n last_period = left_punct.pop() + 1\n except IndexError:\n last_period = 0\n right_punct = []\n for punct in comp_sent_boundary.finditer(snippet_right):\n end = punct.end()\n right_punct.append(end)\n try:\n first_period = right_punct.pop(0)\n except IndexError:\n first_period = 0\n sentence = snippet_left[last_period:-1] + '*' + re_match + '*' + snippet_right[0:first_period]\n return sentence", - "docstring": "Take one incoming regex match object and return the sentence in which\n the match occurs.\n\n :rtype : str\n :param match: regex.match\n :param language: str" - }, - { - "code": "def get_list(self, key, fallback=None, split=\",\"):\n fallback = fallback or []\n raw = self.get(key, None)\n if raw:\n return [value.strip() for value in raw.split(split)]\n return fallback", - "docstring": "Retrieve a value in list form.\n\n The interpolated value will be split on some key (by default, ',') and\n the resulting list will be returned.\n\n Arguments:\n key - the key to return\n fallback - The result to return if key isn't in the component. By\n default, this will be an empty list.\n split - The key to split the value on. By default, a comma (,)." - }, - { - "code": "def get_orm_classes_by_table_name_from_base(base: Type) -> Dict[str, Type]:\n return {cls.__tablename__: cls for cls in gen_orm_classes_from_base(base)}", - "docstring": "Given an SQLAlchemy ORM base class, returns a dictionary whose keys are\n table names and whose values are ORM classes.\n\n If you begin with the proper :class`Base` class, then this should give all\n tables and ORM classes in use." 
- }, - { - "code": "def _getFromDate(l, date):\r\n try:\r\n date = _toDate(date)\r\n i = _insertDateIndex(date, l) - 1\r\n if i == -1:\r\n return l[0]\r\n return l[i]\r\n except (ValueError, TypeError):\r\n return l[0]", - "docstring": "returns the index of given or best fitting date" - }, - { - "code": "def points_are_in_a_straight_line( points, tolerance=1e-7 ):\n a = points[0]\n b = points[1]\n for c in points[2:]:\n if area_of_a_triangle_in_cartesian_space( a, b, c ) > tolerance:\n return False\n return True", - "docstring": "Check whether a set of points fall on a straight line.\n Calculates the areas of triangles formed by triplets of the points.\n Returns False is any of these areas are larger than the tolerance.\n\n Args:\n points (list(np.array)): list of Cartesian coordinates for each point.\n tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.\n\n Returns:\n (bool): True if all points fall on a straight line (within the allowed tolerance)." - }, - { - "code": "def list_queue(self, embed_last_unused_offers=False):\n if embed_last_unused_offers:\n params = {'embed': 'lastUnusedOffers'}\n else:\n params = {}\n response = self._do_request('GET', '/v2/queue', params=params)\n return self._parse_response(response, MarathonQueueItem, is_list=True, resource_name='queue')", - "docstring": "List all the tasks queued up or waiting to be scheduled.\n\n :returns: list of queue items\n :rtype: list[:class:`marathon.models.queue.MarathonQueueItem`]" - }, - { - "code": "def set_starting_ratio(self, ratio):\n from samplerate.lowlevel import src_set_ratio\n if self._state is None:\n self._create()\n src_set_ratio(self._state, ratio)\n self.ratio = ratio", - "docstring": "Set the starting conversion ratio for the next `read` call." - }, - { - "code": "def savetxt(\n fname, arrays, fmt=\"%.18e\", delimiter=\"\\t\", header=\"\", footer=\"\", comments=\"\n):\n r\n if not isinstance(arrays, list):\n arrays = [arrays]\n units = []\n for array in arrays:\n if hasattr(array, \"units\"):\n units.append(str(array.units))\n else:\n units.append(\"dimensionless\")\n if header != \"\" and not header.endswith(\"\\n\"):\n header += \"\\n\"\n header += \" Units\\n \" + \"\\t\".join(units)\n np.savetxt(\n fname,\n np.transpose(arrays),\n header=header,\n fmt=fmt,\n delimiter=delimiter,\n footer=footer,\n newline=\"\\n\",\n comments=comments,\n )", - "docstring": "r\"\"\"\n Write unyt_arrays with unit information to a text file.\n\n Parameters\n ----------\n fname : str\n The file to write the unyt_arrays to.\n arrays : list of unyt_arrays or single unyt_array\n The array(s) to write to the file.\n fmt : str or sequence of strs, optional\n A single format (%10.5f), or a sequence of formats.\n delimiter : str, optional\n String or character separating columns.\n header : str, optional\n String that will be written at the beginning of the file, before the\n unit header.\n footer : str, optional\n String that will be written at the end of the file.\n comments : str, optional\n String that will be prepended to the ``header`` and ``footer`` strings,\n to mark them as comments. Default: '# ', as expected by e.g.\n ``unyt.loadtxt``.\n\n Examples\n --------\n >>> import unyt as u\n >>> a = [1, 2, 3]*u.cm\n >>> b = [8, 10, 12]*u.cm/u.s\n >>> c = [2, 85, 9]*u.g\n >>> savetxt(\"sphere.dat\", [a,b,c], header='My sphere stuff',\n ... 
delimiter=\"\\t\") # doctest: +SKIP" - }, - { - "code": "def removeRows(self, position, rows, parent = QtCore.QModelIndex()):\n self.beginRemoveRows(parent, position, position + rows - 1)\n for i in range(rows):\n self.model.removeRow(position)\n self.endRemoveRows()\n if self.rowCount() == 0:\n self.emptied.emit(True)\n return True", - "docstring": "Removes parameters from the model. Emits and emptied True signal, if there are no parameters left.\n\n :param position: row location of parameters to remove\n :type position: int\n :param rows: number of parameters to remove\n :type rows: int\n :param parent: Required by QAbstractItemModel, can be safely ignored" - }, - { - "code": "def upload_directory(self, remote_path, local_path, progress=None):\n urn = Urn(remote_path, directory=True)\n if not urn.is_dir():\n raise OptionNotValid(name='remote_path', value=remote_path)\n if not os.path.isdir(local_path):\n raise OptionNotValid(name='local_path', value=local_path)\n if not os.path.exists(local_path):\n raise LocalResourceNotFound(local_path)\n if self.check(urn.path()):\n self.clean(urn.path())\n self.mkdir(remote_path)\n for resource_name in listdir(local_path):\n _remote_path = f'{urn.path()}{resource_name}'\n _local_path = os.path.join(local_path, resource_name)\n self.upload(local_path=_local_path, remote_path=_remote_path, progress=progress)", - "docstring": "Uploads directory to remote path on WebDAV server.\n In case directory is exist on remote server it will delete it and then upload directory with nested files and\n directories.\n\n :param remote_path: the path to directory for uploading on WebDAV server.\n :param local_path: the path to local directory for uploading.\n :param progress: Progress function. Not supported now." - }, - { - "code": "def line(ax, p1, p2, permutation=None, **kwargs):\n pp1 = project_point(p1, permutation=permutation)\n pp2 = project_point(p2, permutation=permutation)\n ax.add_line(Line2D((pp1[0], pp2[0]), (pp1[1], pp2[1]), **kwargs))", - "docstring": "Draws a line on `ax` from p1 to p2.\n\n Parameters\n ----------\n ax: Matplotlib AxesSubplot, None\n The subplot to draw on.\n p1: 2-tuple\n The (x,y) starting coordinates\n p2: 2-tuple\n The (x,y) ending coordinates\n kwargs:\n Any kwargs to pass through to Matplotlib." - }, - { - "code": "def call_plugins(self, step):\n for plugin in self.plugins:\n try:\n getattr(plugin, step)()\n except AttributeError:\n self.logger.debug(\"{} doesn't exist on plugin {}\".format(step, plugin))\n except TypeError:\n self.logger.debug(\"{} on plugin {} is not callable\".format(step, plugin))", - "docstring": "For each plugins, check if a \"step\" method exist on it, and call it\n\n Args:\n step (str): The method to search and call on each plugin" - }, - { - "code": "def sysinfo2magic(version_info=sys.version_info):\n vers_str = '.'.join([str(v) for v in version_info[0:3]])\n if version_info[3] != 'final':\n vers_str += ''.join([str(v) for v in version_info[3:]])\n if IS_PYPY:\n vers_str += 'pypy'\n else:\n try:\n import platform\n platform = platform.python_implementation()\n if platform in ('Jython', 'Pyston'):\n vers_str += platform\n pass\n except ImportError:\n pass\n return magics[vers_str]", - "docstring": "Convert a list sys.versions_info compatible list into a 'canonic'\n floating-point number which that can then be used to look up a\n magic number. Note that this can raise an exception." 
- }, - { - "code": "def load_project(self, filename, overwrite=False):\n r\n filename = self._parse_filename(filename=filename, ext='pnm')\n temp = {}\n with open(filename, 'rb') as f:\n d = pickle.load(f)\n if type(d) is dict:\n for name in d.keys():\n if isinstance(d[name], list):\n temp[name] = d[name]\n else:\n warnings.warn('File contents must be a dictionary, ' +\n 'of lists, or a single list')\n else:\n if isinstance(d, list):\n temp[filename] = d\n else:\n warnings.warn('File contents must be a dictionary, ' +\n 'of lists, or a single list')\n conflicts = set(temp.keys()).intersection(set(self.keys()))\n for name in list(temp.keys()):\n if name in conflicts:\n new_name = self._gen_name()\n warnings.warn('A project named ' + name + ' already exists, ' +\n 'renaming to ' + new_name)\n self[new_name] = temp[name]\n else:\n self[name] = temp[name]", - "docstring": "r\"\"\"\n Loads a Project from the specified 'pnm' file\n\n The loaded project is added to the Workspace . This will *not* delete\n any existing Projects in the Workspace and will rename any Projects\n being loaded if necessary.\n\n Parameters\n ----------\n filename : string or path object\n The name of the file to open. See Notes for more information.\n\n See Also\n --------\n load_workspace\n\n Notes\n -----\n The filename can be a string such as 'saved_file.pnm'. The string can\n include absolute path such as 'C:\\networks\\saved_file.pnm', or can\n be a relative path such as '..\\..\\saved_file.pnm', which will look\n 2 directories above the current working directory. Can also be a\n path object object such as that produced by ``pathlib`` or\n ``os.path`` in the Python standard library." - }, - { - "code": "def pkgdb(opts):\n return LazyLoader(\n _module_dirs(\n opts,\n 'pkgdb',\n base_path=os.path.join(SALT_BASE_PATH, 'spm')\n ),\n opts,\n tag='pkgdb'\n )", - "docstring": "Return modules for SPM's package database\n\n .. versionadded:: 2015.8.0" - }, - { - "code": "def sentence_starts(self):\n if not self.is_tagged(SENTENCES):\n self.tokenize_sentences()\n return self.starts(SENTENCES)", - "docstring": "The list of start positions representing ``sentences`` layer elements." - }, - { - "code": "def force_process_ordered(self):\n for instance_id, messages in self.replicas.take_ordereds_out_of_turn():\n num_processed = 0\n for message in messages:\n self.try_processing_ordered(message)\n num_processed += 1\n logger.info('{} processed {} Ordered batches for instance {} '\n 'before starting catch up'\n .format(self, num_processed, instance_id))", - "docstring": "Take any messages from replica that have been ordered and process\n them, this should be done rarely, like before catchup starts\n so a more current LedgerStatus can be sent.\n can be called either\n 1. when node is participating, this happens just before catchup starts\n so the node can have the latest ledger status or\n 2. 
when node is not participating but a round of catchup is about to be\n started, here is forces all the replica ordered messages to be appended\n to the stashed ordered requests and the stashed ordered requests are\n processed with appropriate checks" - }, - { - "code": "def write_text(filename, tracklisting):\n print(\"Saving text file.\")\n try:\n write_listing_to_textfile(filename + '.txt', tracklisting)\n except IOError:\n print(\"Cannot write text file to path: {}\".format(filename))\n print(\"Printing tracklisting here instead.\")\n print(tracklisting.encode(sys.stdout.encoding, errors='ignore'))", - "docstring": "Handle writing tracklisting to text." - }, - { - "code": "def _teardown(self):\n \"Handles the restoration of any potential global state set.\"\n self.example.after(self.context)\n if self.is_root_runner:\n run.after_all.execute(self.context)\n self.has_ran = True", - "docstring": "Handles the restoration of any potential global state set." - }, - { - "code": "def create(self, iterations: int = 1) -> List[JSON]:\n return [self.schema() for _ in range(iterations)]", - "docstring": "Return filled schema.\n\n Create a list of a filled schemas with elements in\n an amount of **iterations**.\n\n :param iterations: Amount of iterations.\n :return: List of willed schemas." - }, - { - "code": "def autosave(self):\n if self.button_autosave.is_checked():\n self.save_file(_os.path.join(self._autosave_directory, \"%04d \" % (self.number_file.get_value()) + self._label_path.get_text()))\n self.number_file.increment()", - "docstring": "Autosaves the currently stored data, but only if autosave is checked!" - }, - { - "code": "def delete_before_cursor(self, count=1):\n assert count >= 0\n deleted = ''\n if self.cursor_position > 0:\n deleted = self.text[self.cursor_position - count:self.cursor_position]\n new_text = self.text[:self.cursor_position - count] + self.text[self.cursor_position:]\n new_cursor_position = self.cursor_position - len(deleted)\n self.document = Document(new_text, new_cursor_position)\n return deleted", - "docstring": "Delete specified number of characters before cursor and return the\n deleted text." - }, - { - "code": "def get_crypt_key(key_path):\n key_path = os.path.expanduser(key_path)\n if os.path.exists(key_path):\n with open(key_path, 'r') as data:\n key = data.read()\n else:\n key = Fernet.generate_key()\n with open(key_path, 'w') as output:\n output.write(key)\n return key", - "docstring": "Get the user's PredixPy manifest key. Generate and store one if not\n yet generated." - }, - { - "code": "def to_excess_returns(returns, rf, nperiods=None):\n if type(rf) is float and nperiods is not None:\n _rf = deannualize(rf, nperiods)\n else:\n _rf = rf\n return returns - _rf", - "docstring": "Given a series of returns, it will return the excess returns over rf.\n\n Args:\n * returns (Series, DataFrame): Returns\n * rf (float, Series): `Risk-Free rate(s) `_ expressed in annualized term or return series\n * nperiods (int): Optional. 
If provided, will convert rf to different\n frequency using deannualize only if rf is a float\n Returns:\n * excess_returns (Series, DataFrame): Returns - rf" - }, - { - "code": "def _upload_missing_image(self, type, img):\n for directory in images_directories(type):\n image = os.path.join(directory, img)\n if os.path.exists(image):\n self.project.controller.notification.emit(\"log.info\", {\"message\": \"Uploading missing image {}\".format(img)})\n try:\n with open(image, 'rb') as f:\n yield from self._compute.post(\"/{}/images/{}\".format(self._node_type, os.path.basename(img)), data=f, timeout=None)\n except OSError as e:\n raise aiohttp.web.HTTPConflict(text=\"Can't upload {}: {}\".format(image, str(e)))\n self.project.controller.notification.emit(\"log.info\", {\"message\": \"Upload finished for {}\".format(img)})\n return True\n return False", - "docstring": "Search an image on local computer and upload it to remote compute\n if the image exists" - }, - { - "code": "def main(self):\n assert self.params.func, \"No subcommand defined in `ElastiCluster.main()\"\n try:\n return self.params.func()\n except Exception as err:\n log.error(\"Error: %s\", err)\n if self.params.verbose > 2:\n import traceback\n traceback.print_exc()\n print(\"Aborting because of errors: {err}.\".format(err=err))\n sys.exit(1)", - "docstring": "This is the main entry point of the ElastiCluster CLI.\n\n First the central configuration is created, which can be altered\n through the command line interface. Then the given command from\n the command line interface is called." - }, - { - "code": "def get_documents_in_database(self, with_id=True):\n documents = []\n for coll in self.get_collection_names():\n documents += self.get_documents_in_collection(\n coll,\n with_id=with_id\n )\n return documents", - "docstring": "Gets all documents in database\n\n :param with_id: True iff each document should also come with its id\n :return: List of documents in collection in database" - }, - { - "code": "def items(self, path=None):\n items = list(self.iteritems())\n if path is not None:\n path += '$'\n regex = re.compile(path)\n items = [i for i in items if regex.match(i.path)]\n return items", - "docstring": "Returns set of items.\n\n :param path: Regex filter on item path.\n\n :return: List of Item class objects." - }, - { - "code": "def locked_blocks_iterator(blockfile, start_info=(0, 0), cached_headers=50, batch_size=50):\n f = blockfile\n current_state = []\n def change_state(bc, ops):\n for op, bh, work in ops:\n if op == 'add':\n current_state.append(bh)\n pass\n else:\n current_state.pop()\n bc = BlockChain()\n bc.add_change_callback(change_state)\n bhs = []\n index = 0\n info_offset = start_info\n while 1:\n v = blockfile.next_offset(info_offset)\n if v is None:\n break\n block_offset, info_offset = v\n f.jump_to(block_offset)\n bh = Block.parse_as_header(f)\n bh.info = block_offset\n bhs.append(bh)\n if len(bhs) > batch_size:\n bc.add_headers(bhs)\n bhs = []\n if len(current_state) > cached_headers:\n for bh in current_state[:cached_headers]:\n bh.index = index\n yield bh\n index += 1\n bc.lock_to_index(index)\n current_state = current_state[cached_headers:]", - "docstring": "This method loads blocks from disk, skipping any orphan blocks." 
- }, - { - "code": "def _kak_decomposition_to_operations(q0: ops.Qid,\n q1: ops.Qid,\n kak: linalg.KakDecomposition,\n allow_partial_czs: bool,\n atol: float = 1e-8\n ) -> List[ops.Operation]:\n b0, b1 = kak.single_qubit_operations_before\n pre = [_do_single_on(b0, q0, atol=atol), _do_single_on(b1, q1, atol=atol)]\n a0, a1 = kak.single_qubit_operations_after\n post = [_do_single_on(a0, q0, atol=atol), _do_single_on(a1, q1, atol=atol)]\n return list(cast(Iterable[ops.Operation], ops.flatten_op_tree([\n pre,\n _non_local_part(q0,\n q1,\n kak.interaction_coefficients,\n allow_partial_czs,\n atol=atol),\n post,\n ])))", - "docstring": "Assumes that the decomposition is canonical." - }, - { - "code": "def width_radius_changed_cb(self, widget, val):\n self.width_radius = val\n self.redraw_cuts()\n self.replot_all()\n return True", - "docstring": "Callback executed when the Width radius is changed." - }, - { - "code": "def center_start(r, window_size):\n res = copy.copy(r)\n res.end = res.start + window_size / 2\n res.start = res.end - window_size\n return res", - "docstring": "Center a region on its start and expand it to window_size bases.\n\n :return: the new region." - }, - { - "code": "def env_proxy_settings(selected_settings=None):\n SUPPORTED_SETTINGS = {\n 'http': 'HTTP_PROXY',\n 'https': 'HTTPS_PROXY',\n 'no_proxy': 'NO_PROXY',\n 'ftp': 'FTP_PROXY'\n }\n if selected_settings is None:\n selected_settings = SUPPORTED_SETTINGS\n selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()\n if k in selected_settings]\n proxy_settings = {}\n for var in selected_vars:\n var_val = os.getenv(var)\n if var_val:\n proxy_settings[var] = var_val\n proxy_settings[var.lower()] = var_val\n charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))\n if charm_var_val:\n proxy_settings[var] = charm_var_val\n proxy_settings[var.lower()] = charm_var_val\n if 'no_proxy' in proxy_settings:\n if _contains_range(proxy_settings['no_proxy']):\n log(RANGE_WARNING, level=WARNING)\n return proxy_settings if proxy_settings else None", - "docstring": "Get proxy settings from process environment variables.\n\n Get charm proxy settings from environment variables that correspond to\n juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,\n see lp:1782236) in a format suitable for passing to an application that\n reacts to proxy settings passed as environment variables. Some applications\n support lowercase or uppercase notation (e.g. curl), some support only\n lowercase (e.g. wget), there are also subjectively rare cases of only\n uppercase notation support. no_proxy CIDR and wildcard support also varies\n between runtimes and applications as there is no enforced standard.\n\n Some applications may connect to multiple destinations and expose config\n options that would affect only proxy settings for a specific destination\n these should be handled in charms in an application-specific manner.\n\n :param selected_settings: format only a subset of possible settings\n :type selected_settings: list\n :rtype: Option(None, dict[str, str])" - }, - { - "code": "def delete_snapshot(self, snapshot_id):\n query = self.query_factory(\n action=\"DeleteSnapshot\", creds=self.creds, endpoint=self.endpoint,\n other_params={\"SnapshotId\": snapshot_id})\n d = query.submit()\n return d.addCallback(self.parser.truth_return)", - "docstring": "Remove a previously created snapshot." 
- }, - { - "code": "def _ensure_arguments_are_provided(expected_types, arguments):\n expected_arg_names = set(six.iterkeys(expected_types))\n provided_arg_names = set(six.iterkeys(arguments))\n if expected_arg_names != provided_arg_names:\n missing_args = expected_arg_names - provided_arg_names\n unexpected_args = provided_arg_names - expected_arg_names\n raise GraphQLInvalidArgumentError(u'Missing or unexpected arguments found: '\n u'missing {}, unexpected '\n u'{}'.format(missing_args, unexpected_args))", - "docstring": "Ensure that all arguments expected by the query were actually provided." - }, - { - "code": "def match_rules(tree, rules, fun=None, multi=False):\n if multi:\n context = match_rules_context_multi(tree, rules)\n else:\n context = match_rules_context(tree, rules)\n if not context:\n return None\n if fun:\n args = fun.__code__.co_varnames\n if multi:\n res = []\n for c in context:\n action_context = {}\n for arg in args:\n if arg in c:\n action_context[arg] = c[arg]\n res.append(fun(**action_context))\n return res\n else:\n action_context = {}\n for arg in args:\n if arg in context:\n action_context[arg] = context[arg]\n return fun(**action_context)\n else:\n return context", - "docstring": "Matches a Tree structure with the given query rules.\n\n Query rules are represented as a dictionary of template to action.\n Action is either a function, or a dictionary of subtemplate parameter to rules::\n\n rules = { 'template' : { 'key': rules } }\n | { 'template' : {} }\n\n Args:\n tree (Tree): Parsed tree structure\n rules (dict): A dictionary of query rules\n fun (function): Function to call with context (set to None if you want to return context)\n multi (Bool): If True, returns all matched contexts, else returns first matched context\n Returns:\n Contexts from matched rules" - }, - { - "code": "def initialize_communities_bucket():\n bucket_id = UUID(current_app.config['COMMUNITIES_BUCKET_UUID'])\n if Bucket.query.get(bucket_id):\n raise FilesException(\"Bucket with UUID {} already exists.\".format(\n bucket_id))\n else:\n storage_class = current_app.config['FILES_REST_DEFAULT_STORAGE_CLASS']\n location = Location.get_default()\n bucket = Bucket(id=bucket_id,\n location=location,\n default_storage_class=storage_class)\n db.session.add(bucket)\n db.session.commit()", - "docstring": "Initialize the communities file bucket.\n\n :raises: `invenio_files_rest.errors.FilesException`" - }, - { - "code": "def create_archaius(self):\n utils.banner(\"Creating S3\")\n s3.init_properties(env=self.env, app=self.app)", - "docstring": "Create S3 bucket for Archaius." - }, - { - "code": "def disable_paging(self, command=\"pager off\", delay_factor=1):\n return super(PluribusSSH, self).disable_paging(\n command=command, delay_factor=delay_factor\n )", - "docstring": "Make sure paging is disabled." 
- }, - { - "code": "def get_historical_accounts():\n if os.environ.get('SWAG_BUCKET', False):\n swag_opts = {\n 'swag.type': 's3',\n 'swag.bucket_name': os.environ['SWAG_BUCKET'],\n 'swag.data_file': os.environ.get('SWAG_DATA_FILE', 'accounts.json'),\n 'swag.region': os.environ.get('SWAG_REGION', 'us-east-1')\n }\n swag = SWAGManager(**parse_swag_config_options(swag_opts))\n search_filter = f\"[?provider=='aws' && owner=='{os.environ['SWAG_OWNER']}' && account_status!='deleted'\"\n if parse_boolean(os.environ.get('TEST_ACCOUNTS_ONLY')):\n search_filter += \" && environment=='test'\"\n search_filter += ']'\n accounts = swag.get_service_enabled('historical', search_filter=search_filter)\n else:\n accounts = [{'id': account_id} for account_id in os.environ['ENABLED_ACCOUNTS'].split(',')]\n return accounts", - "docstring": "Fetches valid accounts from SWAG if enabled or a list accounts." - }, - { - "code": "def _ParseNumericOption(cls, options, argument_name, default_value=None):\n argument_value = getattr(options, argument_name, None)\n if argument_value is None:\n return default_value\n if not isinstance(argument_value, py2to3.INTEGER_TYPES):\n raise errors.BadConfigOption(\n 'Unsupported option: {0:s} integer type required.'.format(\n argument_name))\n return argument_value", - "docstring": "Parses a numeric command line argument.\n\n Args:\n options (argparse.Namespace): parser options.\n argument_name (str): name of the command line argument.\n default_value (Optional[int]): default value of the command line argument.\n\n Returns:\n int: command line argument value or the default value if the command line\n argument is not set\n\n Raises:\n BadConfigOption: if the command line argument value cannot be converted\n to a Unicode string." - }, - { - "code": "def create_transaction(self):\n request = urlllib.request.urlopen(\n urllib.parse.urljoin(self.base_url, 'fcr:tx'))\n self.transaction = request.read()", - "docstring": "Method creates a new transaction resource and sets instance's\n transaction." 
- }, - { - "code": "def from_job_desc(cls, warm_start_config):\n if not warm_start_config or \\\n WARM_START_TYPE not in warm_start_config or \\\n PARENT_HYPERPARAMETER_TUNING_JOBS not in warm_start_config:\n return None\n parents = []\n for parent in warm_start_config[PARENT_HYPERPARAMETER_TUNING_JOBS]:\n parents.append(parent[HYPERPARAMETER_TUNING_JOB_NAME])\n return cls(warm_start_type=WarmStartTypes(warm_start_config[WARM_START_TYPE]),\n parents=parents)", - "docstring": "Creates an instance of ``WarmStartConfig`` class, from warm start configuration response from\n DescribeTrainingJob.\n\n Args:\n warm_start_config (dict): The expected format of the ``warm_start_config`` contains two first-class\n fields:\n * \"type\": Type of warm start tuner, currently two supported types - \"IdenticalDataAndAlgorithm\" and\n \"TransferLearning\".\n * \"parents\": List of tuning job names from which the warm start should be done.\n\n Returns:\n sagemaker.tuner.WarmStartConfig: De-serialized instance of WarmStartConfig containing the type and parents\n provided as part of ``warm_start_config``.\n\n Examples:\n >>> warm_start_config = WarmStartConfig.from_job_desc(warm_start_config={\n >>> \"WarmStartType\":\"TransferLearning\",\n >>> \"ParentHyperParameterTuningJobs\": [\n >>> {'HyperParameterTuningJobName': \"p1\"},\n >>> {'HyperParameterTuningJobName': \"p2\"},\n >>> ]\n >>>})\n >>> warm_start_config.type\n \"TransferLearning\"\n >>> warm_start_config.parents\n [\"p1\",\"p2\"]" - }, - { - "code": "def update_event_types(self):\n self.idx_evt_type.clear()\n self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection)\n event_types = sorted(self.parent.notes.annot.event_types,\n key=str.lower)\n for ty in event_types:\n item = QListWidgetItem(ty)\n self.idx_evt_type.addItem(item)", - "docstring": "Update event types in event type box." - }, - { - "code": "def do_session_endpoint(self, params):\n if invalid_hosts(params.hosts):\n self.show_output(\"List of hosts has the wrong syntax.\")\n return\n try:\n info_by_id = self._zk.sessions_info(params.hosts)\n except XClient.CmdFailed as ex:\n self.show_output(str(ex))\n return\n info = info_by_id.get(params.session, None)\n if info is None:\n self.show_output(\"No session info for %s.\", params.session)\n else:\n self.show_output(\"%s\", info.resolved_endpoints if params.reverse else info.endpoints)", - "docstring": "\\x1b[1mNAME\\x1b[0m\n session_endpoint - Gets the session's IP endpoints\n\n\\x1b[1mSYNOPSIS\\x1b[0m\n session_endpoint [reverse_lookup]\n\n\\x1b[1mDESCRIPTION\\x1b[0m\n where hosts is a list of hosts in the host1[:port1][,host2[:port2]],... 
form\n\n\\x1b[1mOPTIONS\\x1b[0m\n * reverse_lookup: convert IPs back to hostnames (default: false)\n\n\\x1b[1mEXAMPLES\\x1b[0m\n > session_endpoint 0xa4788b919450e6 10.0.0.1,10.0.0.2,10.0.0.3\n 10.3.2.12:54250 10.0.0.2:2181" - }, - { - "code": "def tendermint_version_is_compatible(running_tm_ver):\n tm_ver = running_tm_ver.split('-')\n if not tm_ver:\n return False\n for ver in __tm_supported_versions__:\n if version.parse(ver) == version.parse(tm_ver[0]):\n return True\n return False", - "docstring": "Check Tendermint compatability with BigchainDB server\n\n :param running_tm_ver: Version number of the connected Tendermint instance\n :type running_tm_ver: str\n :return: True/False depending on the compatability with BigchainDB server\n :rtype: bool" - }, - { - "code": "def target(self):\n task = yield self.task()\n if not task:\n yield defer.succeed(None)\n defer.returnValue(None)\n defer.returnValue(task.target)", - "docstring": "Find the target name for this build.\n\n :returns: deferred that when fired returns the build task's target\n name. If we could not determine the build task, or the task's\n target, return None." - }, - { - "code": "def perform(self, node, inputs, output_storage):\n x = inputs[0]\n z = output_storage[0]\n z[0] = np.asarray(self.operator(x))", - "docstring": "Evaluate this node's computation.\n\n Parameters\n ----------\n node : `theano.gof.graph.Apply`\n The node of this Op in the computation graph.\n inputs : 1-element list of arrays\n Contains an array (usually `numpy.ndarray`) of concrete values\n supplied for the symbolic input variable ``x``.\n output_storage : 1-element list of 1-element lists\n The single 1-element list contained in ``output_storage``\n by default contains only ``None``. This value must be replaced\n by the result of the application of `odl_op`.\n\n Examples\n --------\n Perform a matrix multiplication:\n\n >>> space = odl.rn(3)\n >>> matrix = np.array([[1, 0, 1],\n ... [0, 1, 1]], dtype=float)\n >>> op = odl.MatrixOperator(matrix, domain=space)\n >>> matrix_op = TheanoOperator(op)\n >>> x = theano.tensor.dvector()\n >>> op_x = matrix_op(x)\n >>> op_func = theano.function([x], op_x)\n >>> op_func([1, 2, 3])\n array([ 4., 5.])\n\n Evaluate a functional, i.e., an operator with scalar output:\n\n >>> space = odl.rn(3)\n >>> functional = odl.solvers.L2NormSquared(space)\n >>> func_op = TheanoOperator(functional)\n >>> x = theano.tensor.dvector()\n >>> op_x = func_op(x)\n >>> op_func = theano.function([x], op_x)\n >>> op_func([1, 2, 3])\n array(14.0)" - }, - { - "code": "def namedb_update_must_equal( rec, change_fields ):\n must_equal = []\n if len(change_fields) != 0:\n given = rec.keys()\n for k in given:\n if k not in change_fields:\n must_equal.append(k)\n return must_equal", - "docstring": "Generate the set of fields that must stay the same across an update." - }, - { - "code": "def _polarBreaks(self):\n if self.orbit_index is None:\n raise ValueError('Orbit properties must be defined at ' +\n 'pysat.Instrument object instantiation.' 
+ \n 'See Instrument docs.')\n else:\n try:\n self.sat[self.orbit_index]\n except ValueError:\n raise ValueError('Provided orbit index does not appear to ' +\n 'exist in loaded data')\n pos = (self.sat[self.orbit_index] >= 0)\n npos = -pos\n change = (pos.values[:-1] & npos.values[1:]) | (npos.values[:-1] &\n pos.values[1:])\n ind, = np.where(change)\n ind += 1\n ut_diff = Series(self.sat.data.index).diff()\n ut_ind, = np.where(ut_diff / self.orbit_period > 0.95)\n if len(ut_ind) > 0:\n ind = np.hstack((ind, ut_ind))\n ind = np.sort(ind)\n ind = np.unique(ind)\n if ind[0] != 0:\n ind = np.hstack((np.array([0]), ind))\n num_orbits = len(ind)\n self._orbit_breaks = ind\n self.num = num_orbits", - "docstring": "Determine where breaks in a polar orbiting satellite orbit occur.\n\n Looks for sign changes in latitude (magnetic or geographic) as well as \n breaks in UT." - }, - { - "code": "def save_state_machine_as(path=None, recent_opened_notification=False, as_copy=False):\n state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model\n selected_state_machine_model = state_machine_manager_model.get_selected_state_machine_model()\n if selected_state_machine_model is None:\n logger.warning(\"Can not 'save state machine as' because no state machine is selected.\")\n return False\n if path is None:\n if interface.create_folder_func is None:\n logger.error(\"No function defined for creating a folder\")\n return False\n folder_name = selected_state_machine_model.state_machine.root_state.name\n path = interface.create_folder_func(\"Please choose a root folder and a folder name for the state-machine. \"\n \"The default folder name is the name of the root state.\",\n format_default_folder_name(folder_name))\n if path is None:\n logger.warning(\"No valid path specified\")\n return False\n previous_path = selected_state_machine_model.state_machine.file_system_path\n if not as_copy:\n marked_dirty = selected_state_machine_model.state_machine.marked_dirty\n recent_opened_notification = recent_opened_notification and (not previous_path == path or marked_dirty)\n selected_state_machine_model.state_machine.file_system_path = path\n result = save_state_machine(delete_old_state_machine=True,\n recent_opened_notification=recent_opened_notification,\n as_copy=as_copy, copy_path=path)\n library_manager_model.state_machine_was_stored(selected_state_machine_model, previous_path)\n return result", - "docstring": "Store selected state machine to path\n\n If there is no handed path the interface dialog \"create folder\" is used to collect one. The state machine finally\n is stored by the save_state_machine function.\n\n :param str path: Path of state machine folder where selected state machine should be stored\n :param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths\n :param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path\n :return: True if successfully stored, False if the storing process was canceled or stopped by condition fail\n :rtype bool:" - }, - { - "code": "def cast(attrs, inputs, proto_obj):\n try:\n from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE\n except ImportError:\n raise ImportError(\"Onnx and protobuf need to be installed. 
\"\n + \"Instructions to install - https://github.com/onnx/onnx\")\n new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})\n new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]\n return 'cast', new_attrs, inputs", - "docstring": "Cast input to a given dtype" - }, - { - "code": "def delete_many(self, query):\n items = self.find(query)\n result = [\n self.table.remove(where(u'_id') == item[u'_id'])\n for item in items\n ]\n if query == {}:\n self.table._last_id = 0\n return DeleteResult(raw_result=result)", - "docstring": "Removes all items matching the mongo query\n\n :param query: dictionary representing the mongo query\n :return: DeleteResult" - }, - { - "code": "def count(cls, user_id):\r\n return cls.query.with_entities(\r\n cls.user_id).filter_by(user_id=user_id).count()", - "docstring": "Count sessions with user_id" - }, - { - "code": "def send(self):\n import requests\n if not self._is_enabled():\n return\n self.collect()\n logger.debug(\"Sending analytics: {}\".format(self.info))\n try:\n requests.post(self.URL, json=self.info, timeout=self.TIMEOUT_POST)\n except requests.exceptions.RequestException as exc:\n logger.debug(\"Failed to send analytics: {}\".format(str(exc)))", - "docstring": "Collect and send analytics." - }, - { - "code": "def placeholders(cls,dic):\n keys = [str(x) for x in dic]\n entete = \",\".join(keys)\n placeholders = \",\".join(cls.named_style.format(x) for x in keys)\n entete = f\"({entete})\"\n placeholders = f\"({placeholders})\"\n return entete, placeholders", - "docstring": "Placeholders for fields names and value binds" - }, - { - "code": "def get_all_boards(*args, **kwargs):\n https = kwargs.get('https', args[1] if len(args) > 1 else False)\n url_generator = Url(None, https)\n _fetch_boards_metadata(url_generator)\n return get_boards(_metadata.keys(), *args, **kwargs)", - "docstring": "Returns every board on 4chan.\n\n Returns:\n dict of :class:`basc_py4chan.Board`: All boards." - }, - { - "code": "def validate_feature_api(project, force=False):\n if not force and not project.on_pr():\n raise SkippedValidationTest('Not on PR')\n validator = FeatureApiValidator(project)\n result = validator.validate()\n if not result:\n raise InvalidFeatureApi", - "docstring": "Validate feature API" - }, - { - "code": "def drop_scored_calls(self,names):\n def _remove(calls,names):\n d = dict([(k,v) for k,v in calls.items() if k not in names])\n return d\n if isinstance(names, str):\n names = [names]\n output = self.copy()\n output['scored_calls'] = output['scored_calls'].\\\n apply(lambda x: _remove(x,names))\n return output", - "docstring": "Take a name or list of scored call names and drop those from the scored calls\n\n Args:\n names (list): list of names to drop or a single string name to drop\n\n Returns:\n CellDataFrame: The CellDataFrame modified." - }, - { - "code": "def set_rate_BC(self, pores, values):\n r\n self._set_BC(pores=pores, bctype='rate', bcvalues=values, mode='merge')", - "docstring": "r\"\"\"\n Apply constant rate boundary conditons to the specified pore\n locations. This is similar to a Neumann boundary condition, but is\n slightly different since it's the conductance multiplied by the\n gradient, while Neumann conditions specify just the gradient.\n\n Parameters\n ----------\n pores : array_like\n The pore indices where the condition should be applied\n\n values : scalar or array_like\n The value to of the boundary condition. 
If a scalar is supplied\n it is assigne to all locations, and if a vector is applied it\n corresponds directy to the locations given in ``pores``.\n\n Notes\n -----\n The definition of ``quantity`` is specified in the algorithm's\n ``settings``, e.g. ``alg.settings['quentity'] = 'pore.pressure'``." - }, - { - "code": "def _close_brokerclients(self, clients):\n def _log_close_failure(failure, brokerclient):\n log.debug(\n 'BrokerClient: %s close result: %s: %s', brokerclient,\n failure.type.__name__, failure.getErrorMessage())\n def _clean_close_dlist(result, close_dlist):\n if close_dlist == self.close_dlist:\n self.close_dlist = None\n if not self.close_dlist:\n dList = []\n else:\n log.debug(\"%r: _close_brokerclients has nested deferredlist: %r\",\n self, self.close_dlist)\n dList = [self.close_dlist]\n for brokerClient in clients:\n log.debug(\"Calling close on: %r\", brokerClient)\n d = brokerClient.close().addErrback(_log_close_failure, brokerClient)\n dList.append(d)\n self.close_dlist = DeferredList(dList)\n self.close_dlist.addBoth(_clean_close_dlist, self.close_dlist)", - "docstring": "Close the given broker clients.\n\n :param clients: Iterable of `_KafkaBrokerClient`" - }, - { - "code": "def get_sortkey(table):\n wfs = WebFeatureService(url=bcdata.OWS_URL, version=\"2.0.0\")\n return sorted(wfs.get_schema(\"pub:\" + table)[\"properties\"].keys())[0]", - "docstring": "Get a field to sort by" - }, - { - "code": "def put(self, url: StrOrURL,\n *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':\n return _RequestContextManager(\n self._request(hdrs.METH_PUT, url,\n data=data,\n **kwargs))", - "docstring": "Perform HTTP PUT request." - }, - { - "code": "def clean(image, mask=None, iterations = 1):\n global clean_table\n if mask is None:\n masked_image = image\n else:\n masked_image = image.astype(bool).copy()\n masked_image[~mask] = False\n result = table_lookup(masked_image, clean_table, False, iterations)\n if not mask is None:\n result[~mask] = image[~mask]\n return result", - "docstring": "Remove isolated pixels\n \n 0 0 0 0 0 0\n 0 1 0 -> 0 0 0\n 0 0 0 0 0 0\n \n Border pixels and pixels adjoining masks are removed unless one valid\n neighbor is true." - }, - { - "code": "def assure_container(fnc):\n @wraps(fnc)\n def _wrapped(self, container, *args, **kwargs):\n if not isinstance(container, Container):\n container = self.get(container)\n return fnc(self, container, *args, **kwargs)\n return _wrapped", - "docstring": "Assures that whether a Container or a name of a container is passed, a\n Container object is available." - }, - { - "code": "def _recoverable(self, method, *args, **kwargs):\n while True:\n try:\n return method(*args, **kwargs)\n except Exception as exc:\n with self._operational_lock:\n _LOGGER.debug(\"Call to retryable %r caused %s.\", method, exc)\n if not self._should_recover(exc):\n self.close()\n _LOGGER.debug(\"Not retrying %r due to %s.\", method, exc)\n self._finalize(exc)\n raise exc\n _LOGGER.debug(\"Re-opening stream from retryable %r.\", method)\n self._reopen()", - "docstring": "Wraps a method to recover the stream and retry on error.\n\n If a retryable error occurs while making the call, then the stream will\n be re-opened and the method will be retried. This happens indefinitely\n so long as the error is a retryable one. 
If an error occurs while\n re-opening the stream, then this method will raise immediately and\n trigger finalization of this object.\n\n Args:\n method (Callable[..., Any]): The method to call.\n args: The args to pass to the method.\n kwargs: The kwargs to pass to the method." - }, - { - "code": "def StrPrefixOf(prefix, input_string):\n return re.match(r'^' + prefix.value, input_string.value) is not None", - "docstring": "Return True if the concrete value of the input_string starts with prefix\n otherwise false.\n\n :param prefix: prefix we want to check\n :param input_string: the string we want to check\n\n :return: True if the input_string starts with prefix else false" - }, - { - "code": "def encode_dict(dynamizer, value):\n encoded_dict = {}\n for k, v in six.iteritems(value):\n encoded_type, encoded_value = dynamizer.raw_encode(v)\n encoded_dict[k] = {\n encoded_type: encoded_value,\n }\n return 'M', encoded_dict", - "docstring": "Encode a dict for the DynamoDB format" - }, - { - "code": "def client_credentials(self, client_id, client_secret, audience,\n grant_type='client_credentials'):\n return self.post(\n 'https://{}/oauth/token'.format(self.domain),\n data={\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'audience': audience,\n 'grant_type': grant_type,\n },\n headers={'Content-Type': 'application/json'}\n )", - "docstring": "Client credentials grant\n\n This is the OAuth 2.0 grant that server processes utilize in\n order to access an API. Use this endpoint to directly request\n an access_token by using the Application Credentials (a Client Id and\n a Client Secret).\n\n Args:\n grant_type (str): Denotes the flow you're using. For client credentials\n use client_credentials\n\n client_id (str): your application's client Id\n\n client_secret (str): your application's client Secret\n\n audience (str): The unique identifier of the target API you want to access.\n\n Returns:\n access_token" - }, - { - "code": "async def iterStormPodes(self, text, opts=None, user=None):\n if user is None:\n user = self.user\n dorepr = False\n dopath = False\n self.core._logStormQuery(text, user)\n if opts is not None:\n dorepr = opts.get('repr', False)\n dopath = opts.get('path', False)\n async for node, path in self.storm(text, opts=opts, user=user):\n pode = node.pack(dorepr=dorepr)\n pode[1]['path'] = path.pack(path=dopath)\n yield pode", - "docstring": "Yield packed node tuples for the given storm query text." 
- }, - { - "code": "def on_get(resc, req, resp, rid):\n signals.pre_req.send(resc.model)\n signals.pre_req_find.send(resc.model)\n model = find(resc.model, rid)\n props = to_rest_model(model, includes=req.includes)\n resp.last_modified = model.updated\n resp.serialize(props)\n signals.post_req.send(resc.model)\n signals.post_req_find.send(resc.model)", - "docstring": "Find the model by id & serialize it back" - }, - { - "code": "def _input_as_lines(self, data):\n filename = self._input_filename = \\\n FilePath(self.getTmpFilename(self.TmpDir))\n filename = FilePath(filename)\n data_file = open(filename, 'w')\n data_to_file = '\\n'.join([str(d).strip('\\n') for d in data])\n data_file.write(data_to_file)\n data_file.close()\n return filename", - "docstring": "Write a seq of lines to a temp file and return the filename string\n\n data: a sequence to be written to a file, each element of the\n sequence will compose a line in the file\n * Note: the result will be the filename as a FilePath object\n (which is a string subclass).\n\n * Note: '\\n' will be stripped off the end of each sequence element\n before writing to a file in order to avoid multiple new lines\n accidentally be written to a file" - }, - { - "code": "def calc_qvr_v1(self):\n con = self.parameters.control.fastaccess\n flu = self.sequences.fluxes.fastaccess\n for i in range(2):\n if (flu.avr[i] > 0.) and (flu.uvr[i] > 0.):\n flu.qvr[i] = (con.ekv[i]*con.skv[i] *\n flu.avr[i]**(5./3.)/flu.uvr[i]**(2./3.)*con.gef**.5)\n else:\n flu.qvr[i] = 0.", - "docstring": "Calculate the discharge of both outer embankments after\n Manning-Strickler.\n\n Required control parameters:\n |EKV|\n |SKV|\n |Gef|\n\n Required flux sequence:\n |AVR|\n |UVR|\n\n Calculated flux sequence:\n |QVR|\n\n Examples:\n\n For appropriate strictly positive values:\n\n >>> from hydpy.models.lstream import *\n >>> parameterstep()\n >>> ekv(2.0)\n >>> skv(50.0)\n >>> gef(0.01)\n >>> fluxes.avr = 3.0\n >>> fluxes.uvr = 7.0\n >>> model.calc_qvr_v1()\n >>> fluxes.qvr\n qvr(17.053102, 17.053102)\n\n For zero or negative values of the flown through surface or\n the wetted perimeter:\n\n >>> fluxes.avr = -1.0, 3.0\n >>> fluxes.uvr = 7.0, 0.0\n >>> model.calc_qvr_v1()\n >>> fluxes.qvr\n qvr(0.0, 0.0)" - }, - { - "code": "def get_workspaces(self):\n data = self.message(MessageType.GET_WORKSPACES, '')\n return json.loads(data, object_hook=WorkspaceReply)", - "docstring": "Get a list of workspaces. Returns JSON-like data, not a Con instance.\n\n You might want to try the :meth:`Con.workspaces` instead if the info\n contained here is too little.\n\n :rtype: List of :class:`WorkspaceReply`." - }, - { - "code": "def start(self):\n for client in self._snippet_clients.values():\n if not client.is_alive:\n self._device.log.debug('Starting SnippetClient<%s>.',\n client.package)\n client.start_app_and_connect()\n else:\n self._device.log.debug(\n 'Not startng SnippetClient<%s> because it is already alive.',\n client.package)", - "docstring": "Starts all the snippet clients under management." - }, - { - "code": "def codes_match_any(self, codes):\n for selector in self.code_selectors:\n if selector.code in codes:\n return True\n return False", - "docstring": "Match any code." 
- }, - { - "code": "def parse_env_file(envfile):\n data = {}\n for line_no, line in enumerate(envfile):\n line = line.strip()\n if not line or line.startswith('\n continue\n if '=' not in line:\n raise ConfigurationError('Env file line missing = operator (line %s)' % (line_no + 1))\n k, v = line.split('=', 1)\n k = k.strip()\n if not ENV_KEY_RE.match(k):\n raise ConfigurationError(\n 'Invalid variable name \"%s\" in env file (line %s)' % (k, (line_no + 1))\n )\n v = v.strip().strip('\\'\"')\n data[k] = v\n return data", - "docstring": "Parse the content of an iterable of lines as ``.env``.\n\n Return a dict of config variables.\n\n >>> parse_env_file(['DUDE=Abides'])\n {'DUDE': 'Abides'}" - }, - { - "code": "def register_arguments(self, parser):\n parser.add_argument('x', type=int, help='the first value')\n parser.add_argument('y', type=int, help='the second value')", - "docstring": "Guacamole method used by the argparse ingredient.\n\n :param parser:\n Argument parser (from :mod:`argparse`) specific to this command." - }, - { - "code": "def split_array_as_array(self, values):\n if not self.index.uniform:\n raise ValueError(\"Array can only be split as array if all groups have the same size\")\n values = np.asarray(values)\n values = values[self.index.sorter]\n return values.reshape(self.groups, -1, *values.shape[1:])", - "docstring": "Group ndarray into ndarray by means of reshaping\n\n Parameters\n ----------\n values : ndarray_like, [index.size, ...]\n\n Returns\n -------\n ndarray, [groups, group_size, ...]\n values grouped by key\n\n Raises\n ------\n AssertionError\n This operation is only possible if index.uniform==True" - }, - { - "code": "def get_files_changed(repository, review_id):\n repository.git.fetch([next(iter(repository.remotes)), review_id])\n files_changed = repository.git.diff_tree([\"--no-commit-id\",\n \"--name-only\",\n \"-r\",\n \"FETCH_HEAD\"]).splitlines()\n print(\"Found {} files changed\".format(len(files_changed)))\n return files_changed", - "docstring": "Get a list of files changed compared to the given review.\n Compares against current directory.\n\n :param repository: Git repository. Used to get remote.\n - By default uses first remote in list.\n :param review_id: Gerrit review ID.\n :return: List of file paths relative to current directory." - }, - { - "code": "def parse_db_settings(self, settings):\n if settings == 'DJANGO_SETTINGS_MODULE':\n django_settings = os.environ.get('DJANGO_SETTINGS_MODULE')\n self.print_message(\"Getting settings file from DJANGO_SETTINGS_MODULE=%s\"\n % django_settings)\n path_pieces = django_settings.split('.')\n path_pieces[-1] = '%s.py' % path_pieces[-1]\n settings = os.path.join(*path_pieces)\n self.print_message(\"Parsing settings from settings file '%s'\" % settings)\n parser = DatabaseSettingsParser()\n with open(settings) as settings_file:\n settings_ast = ast.parse(settings_file.read())\n parser.visit(settings_ast)\n try:\n return parser.database_settings['default']\n except KeyError as e:\n self.error(\"Missing key or value for: %s\\nSettings must be of the form: %s\"\n % (e, self.settings_format))", - "docstring": "Parse out database settings from filename or DJANGO_SETTINGS_MODULE." 
- }, - { - "code": "def _IsMetadataFile(self, file_entry):\n if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and\n file_entry.path_spec.location in self._METADATA_FILE_LOCATIONS_TSK):\n return True\n return False", - "docstring": "Determines if the file entry is a metadata file.\n\n Args:\n file_entry (dfvfs.FileEntry): a file entry object.\n\n Returns:\n bool: True if the file entry is a metadata file." - }, - { - "code": "def initialize():\n from zsl.interface.web.performers.default import create_not_found_mapping\n from zsl.interface.web.performers.resource import create_resource_mapping\n create_not_found_mapping()\n create_resource_mapping()", - "docstring": "Import in this form is necessary so that we avoid the unwanted behavior and immediate initialization of the\n application objects. This makes the initialization procedure run in the time when it is necessary and has every\n required resources." - }, - { - "code": "def asyncStarMap(asyncCallable, iterable):\n deferreds = starmap(asyncCallable, iterable)\n return gatherResults(deferreds, consumeErrors=True)", - "docstring": "itertools.starmap for deferred callables" - }, - { - "code": "def is_unused(input, model_file=None, model_proto=None, name=None):\n return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(\n input, model_file=model_file, model_proto=model_proto, name=name,\n piece_type=2)", - "docstring": "Returns true if input id is unused piece.\n\n Args:\n input: An arbitrary tensor of int32.\n model_file: The sentencepiece model file path.\n model_proto: The sentencepiece model serialized proto.\n Either `model_file` or `model_proto` must be set.\n name: The name argument that is passed to the op function.\n Returns:\n A tensor of bool with the same shape as input." - }, - { - "code": "def change_logger_levels(logger=None, level=logging.DEBUG):\n if not isinstance(logger, logging.Logger):\n logger = logging.getLogger(logger)\n logger.setLevel(level)\n for handler in logger.handlers:\n handler.level = level", - "docstring": "Go through the logger and handlers and update their levels to the\n one specified.\n\n :param logger: logging name or object to modify, defaults to root logger\n :param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error)" - }, - { - "code": "def _feature_country_mentions(self, doc):\n c_list = []\n for i in doc.ents:\n try:\n country = self._both_codes[i.text]\n c_list.append(country)\n except KeyError:\n pass\n count = Counter(c_list).most_common()\n try:\n top, top_count = count[0]\n except:\n top = \"\"\n top_count = 0\n try:\n two, two_count = count[1]\n except:\n two = \"\"\n two_count = 0\n countries = (top, top_count, two, two_count)\n return countries", - "docstring": "Given a document, count how many times different country names and adjectives are mentioned.\n These are features used in the country picking phase.\n\n Parameters\n ---------\n doc: a spaCy nlp'ed piece of text\n\n Returns\n -------\n countries: dict\n the top two countries (ISO code) and their frequency of mentions." 
- }, - { - "code": "def post_message(self, msg):\n super(mavlogfile, self).post_message(msg)\n if self.planner_format:\n self.f.read(1)\n self.timestamp = msg._timestamp\n self._last_message = msg\n if msg.get_type() != \"BAD_DATA\":\n self._last_timestamp = msg._timestamp\n msg._link = self._link", - "docstring": "add timestamp to message" - }, - { - "code": "def getDict(self):\n badList = self.checkSetSaveEntries(doSave=False)\n if badList:\n self.processBadEntries(badList, self.taskName, canCancel=False)\n return self._taskParsObj.dict()", - "docstring": "Retrieve the current parameter settings from the GUI." - }, - { - "code": "def has_namespace(self, namespace: str) -> bool:\n return self.has_enumerated_namespace(namespace) or self.has_regex_namespace(namespace)", - "docstring": "Check that the namespace has either been defined by an enumeration or a regular expression." - }, - { - "code": "def _search_tree(self, name):\n tpl1 = \"{sep}{name}{sep}\".format(sep=self._node_separator, name=name)\n tpl2 = \"{sep}{name}\".format(sep=self._node_separator, name=name)\n tpl3 = \"{name}{sep}\".format(sep=self._node_separator, name=name)\n return sorted(\n [\n node\n for node in self._db\n if (tpl1 in node)\n or node.endswith(tpl2)\n or node.startswith(tpl3)\n or (name == node)\n ]\n )", - "docstring": "Search_tree for nodes that contain a specific hierarchy name." - }, - { - "code": "def prettyval(self, val):\r\n if len(val) == self.wordsize and val[-1:] in (b'\\x00', b'\\xff'):\r\n return \"%x\" % struct.unpack(\"<\" + self.fmt, val)\r\n if len(val) == self.wordsize and re.search(b'[\\x00-\\x08\\x0b\\x0c\\x0e-\\x1f]', val, re.DOTALL):\r\n return \"%x\" % struct.unpack(\"<\" + self.fmt, val)\r\n if len(val) < 2 or not re.match(b'^[\\x09\\x0a\\x0d\\x20-\\xff]+.$', val, re.DOTALL):\r\n return hexdump(val)\r\n val = val.replace(b\"\\n\", b\"\\\\n\")\r\n return \"'%s'\" % val.decode('utf-8', 'ignore')", - "docstring": "returns the value in a readable format." - }, - { - "code": "def _check_cost(self):\n self._test_list.append(self.cost)\n if len(self._test_list) == self._test_range:\n t1 = np.mean(self._test_list[len(self._test_list) // 2:], axis=0)\n t2 = np.mean(self._test_list[:len(self._test_list) // 2], axis=0)\n if not np.around(t1, decimals=16):\n cost_diff = 0.0\n else:\n cost_diff = (np.linalg.norm(t1 - t2) / np.linalg.norm(t1))\n self._test_list = []\n if self._verbose:\n print(' - CONVERGENCE TEST - ')\n print(' - CHANGE IN COST:', cost_diff)\n print('')\n return cost_diff <= self._tolerance\n else:\n return False", - "docstring": "Check cost function\n\n This method tests the cost function for convergence in the specified\n interval of iterations using the last n (test_range) cost values\n\n Returns\n -------\n bool result of the convergence test" - }, - { - "code": "def reload_source(self, name):\n src = self.roi.get_source_by_name(name)\n if hasattr(self.like.logLike, 'loadSourceMap'):\n self.like.logLike.loadSourceMap(str(name), True, False)\n srcmap_utils.delete_source_map(self.files['srcmap'], name)\n self.like.logLike.saveSourceMaps(str(self.files['srcmap']))\n self._scale_srcmap(self._src_expscale, check_header=False,\n names=[name])\n self.like.logLike.buildFixedModelWts()\n else:\n self.write_xml('tmp')\n src = self.delete_source(name)\n self.add_source(name, src, free=True)\n self.load_xml('tmp')", - "docstring": "Recompute the source map for a single source in the model." 
- }, - { - "code": "def p2sh_input(outpoint, stack_script, redeem_script, sequence=None):\n if sequence is None:\n sequence = guess_sequence(redeem_script)\n stack_script = script_ser.serialize(stack_script)\n redeem_script = script_ser.hex_serialize(redeem_script)\n redeem_script = script_ser.serialize(redeem_script)\n return tb.make_legacy_input(\n outpoint=outpoint,\n stack_script=stack_script,\n redeem_script=redeem_script,\n sequence=sequence)", - "docstring": "OutPoint, str, str, int -> TxIn\n Create a signed legacy TxIn from a p2pkh prevout" - }, - { - "code": "def transform(self, X, y=None, sample_weight=None):\n check_ts_data(X, y)\n Xt, Xc = get_ts_data_parts(X)\n yt = y\n N = len(Xt)\n if Xt[0].ndim > 1:\n Xt = np.array([sliding_tensor(Xt[i], self.width, self._step, self.order)\n for i in np.arange(N)])\n else:\n Xt = np.array([sliding_window(Xt[i], self.width, self._step, self.order)\n for i in np.arange(N)])\n Nt = [len(Xt[i]) for i in np.arange(len(Xt))]\n Xt = np.concatenate(Xt)\n if Xc is not None:\n Xc = expand_variables_to_segments(Xc, Nt)\n Xt = TS_Data(Xt, Xc)\n if yt is not None:\n yt = np.array([sliding_window(yt[i], self.width, self._step, self.order)\n for i in np.arange(N)])\n yt = np.concatenate(yt)\n yt = self.y_func(yt)\n if self.shuffle is True:\n check_random_state(self.random_state)\n Xt, yt, _ = shuffle_data(Xt, yt)\n return Xt, yt, None", - "docstring": "Transforms the time series data into segments\n Note this transformation changes the number of samples in the data\n If y is provided, it is segmented and transformed to align to the new samples as per\n ``y_func``\n Currently sample weights always returned as None\n\n Parameters\n ----------\n X : array-like, shape [n_series, ...]\n Time series data and (optionally) contextual data\n y : array-like shape [n_series], default = None\n target vector\n sample_weight : array-like shape [n_series], default = None\n sample weights\n\n Returns\n -------\n Xt : array-like, shape [n_segments, ]\n transformed time series data\n yt : array-like, shape [n_segments]\n expanded target vector\n sample_weight_new : None" - }, - { - "code": "def content_size_exceeded_max(self, content_bytes):\n content_size = sys.getsizeof(content_bytes)\n if content_size > self.item_size_max():\n return (True, content_size)\n return (False, content_size)", - "docstring": "`sys.getsizeof` works great for this use case because we have a byte\n sequence, and not a recursive nested structure. 
The unit of Memcache\n `item_size_max` is also bytes.\n\n :rettype: tuple(bool, int)" - }, - { - "code": "def send_array(\n socket, A=None, metadata=None, flags=0,\n copy=False, track=False, compress=None,\n chunksize=50 * 1000 * 1000\n):\n md = {}\n md['timestamp'] = datetime.datetime.now().isoformat()\n if metadata:\n md.update(metadata)\n if A is None:\n md['parts'] = 0\n socket.send_json(md, flags)\n return\n if isinstance(A, float) or isinstance(A, int):\n A = np.asarray(A)\n md['dtype'] = str(A.dtype)\n md['shape'] = A.shape\n md['parts'] = int(np.prod(A.shape) // chunksize + 1)\n try:\n md['fill_value'] = np.asscalar(A.fill_value)\n A = A.filled()\n except AttributeError:\n pass\n socket.send_json(md, flags | zmq.SNDMORE)\n if md['parts'] == 1:\n msg = memoryview(np.ascontiguousarray(A))\n socket.send(msg, flags, copy=copy, track=track)\n else:\n for i, a in enumerate(np.array_split(A, md['parts'])):\n msg = memoryview(np.ascontiguousarray(a))\n flags_ = flags\n if i != md['parts'] - 1:\n flags_ |= zmq.SNDMORE\n socket.send(msg, flags_, copy=copy, track=track)\n return", - "docstring": "send a numpy array with metadata over zmq\n\n message is mostly multipart:\n metadata | array part 1 | array part 2, etc\n\n only metadata:\n metadata\n\n the chunksize roughly determines the size of the parts being sent\n if the chunksize is too big, you get an error like:\n zmq.error.Again: Resource temporarily unavailable" - }, - { - "code": "def jira(test_key):\n def decorator(test_item):\n def modified_test(*args, **kwargs):\n save_jira_conf()\n try:\n test_item(*args, **kwargs)\n except Exception as e:\n error_message = get_error_message_from_exception(e)\n test_comment = \"The test '{}' has failed: {}\".format(args[0].get_method_name(), error_message)\n add_jira_status(test_key, 'Fail', test_comment)\n raise\n add_jira_status(test_key, 'Pass', None)\n modified_test.__name__ = test_item.__name__\n return modified_test\n return decorator", - "docstring": "Decorator to update test status in Jira\n\n :param test_key: test case key in Jira\n :returns: jira test" - }, - { - "code": "def evaluate(self, genomes, config):\n if self.mode != MODE_PRIMARY:\n raise ModeError(\"Not in primary mode!\")\n tasks = [(genome_id, genome, config) for genome_id, genome in genomes]\n id2genome = {genome_id: genome for genome_id, genome in genomes}\n tasks = chunked(tasks, self.secondary_chunksize)\n n_tasks = len(tasks)\n for task in tasks:\n self.inqueue.put(task)\n tresults = []\n while len(tresults) < n_tasks:\n try:\n sr = self.outqueue.get(block=True, timeout=0.2)\n except (queue.Empty, managers.RemoteError):\n continue\n tresults.append(sr)\n results = []\n for sr in tresults:\n results += sr\n for genome_id, fitness in results:\n genome = id2genome[genome_id]\n genome.fitness = fitness", - "docstring": "Evaluates the genomes.\n This method raises a ModeError if the\n DistributedEvaluator is not in primary mode." - }, - { - "code": "def get_port_monitor(self):\n uri = \"{}{}\".format(self.data[\"uri\"], self.PORT_MONITOR_PATH)\n return self._helper.do_get(uri)", - "docstring": "Gets the port monitor configuration of a logical interconnect.\n\n Returns:\n dict: The Logical Interconnect." 
- }, - { - "code": "def set_debug_listener(stream):\n def debugger(sig, frame):\n launch_debugger(frame, stream)\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, debugger)\n else:\n logger.warn(\"Cannot set SIGUSR1 signal for debug mode.\")", - "docstring": "Break into a debugger if receives the SIGUSR1 signal" - }, - { - "code": "def lonlat2xyz(lon, lat):\n lat = xu.deg2rad(lat)\n lon = xu.deg2rad(lon)\n x = xu.cos(lat) * xu.cos(lon)\n y = xu.cos(lat) * xu.sin(lon)\n z = xu.sin(lat)\n return x, y, z", - "docstring": "Convert lon lat to cartesian." - }, - { - "code": "def normalized_table_calc(classes, table):\n map_dict = {k: 0 for k in classes}\n new_table = {k: map_dict.copy() for k in classes}\n for key in classes:\n div = sum(table[key].values())\n if div == 0:\n div = 1\n for item in classes:\n new_table[key][item] = numpy.around(table[key][item] / div, 5)\n return new_table", - "docstring": "Return normalized confusion matrix.\n\n :param classes: classes list\n :type classes:list\n :param table: table\n :type table:dict\n :return: normalized table as dict" - }, - { - "code": "def truncate_money(money: Money) -> Money:\n amount = truncate_to(money.amount, money.currency)\n return Money(amount, money.currency)", - "docstring": "Truncates money amount to the number of decimals corresponding to the currency" - }, - { - "code": "def decrypt(self, esp, key, icv_size=None):\n if icv_size is None:\n icv_size = self.icv_size if self.is_aead else 0\n iv = esp.data[:self.iv_size]\n data = esp.data[self.iv_size:len(esp.data) - icv_size]\n icv = esp.data[len(esp.data) - icv_size:]\n if self.cipher:\n cipher = self.new_cipher(key, iv, icv)\n decryptor = cipher.decryptor()\n if self.is_aead:\n decryptor.authenticate_additional_data(\n struct.pack('!LL', esp.spi, esp.seq)\n )\n try:\n data = decryptor.update(data) + decryptor.finalize()\n except InvalidTag as err:\n raise IPSecIntegrityError(err)\n padlen = (data[-2])\n nh = data[-1]\n data = data[:len(data) - padlen - 2]\n padding = data[len(data) - padlen - 2: len(data) - 2]\n return _ESPPlain(spi=esp.spi,\n seq=esp.seq,\n iv=iv,\n data=data,\n padding=padding,\n padlen=padlen,\n nh=nh,\n icv=icv)", - "docstring": "Decrypt an ESP packet\n\n @param esp: an encrypted ESP packet\n @param key: the secret key used for encryption\n @param icv_size: the length of the icv used for integrity check\n\n @return: a valid ESP packet encrypted with this algorithm\n @raise IPSecIntegrityError: if the integrity check fails with an AEAD\n algorithm" - }, - { - "code": "def try_mongodb_opts(self, host=\"localhost\", database_name='INGInious'):\n try:\n mongo_client = MongoClient(host=host)\n except Exception as e:\n self._display_warning(\"Cannot connect to MongoDB on host %s: %s\" % (host, str(e)))\n return None\n try:\n database = mongo_client[database_name]\n except Exception as e:\n self._display_warning(\"Cannot access database %s: %s\" % (database_name, str(e)))\n return None\n try:\n GridFS(database)\n except Exception as e:\n self._display_warning(\"Cannot access gridfs %s: %s\" % (database_name, str(e)))\n return None\n return database", - "docstring": "Try MongoDB configuration" - }, - { - "code": "def contains_all(self, other):\n dtype = getattr(other, 'dtype', None)\n if dtype is None:\n dtype = np.result_type(*other)\n return is_numeric_dtype(dtype)", - "docstring": "Return ``True`` if ``other`` is a sequence of complex numbers." 
- }, - { - "code": "def extract_secrets_from_android_rooted(adb_path='adb'):\n data = subprocess.check_output([\n adb_path, 'shell', 'su', '-c',\n \"'cat /data/data/com.valvesoftware.android.steam.community/files/Steamguard*'\"\n ])\n data = data.decode('utf-8').split('\\n')[-1]\n if data[0] != \"{\":\n raise RuntimeError(\"Got invalid data: %s\" % repr(data))\n return {int(x['steamid']): x\n for x in map(json.loads, data.replace(\"}{\", '}|||||{').split('|||||'))}", - "docstring": "Extract Steam Authenticator secrets from a rooted Android device\n\n Prerequisite for this to work:\n\n - rooted android device\n - `adb binary `_\n - device in debug mode, connected and paired\n\n .. note::\n If you know how to make this work, without requiring the device to be rooted,\n please open a issue on github. Thanks\n\n :param adb_path: path to adb binary\n :type adb_path: str\n :raises: When there is any problem\n :return: all secrets from the device, steamid as key\n :rtype: dict" - }, - { - "code": "def alias(requestContext, seriesList, newName):\n try:\n seriesList.name = newName\n except AttributeError:\n for series in seriesList:\n series.name = newName\n return seriesList", - "docstring": "Takes one metric or a wildcard seriesList and a string in quotes.\n Prints the string instead of the metric name in the legend.\n\n Example::\n\n &target=alias(Sales.widgets.largeBlue,\"Large Blue Widgets\")" - }, - { - "code": "def _find_usage_vpc_links(self):\n logger.debug('Finding usage for VPC Links')\n link_count = 0\n paginator = self.conn.get_paginator('get_vpc_links')\n for resp in paginator.paginate():\n link_count += len(resp['items'])\n self.limits['VPC Links per account']._add_current_usage(\n link_count, aws_type='AWS::ApiGateway::VpcLink'\n )", - "docstring": "Find usage on VPC Links. Update `self.limits`." - }, - { - "code": "def _set(self, schema):\n if isinstance(schema, CommonSchema):\n self._spl_type = False\n self._schema = schema.schema()\n self._style = self._default_style()\n else:\n self._spl_type = schema._spl_type\n self._schema = schema._schema\n self._style = schema._style", - "docstring": "Set a schema from another schema" - }, - { - "code": "def do_for_dir(inws, begin):\n inws = os.path.abspath(inws)\n for wroot, wdirs, wfiles in os.walk(inws):\n for wfile in wfiles:\n if wfile.endswith('.html'):\n if 'autogen' in wroot:\n continue\n check_html(os.path.abspath(os.path.join(wroot, wfile)), begin)", - "docstring": "do something in the directory." - }, - { - "code": "def task(func, *args, **kwargs):\n from celery import shared_task\n if 'serializer' not in kwargs:\n kwargs['serializer'] = DJANGO_CEREAL_PICKLE\n return shared_task(func, *args, **kwargs)", - "docstring": "A task decorator that uses the django-cereal pickler as the default serializer." 
- }, - { - "code": "def krai_to_raw(self, amount):\n amount = self._process_value(amount, 'int')\n payload = {\"amount\": amount}\n resp = self.call('krai_to_raw', payload)\n return int(resp['amount'])", - "docstring": "Multiply an krai amount by the krai ratio.\n\n :param amount: Amount in krai to convert to raw\n :type amount: int\n\n :raises: :py:exc:`nano.rpc.RPCException`\n\n >>> rpc.krai_to_raw(amount=1)\n 1000000000000000000000000000" - }, - { - "code": "async def wait_tasks(tasks, flatten=True):\n rets = await asyncio.gather(*tasks)\n if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)):\n rets = list(itertools.chain(*rets))\n return rets", - "docstring": "Gather a list of asynchronous tasks and wait their completion.\n\n :param list tasks:\n A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`.\n :param bool flatten:\n If ``True`` the returned results are flattened into one list if the\n tasks return iterable objects. The parameter does nothing if all the\n results are not iterable.\n :returns:\n The results of tasks as a list or as a flattened list" - }, - { - "code": "def _logprior(self):\n logj = self.logjacobian\n logp = self.prior_distribution(**self.current_params) + logj\n if numpy.isnan(logp):\n logp = -numpy.inf\n return logp", - "docstring": "Calculates the log prior at the current parameters." - }, - { - "code": "def process_result(self, new_concept, concepts):\n if new_concept.phi > 0:\n new_concept.subsystem = self.subsystem\n concepts.append(new_concept)\n return concepts", - "docstring": "Save all concepts with non-zero |small_phi| to the\n |CauseEffectStructure|." - }, - { - "code": "def object_types():\n types = [element.rel\n for element in entry_point()]\n types.extend(list(CONTEXTS))\n return types", - "docstring": "Show all available 'entry points' available for searching. An entry\n point defines a uri that provides unfiltered access to all elements\n of the entry point type.\n\n :return: list of entry points by name\n :rtype: list(str)" - }, - { - "code": "def register(request):\n serializer_class = registration_settings.REGISTER_SERIALIZER_CLASS\n serializer = serializer_class(data=request.data)\n serializer.is_valid(raise_exception=True)\n kwargs = {}\n if registration_settings.REGISTER_VERIFICATION_ENABLED:\n verification_flag_field = get_user_setting('VERIFICATION_FLAG_FIELD')\n kwargs[verification_flag_field] = False\n email_field = get_user_setting('EMAIL_FIELD')\n if (email_field not in serializer.validated_data\n or not serializer.validated_data[email_field]):\n raise BadRequest(\"User without email cannot be verified\")\n user = serializer.save(**kwargs)\n output_serializer_class = registration_settings.REGISTER_OUTPUT_SERIALIZER_CLASS\n output_serializer = output_serializer_class(instance=user)\n user_data = output_serializer.data\n if registration_settings.REGISTER_VERIFICATION_ENABLED:\n signer = RegisterSigner({\n 'user_id': user.pk,\n }, request=request)\n template_config = (\n registration_settings.REGISTER_VERIFICATION_EMAIL_TEMPLATES)\n send_verification_notification(user, signer, template_config)\n return Response(user_data, status=status.HTTP_201_CREATED)", - "docstring": "Register new user." 
- }, - { - "code": "def get_docs_from_split(session, candidate_classes, split):\n sub_query = session.query(Candidate.id).filter(Candidate.split == split).subquery()\n split_docs = set()\n for candidate_class in candidate_classes:\n split_docs.update(\n cand.document\n for cand in session.query(candidate_class)\n .filter(candidate_class.id.in_(sub_query))\n .all()\n )\n return split_docs", - "docstring": "Return a list of documents that contain the candidates in the split." - }, - { - "code": "def get_reading(self):\n yield self._manager.poll_sensor(self._name)\n raise Return(self._reading)", - "docstring": "Get a fresh sensor reading from the KATCP resource\n\n Returns\n -------\n reply : tornado Future resolving with :class:`KATCPSensorReading` object\n\n Note\n ----\n\n As a side-effect this will update the reading stored in this object, and result in\n registered listeners being called." - }, - { - "code": "def _extract_expressions(node):\n if (\n isinstance(node, nodes.Call)\n and isinstance(node.func, nodes.Name)\n and node.func.name == _TRANSIENT_FUNCTION\n ):\n real_expr = node.args[0]\n real_expr.parent = node.parent\n for name in node.parent._astroid_fields:\n child = getattr(node.parent, name)\n if isinstance(child, (list, tuple)):\n for idx, compound_child in enumerate(child):\n if compound_child is node:\n child[idx] = real_expr\n elif child is node:\n setattr(node.parent, name, real_expr)\n yield real_expr\n else:\n for child in node.get_children():\n yield from _extract_expressions(child)", - "docstring": "Find expressions in a call to _TRANSIENT_FUNCTION and extract them.\n\n The function walks the AST recursively to search for expressions that\n are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an\n expression, it completely removes the function call node from the tree,\n replacing it by the wrapped expression inside the parent.\n\n :param node: An astroid node.\n :type node: astroid.bases.NodeNG\n :yields: The sequence of wrapped expressions on the modified tree\n expression can be found." 
- }, - { - "code": "def _synopsis(self, container_settings, container_status=''):\r\n settings = {\r\n 'container_status': container_settings['State']['Status'],\r\n 'container_exit': container_settings['State']['ExitCode'],\r\n 'container_ip': container_settings['NetworkSettings']['IPAddress'],\r\n 'image_name': container_settings['Config']['Image'],\r\n 'container_alias': container_settings['Name'].replace('/',''),\r\n 'container_variables': {},\r\n 'mapped_ports': {},\r\n 'mounted_volumes': {},\r\n 'container_networks': []\r\n }\r\n import re\r\n num_pattern = re.compile('\\d+')\r\n if container_settings['NetworkSettings']['Ports']:\r\n for key, value in container_settings['NetworkSettings']['Ports'].items():\r\n if value:\r\n port = num_pattern.findall(value[0]['HostPort'])[0]\r\n settings['mapped_ports'][port] = num_pattern.findall(key)[0]\r\n elif container_settings['HostConfig']['PortBindings']:\r\n for key, value in container_settings['HostConfig']['PortBindings'].items():\r\n port = num_pattern.findall(value[0]['HostPort'])[0]\r\n settings['mapped_ports'][port] = num_pattern.findall(key)[0]\r\n if container_settings['Config']['Env']:\r\n for variable in container_settings['Config']['Env']:\r\n k, v = variable.split('=')\r\n settings['container_variables'][k] = v\r\n for volume in container_settings['Mounts']:\r\n system_path = volume['Source']\r\n container_path = volume['Destination']\r\n settings['mounted_volumes'][system_path] = container_path\r\n if container_settings['NetworkSettings']:\r\n if container_settings['NetworkSettings']['Networks']:\r\n for key in container_settings['NetworkSettings']['Networks'].keys():\r\n settings['container_networks'].append(key)\r\n if settings['container_status'] == 'exited':\r\n if not container_status:\r\n try:\r\n from subprocess import check_output, STDOUT\r\n sys_command = 'docker logs --tail 1 %s' % settings['container_alias']\r\n check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8')\r\n settings['container_status'] = 'stopped'\r\n except:\r\n pass\r\n else:\r\n settings['container_status'] = container_status\r\n return settings", - "docstring": "a helper method for summarizing container settings" - }, - { - "code": "def prompt_file(prompt, default=None, must_exist=True, is_dir=False, \n show_default=True, prompt_suffix=': ', color=None):\n if must_exist:\n while True:\n r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default,\n prompt_suffix=prompt_suffix, color=color)\n if os.path.exists(r):\n break\n print('This path does not exist.')\n else:\n r = prompt_autocomplete(prompt, path_complete(is_dir), default, show_default=show_default,\n prompt_suffix=prompt_suffix, color=color)\n return r", - "docstring": "Prompt a filename using using glob for autocompetion.\n\n If must_exist is True (default) then you can be sure that the value returned\n is an existing filename or directory name.\n If is_dir is True, this will show only the directories for the completion." 
- }, - { - "code": "def _get_model_instance(model_cls, data):\n if not isinstance(data, (model_cls, dict)):\n raise TypeError('{0} is not valid type, instance of '\n '{1} or dict required'.format(data, model_cls))\n return model_cls(**data) if isinstance(data, dict) else data", - "docstring": "Convert dict into object of class of passed model.\n\n :param class model_cls:\n :param object data:\n :rtype DomainModel:" - }, - { - "code": "def find_module_egg_path(module_path):\n if module_path.find('.egg') == -1:\n return None\n _module_absolute_path = os.path.abspath(module_path)\n egg_paths = [os.path.relpath(_module_absolute_path, egg_path)\n for egg_path in [egg_path for egg_path in sys.path\n if REGEX_EGG_PACKAGE_PATH.match(egg_path) \\\n and _module_absolute_path.startswith(egg_path)]]\n return None if len(egg_paths) == 0 else egg_paths[0]", - "docstring": "Find the path of the deployed egg package that may contain the\n specified module path.\n\n\n @param module_path: the path of a Python module.\n\n\n @return: the absolute path of the deployed egg package that contains\n the specified module path, or ``None`` if no deployed egg\n package contains this module path." - }, - { - "code": "def run(items, background=None):\n if not background: background = []\n background_bams = []\n paired = vcfutils.get_paired_bams([x[\"align_bam\"] for x in items], items)\n if paired:\n inputs = [paired.tumor_data]\n if paired.normal_bam:\n background = [paired.normal_data]\n background_bams = [paired.normal_bam]\n else:\n assert not background\n inputs, background = shared.find_case_control(items)\n background_bams = [x[\"align_bam\"] for x in background]\n orig_vcf = _run_wham(inputs, background_bams)\n out = []\n for data in inputs:\n if \"sv\" not in data:\n data[\"sv\"] = []\n final_vcf = shared.finalize_sv(orig_vcf, data, items)\n data[\"sv\"].append({\"variantcaller\": \"wham\", \"vrn_file\": final_vcf})\n out.append(data)\n return out", - "docstring": "Detect copy number variations from batched set of samples using WHAM." - }, - { - "code": "def get_last(self, keyword, param=None, default=None):\n return self.get_param(keyword, param, default)[-1]", - "docstring": "Get the parameters for a given keyword, or default if keyword or\n parameter are not present in the configuration.\n\n This finds the last declaration of the given parameter (which is the\n one which takes effect). If no parameter is given, then the entire\n line is treated as the parameter and returned.\n\n Parameters:\n keyword(str): The keyword name, e.g. 'tinker' or 'driftfile'\n param(str): The parameter name, e.g. 'panic' or 'step'. If not\n given, the last definition of that keyword is given.\n\n Returns:\n str or None: The value of the given parameter, or None if not\n found." 
- }, - { - "code": "def project_interval_backward(self, c_interval):\n return self.src_tm.g_to_c(self.dst_tm.c_to_g(c_interval))", - "docstring": "project c_interval on the destination transcript to the\n source transcript\n\n :param c_interval: an :class:`hgvs.interval.Interval` object on the destination transcript\n :returns: c_interval: an :class:`hgvs.interval.Interval` object on the source transcript" - }, - { - "code": "def _get_info_dir():\n path = os.path.join(tempfile.gettempdir(), \".tensorboard-info\")\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n else:\n os.chmod(path, 0o777)\n return path", - "docstring": "Get path to directory in which to store info files.\n\n The directory returned by this function is \"owned\" by this module. If\n the contents of the directory are modified other than via the public\n functions of this module, subsequent behavior is undefined.\n\n The directory will be created if it does not exist." - }, - { - "code": "def update(self, group_id, name=None, order=None, collapsed=None):\n data = ApiParams()\n data['group'] = group_id\n data['name'] = name\n data['order'] = order\n data['collapsed'] = collapsed\n return self._put('components/groups/%s' % group_id, data=data)['data']", - "docstring": "Update a Component Group\n\n :param int group_id: Component Group ID\n :param str name: Name of the component group\n :param int order: Order of the group\n :param int collapsed: Collapse the group?\n :return: Updated component group data (:class:`dict`)\n\n .. seealso:: https://docs.cachethq.io/reference#put-component-group" - }, - { - "code": "def eformat(format_str,lst,dct,defvalue='-'):\n return vformat(format_str,DefaultList(defvalue,lst),DefaultDict(defvalue,dct))", - "docstring": "Formats a list and a dictionary, manages unkown keys\n\n It works like :meth:`string.Formatter.vformat` except that it accepts a defvalue for not matching keys.\n Defvalue can be a callable that will receive the requested key as argument and return a string\n\n Args:\n format_string (str): Same format string as for :meth:`str.format`\n lst (dict) : the list to format\n dct (dict) : the dict to format\n defvalue (str or callable): the default value to display when the data is not in the dict\n\n Examples:\n\n >>> d = {'count': '32591', 'soft': 'textops'}\n >>> l = ['Eric','Guido']\n >>> eformat('{0} => {soft} : {count} dowloads',l,d)\n 'Eric => textops : 32591 dowloads'\n >>> eformat('{2} => {software} : {count} dowloads',l,d,'N/A')\n 'N/A => N/A : 32591 dowloads'\n >>> eformat('{2} => {software} : {count} dowloads',l,d,lambda k:'unknown_tag_%s' % k)\n 'unknown_tag_2 => unknown_tag_software : 32591 dowloads'" - }, - { - "code": "def pop(self):\n popped = False\n result = None\n current_node = self._first_node\n while not popped:\n next_node = current_node.next()\n next_next_node = next_node.next()\n if not next_next_node:\n self._last_node = current_node\n self._last_node.update_next(None)\n self._size -= 1\n result = next_node.data()\n popped = True\n current_node = next_node\n return result", - "docstring": "Removes the last node from the list" - }, - { - "code": "def _alerter_thread_func(self) -> None:\n self._alert_count = 0\n self._next_alert_time = 0\n while not self._stop_thread:\n if self.terminal_lock.acquire(blocking=False):\n alert_str = self._generate_alert_str()\n new_prompt = self._generate_colored_prompt()\n if alert_str:\n self.async_alert(alert_str, new_prompt)\n new_title = \"Alerts 
Printed: {}\".format(self._alert_count)\n self.set_window_title(new_title)\n elif new_prompt != self.prompt:\n self.async_update_prompt(new_prompt)\n self.terminal_lock.release()\n time.sleep(0.5)", - "docstring": "Prints alerts and updates the prompt any time the prompt is showing" - }, - { - "code": "def set(self, key, data):\n if isinstance(data, dict):\n self._volatile_data[key] = {k: v for (k, v) in data.items()}\n else:\n self._volatile_data[key] = data", - "docstring": "set arbitrary data by key in volatile memory\n\n :param key: key of the data\n :param data: data to be stored" - }, - { - "code": "def read_plain_byte_array(file_obj, count):\n return [file_obj.read(struct.unpack(b\" int(current_provisioning * 2):\n reads = int(current_provisioning * 2)\n logger.debug(\n '{0} - '\n 'Cannot reach min-provisioned-reads as max scale up '\n 'is 100% of current provisioning'.format(log_tag))\n logger.debug(\n '{0} - Setting min provisioned reads to {1}'.format(\n log_tag, min_provisioned_reads))\n return reads", - "docstring": "Get the minimum number of reads to current_provisioning\n\n :type current_provisioning: int\n :param current_provisioning: Current provisioned reads\n :type min_provisioned_reads: int\n :param min_provisioned_reads: Configured min provisioned reads\n :type log_tag: str\n :param log_tag: Prefix for the log\n :returns: int -- Minimum number of reads" - }, - { - "code": "def get_message(self, metadata=False, asctime=True):\n msg = self.msg if is_string(self.msg) else str(self.msg)\n if self.args:\n try:\n msg = msg % self.args\n except:\n msg += str(self.args)\n if asctime: msg = \"[\" + self.asctime + \"] \" + msg\n if metadata:\n msg += \"\\nCalled by %s at %s:%s\\n\" % (self.func_name, self.pathname, self.lineno)\n return msg", - "docstring": "Return the message after merging any user-supplied arguments with the message.\n\n Args:\n metadata: True if function and module name should be added.\n asctime: True if time string should be added." 
- }, - { - "code": "def get_value(self, label):\n for (key, value) in self.items:\n if key == label:\n return value", - "docstring": "Get value from a single fully-qualified name" - }, - { - "code": "def parallel_apply(func, arg_iterable, **kwargs):\n max_workers = kwargs.pop('max_workers', None)\n parallel = kwargs.pop('parallel', True)\n parallel_warning = kwargs.pop('parallel_warning', True)\n func_args = kwargs.pop('func_args', ())\n func_pre_args = kwargs.pop('func_pre_args', ())\n func_kwargs = kwargs.pop('func_kwargs', {})\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n if 'leave' not in tqdm_kwargs:\n tqdm_kwargs['leave'] = False\n assert isinstance(func_args, tuple), (\n str(func_args) + ' is type ' + str(type(func_args)))\n assert isinstance(func_pre_args, tuple), (\n str(func_pre_args) + ' is type ' + str(type(func_pre_args)))\n progress = select_tqdm()\n if not parallel:\n if parallel_warning:\n warnings.warn(('parallel_map has parallel=False - turn on '\n 'parallelisation for faster processing'),\n UserWarning)\n return [func(*(func_pre_args + (x,) + func_args), **func_kwargs) for\n x in progress(arg_iterable, **tqdm_kwargs)]\n else:\n pool = concurrent.futures.ProcessPoolExecutor(max_workers=max_workers)\n futures = []\n for element in arg_iterable:\n futures.append(pool.submit(\n func, *(func_pre_args + (element,) + func_args),\n **func_kwargs))\n results = []\n for fut in progress(concurrent.futures.as_completed(futures),\n total=len(arg_iterable), **tqdm_kwargs):\n results.append(fut.result())\n return results", - "docstring": "Apply function to iterable with parallelisation and a tqdm progress bar.\n\n Roughly equivalent to\n\n >>> [func(*func_pre_args, x, *func_args, **func_kwargs) for x in\n arg_iterable]\n\n but will **not** necessarily return results in input order.\n\n Parameters\n ----------\n func: function\n Function to apply to list of args.\n arg_iterable: iterable\n argument to iterate over.\n func_args: tuple, optional\n Additional positional arguments for func.\n func_pre_args: tuple, optional\n Positional arguments to place before the iterable argument in func.\n func_kwargs: dict, optional\n Additional keyword arguments for func.\n parallel: bool, optional\n To turn off parallelisation if needed.\n parallel_warning: bool, optional\n To turn off warning for no parallelisation if needed.\n max_workers: int or None, optional\n Number of processes.\n If max_workers is None then concurrent.futures.ProcessPoolExecutor\n defaults to using the number of processors of the machine.\n N.B. If max_workers=None and running on supercomputer clusters with\n multiple nodes, this may default to the number of processors on a\n single node.\n\n Returns\n -------\n results_list: list of function outputs" - }, - { - "code": "def size(self):\n old = self.__file.tell()\n self.__file.seek(0, 2)\n n_bytes = self.__file.tell()\n self.__file.seek(old)\n return n_bytes", - "docstring": "Calculate and return the file size in bytes." - }, - { - "code": "def fill_childgoid2obj(childgoid2obj, parent_obj):\n for child_obj in parent_obj.children:\n if child_obj.id not in childgoid2obj:\n childgoid2obj[child_obj.id] = child_obj\n fill_childgoid2obj(childgoid2obj, child_obj)", - "docstring": "Fill childgoid2obj with all child key GO IDs and their objects." 
- }, - { - "code": "def _append_to(self, field, element):\n if element not in EMPTIES:\n self.obj.setdefault(field, [])\n self.obj.get(field).append(element)", - "docstring": "Append the ``element`` to the ``field`` of the record.\n\n This method is smart: it does nothing if ``element`` is empty and\n creates ``field`` if it does not exit yet.\n\n Args:\n :param field: the name of the field of the record to append to\n :type field: string\n :param element: the element to append" - }, - { - "code": "def has_file(self, name: str):\n return os.path.isfile(self._path / name)", - "docstring": "check whether this directory contains the file." - }, - { - "code": "def cp_parents(files, target_dir: Union[str, Path]):\n if isinstance(files, (str, Path)):\n files = [files]\n files = (Path(f).expanduser() for f in files)\n target_dir = Path(target_dir).expanduser()\n for f in files:\n newpath = target_dir / f.parent\n newpath.mkdir(parents=True, exist_ok=True)\n shutil.copy2(f, newpath)", - "docstring": "This function requires Python >= 3.6.\n\n This acts like bash cp --parents in Python\n inspiration from\n http://stackoverflow.com/questions/15329223/copy-a-file-into-a-directory-with-its-original-leading-directories-appended\n\n example\n source: /tmp/e/f\n dest: /tmp/a/b/c/d/\n result: /tmp/a/b/c/d/tmp/e/f\n\n cp_parents('/tmp/a/b/c/d/boo','/tmp/e/f')\n cp_parents('x/hi','/tmp/e/f/g') --> copies ./x/hi to /tmp/e/f/g/x/hi" - }, - { - "code": "def levenberg_marquardt(self, start_x=None, damping=1.0e-3, tolerance=1.0e-6):\n if start_x is None:\n start_x = self._analytical_fitter.fit(self._c)\n return optimise_levenberg_marquardt(start_x, self._a, self._c, tolerance)", - "docstring": "Optimise value of x using levenberg marquardt" - }, - { - "code": "def full_width_svg(url, width, height, alt_text=None):\n return {\n 'ratio': str((float(height)/float(width))*100)[:2],\n 'url': url,\n 'alt_text': alt_text\n }", - "docstring": "Helper to render an SVG that will size to fill\n its element while keeping its dimentions." - }, - { - "code": "def connectionLost(self, reason=connectionDone):\n self._failed = reason\n pending, self._pending = self._pending, None\n for d in pending.values():\n d.errback(reason)", - "docstring": "Mark the protocol as failed and fail all pending operations." - }, - { - "code": "def extract_code(end_mark, current_str, str_array, line_num):\n if end_mark not in current_str:\n reached_end = False\n line_num += 1\n while reached_end is False:\n next_line = str_array[line_num]\n if end_mark in next_line:\n reached_end = True\n else:\n line_num += 1\n current_str += next_line\n clean_str = current_str.split(end_mark)[0]\n return {'current_str': clean_str, 'line_num': line_num}", - "docstring": "Extract a multi-line string from a string array, up to a specified end marker.\n\n Args:\n end_mark (str): The end mark string to match for.\n current_str (str): The first line of the string array.\n str_array (list): An array of strings (lines).\n line_num (int): The current offset into the array.\n\n Returns:\n Extended string up to line with end marker." 
- }, - { - "code": "def _merge_ranges(self, plots, xspecs, yspecs):\n plot_ranges = {}\n for plot in plots:\n if plot is None:\n continue\n if hasattr(plot, 'x_range') and plot.x_range.tags and xspecs is not None:\n if match_dim_specs(plot.x_range.tags[0], xspecs):\n plot_ranges['x_range'] = plot.x_range\n if match_dim_specs(plot.x_range.tags[0], yspecs):\n plot_ranges['y_range'] = plot.x_range\n if hasattr(plot, 'y_range') and plot.y_range.tags and yspecs is not None:\n if match_dim_specs(plot.y_range.tags[0], yspecs):\n plot_ranges['y_range'] = plot.y_range\n if match_dim_specs(plot.y_range.tags[0], xspecs):\n plot_ranges['x_range'] = plot.y_range\n return plot_ranges", - "docstring": "Given a list of other plots return axes that are shared\n with another plot by matching the dimensions specs stored\n as tags on the dimensions." - }, - { - "code": "def convert_namespaces_ast(\n ast,\n api_url: str = None,\n namespace_targets: Mapping[str, List[str]] = None,\n canonicalize: bool = False,\n decanonicalize: bool = False,\n):\n if isinstance(ast, NSArg):\n given_term_id = \"{}:{}\".format(ast.namespace, ast.value)\n if (canonicalize and not ast.canonical) or (\n decanonicalize and not ast.decanonical\n ):\n normalized_term = convert_nsarg(\n given_term_id,\n api_url=api_url,\n namespace_targets=namespace_targets,\n canonicalize=canonicalize,\n decanonicalize=decanonicalize,\n )\n if canonicalize:\n ast.canonical = normalized_term\n elif decanonicalize:\n ast.decanonical = normalized_term\n if canonicalize:\n ns, value = ast.canonical.split(\":\")\n ast.change_nsvalue(ns, value)\n elif decanonicalize:\n ns, value = ast.canonical.split(\":\")\n ast.change_nsvalue(ns, value)\n if hasattr(ast, \"args\"):\n for arg in ast.args:\n convert_namespaces_ast(\n arg,\n api_url=api_url,\n namespace_targets=namespace_targets,\n canonicalize=canonicalize,\n decanonicalize=decanonicalize,\n )\n return ast", - "docstring": "Recursively convert namespaces of BEL Entities in BEL AST using API endpoint\n\n Canonicalization and decanonicalization is determined by endpoint used and namespace_targets.\n\n Args:\n ast (BEL): BEL AST\n api_url (str): endpoint url with a placeholder for the term_id (either /terms//canonicalized or /terms//decanonicalized)\n namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities\n\n Returns:\n BEL: BEL AST" - }, - { - "code": "def sync(self):\r\n if self._customFormat:\r\n self._customFormat.save(self.fileName())\r\n else:\r\n super(XSettings, self).sync()", - "docstring": "Syncs the information for this settings out to the file system." - }, - { - "code": "def assert_condition_md5(self):\n if 'Content-MD5' in self.request.headers:\n body_md5 = hashlib.md5(self.request.body).hexdigest()\n if body_md5 != self.request.headers['Content-MD5']:\n raise_400(self, msg='Invalid Content-MD5 request header.')", - "docstring": "If the ``Content-MD5`` request header is present in the request\n it's verified against the MD5 hash of the request body. If they don't\n match, a 400 HTTP response is returned.\n\n :raises: :class:`webob.exceptions.ResponseException` of status 400 if\n the MD5 hash does not match the body." - }, - { - "code": "def finish(self):\n assert (\n lib.BrotliDecoderHasMoreOutput(self._decoder) == lib.BROTLI_FALSE\n )\n if lib.BrotliDecoderIsFinished(self._decoder) == lib.BROTLI_FALSE:\n raise Error(\"Decompression error: incomplete compressed stream.\")\n return b''", - "docstring": "Finish the decompressor. 
As the decompressor decompresses eagerly, this\n will never actually emit any data. However, it will potentially throw\n errors if a truncated or damaged data stream has been used.\n\n Note that, once this method is called, the decompressor is no longer\n safe for further use and must be thrown away." - }, - { - "code": "def push(self):\n self.github_repo.create_and_push()\n self._repo = self.github_repo.repo\n return self._repo", - "docstring": "create a github repo and push the local repo into it" - }, - { - "code": "def _disconnect(cls):\n post_save.disconnect(\n notify_items, sender=cls,\n dispatch_uid='knocker_{0}'.format(cls.__name__)\n )", - "docstring": "Disconnect signal from current model" - }, - { - "code": "def get_env_key(obj, key=None):\n return str.join('_', [obj.__module__.replace('.','_').upper(),\n key.upper()])", - "docstring": "Return environment variable key to use for lookups within a\n namespace represented by the package name.\n\n For example, any varialbes for predix.security.uaa are stored\n as PREDIX_SECURITY_UAA_KEY" - }, - { - "code": "def _check_open_dir(self, fs, path, info):\n if self.exclude_dirs is not None and fs.match(self.exclude_dirs, info.name):\n return False\n if self.filter_dirs is not None and not fs.match(self.filter_dirs, info.name):\n return False\n return self.check_open_dir(fs, path, info)", - "docstring": "Check if a directory should be considered in the walk." - }, - { - "code": "def upload_file_and_send_file_offer(self, file_name, user_id, data=None, input_file_path=None,\n content_type='application/octet-stream', auto_open=False,\n prevent_share=False, scope='content/send'):\n if input_file_path:\n with open(input_file_path, 'rb') as f:\n data = f.read()\n if not data:\n raise ValueError('Either the data of a file or the path to a file must be provided')\n params = {\n 'fileName': file_name,\n 'userId': user_id,\n 'autoOpen': 'true' if auto_open else 'false',\n 'preventShare': 'true' if prevent_share else 'false',\n }\n return _post(\n token=self.oauth.get_app_token(scope),\n uri='/user/media/file/send?' + urllib.urlencode(params),\n data=data,\n content_type=content_type\n )", - "docstring": "Upload a file of any type to store and return a FileId once file offer has been sent.\n No user authentication required" - }, - { - "code": "def field_mapping_help():\n message = m.Message()\n message.add(m.Brand())\n message.add(heading())\n message.add(content())\n return message", - "docstring": "Help message for field mapping Dialog.\n\n .. 
versionadded:: 4.1.0\n\n :returns: A message object containing helpful information.\n :rtype: messaging.message.Message" - }, - { - "code": "def evaluate(ref_patterns, est_patterns, **kwargs):\n scores = collections.OrderedDict()\n scores['F'], scores['P'], scores['R'] = \\\n util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs)\n scores['F_est'], scores['P_est'], scores['R_est'] = \\\n util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns,\n **kwargs)\n kwargs['thresh'] = .5\n scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \\\n util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,\n **kwargs)\n kwargs['thresh'] = .75\n scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \\\n util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,\n **kwargs)\n scores['F_3'], scores['P_3'], scores['R_3'] = \\\n util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns,\n **kwargs)\n if 'n' not in kwargs:\n kwargs['n'] = 5\n scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns,\n est_patterns, **kwargs)\n scores['FFTP_est'] = \\\n util.filter_kwargs(first_n_target_proportion_R, ref_patterns,\n est_patterns, **kwargs)\n return scores", - "docstring": "Load data and perform the evaluation.\n\n Examples\n --------\n >>> ref_patterns = mir_eval.io.load_patterns(\"ref_pattern.txt\")\n >>> est_patterns = mir_eval.io.load_patterns(\"est_pattern.txt\")\n >>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)\n\n Parameters\n ----------\n ref_patterns : list\n The reference patterns in the format returned by\n :func:`mir_eval.io.load_patterns()`\n est_patterns : list\n The estimated patterns in the same format\n kwargs\n Additional keyword arguments which will be passed to the\n appropriate metric or preprocessing functions.\n\n Returns\n -------\n scores : dict\n Dictionary of scores, where the key is the metric name (str) and\n the value is the (float) score achieved." - }, - { - "code": "def push(self, patch=None, toa=None, meta=None):\n if not meta:\n meta = {}\n if not toa:\n toa = time.mktime(datetime.datetime.now().timetuple())\n if not isinstance(toa, int):\n toa = int(toa)\n if isinstance(patch, dict):\n patch = self.revisions._dictionary_to_cursor(patch)\n action = None\n if isinstance(patch, type(None)):\n action = self.DELETE_ACTION\n elif self.master_id and isinstance(patch, dict):\n action = self.UPDATE_ACTION\n patch = self.__make_patch_storeable(patch)\n yield self._lazy_migration(meta=copy.deepcopy(meta), toa=toa-1)\n elif not self.master_id and isinstance(patch, dict):\n action = self.INSERT_ACTION\n patch[\"_id\"] = ObjectId()\n self.master_id = patch[\"_id\"].__str__()\n elif not action:\n raise RevisionActionNotValid()\n if patch and patch.get(\"_id\"):\n del patch[\"_id\"]\n change = {\n \"toa\": toa,\n \"processed\": False,\n \"collection\": self.collection_name,\n \"master_id\": self.master_id,\n \"action\": action,\n \"patch\" : None if action == self.DELETE_ACTION else self.collection._dictionary_to_cursor(patch),\n \"meta\": meta\n }\n jsonschema.validate(change, self.SCHEMA)\n id = yield self.revisions.insert(change)\n raise Return(id)", - "docstring": "Push a change on to the revision stack for this ObjectId. 
Pushing onto the stack is how you\n get revisions to be staged and scheduled for some future time.\n\n :param dict patch: None Denotes Delete\n :param int toa: Time of action\n :param dict meta: The meta data for this action" - }, - { - "code": "def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):\n assert 0 <= t0 <= 1 and 0 <= t1 <= 1\n if _quad_available:\n return quad(lambda tau: abs(self.derivative(tau)), t0, t1,\n epsabs=error, limit=1000)[0]\n else:\n return segment_length(self, t0, t1, self.point(t0), self.point(t1),\n error, min_depth, 0)", - "docstring": "The length of an elliptical large_arc segment requires numerical\n integration, and in that case it's simpler to just do a geometric\n approximation, as for cubic bezier curves." - }, - { - "code": "def get_by_id(self, id_networkv4):\n uri = 'api/networkv4/%s/' % id_networkv4\n return super(ApiNetworkIPv4, self).get(uri)", - "docstring": "Get IPv4 network\n\n :param id_networkv4: ID for NetworkIPv4\n\n :return: IPv4 Network" - }, - { - "code": "def day_fraction(time):\n hour = int(time.split(\":\")[0])\n minute = int(time.split(\":\")[1])\n return hour/24 + minute/1440", - "docstring": "Convert a 24-hour time to a fraction of a day.\n\n For example, midnight corresponds to 0.0, and noon to 0.5.\n\n :param time: Time in the form of 'HH:MM' (24-hour time)\n :type time: string\n\n :return: A day fraction\n :rtype: float\n\n :Examples:\n\n .. code-block:: python\n\n day_fraction(\"18:30\")" - }, - { - "code": "def format(self, version=None, wipe=None):\n return super(Type3Tag, self).format(version, wipe)", - "docstring": "Format and blank an NFC Forum Type 3 Tag.\n\n A generic NFC Forum Type 3 Tag can be (re)formatted if it is\n in either one of blank, initialized or readwrite state. By\n formatting, all contents of the attribute information block is\n overwritten with values determined. The number of user data\n blocks is determined by reading all memory until an error\n response. Similarily, the maximum number of data block that\n can be read or written with a single command is determined by\n sending successively increased read and write commands. The\n current data length is set to zero. The NDEF mapping version\n is set to the latest known version number (1.0), unless the\n *version* argument is provided and it's major version number\n corresponds to one of the known major version numbers.\n\n By default, no data other than the attribute block is\n modified. To overwrite user data the *wipe* argument must be\n set to an integer value. The lower 8 bits of that value are\n written to all data bytes that follow the attribute block." 
- }, - { - "code": "def _audio_item(self, stream_url=None, offset=0, push_buffer=True, opaque_token=None):\n audio_item = {'stream': {}}\n stream = audio_item['stream']\n if not stream_url:\n stream['url'] = current_stream.url\n stream['token'] = current_stream.token\n stream['offsetInMilliseconds'] = current_stream.offsetInMilliseconds\n else:\n stream['url'] = stream_url\n stream['token'] = opaque_token or str(uuid.uuid4())\n stream['offsetInMilliseconds'] = offset\n if push_buffer:\n push_stream(stream_cache, context['System']['user']['userId'], stream)\n return audio_item", - "docstring": "Builds an AudioPlayer Directive's audioItem and updates current_stream" - }, - { - "code": "def getAngle(self, mode='deg'):\n if self.refresh is True:\n self.getMatrix()\n try:\n if self.mflag:\n if mode == 'deg':\n return self.bangle / np.pi * 180\n else:\n return self.bangle\n else:\n return 0\n except AttributeError:\n print(\"Please execute getMatrix() first.\")", - "docstring": "return bend angle\n\n :param mode: 'deg' or 'rad'\n :return: deflecting angle in RAD" - }, - { - "code": "def start_event(self, event_type, *args, dt=1/60):\n if not any(self.__yield_handlers(event_type)):\n return\n def on_time_interval(dt):\n self.dispatch_event(event_type, *args, dt)\n pyglet.clock.schedule_interval(on_time_interval, dt)\n self.__timers[event_type] = on_time_interval", - "docstring": "Begin dispatching the given event at the given frequency.\n\n Calling this method will cause an event of type *event_type* with \n arguments *args* to be dispatched every *dt* seconds. This will \n continue until `stop_event()` is called for the same event.\n\n These continuously firing events are useful if, for example, you want \n to make a button that scrolls for as long as it's being held." - }, - { - "code": "def rename_notes_folder(self, title, folderid):\n if self.standard_grant_type is not \"authorization_code\":\n raise DeviantartError(\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\")\n response = self._req('/notes/folders/rename/{}'.format(folderid), post_data={\n 'title' : title\n })\n return response", - "docstring": "Rename a folder\n\n :param title: New title of the folder\n :param folderid: The UUID of the folder to rename" - }, - { - "code": "def exit_after(s):\n def outer(fn):\n def inner(*args, **kwargs):\n timer = threading.Timer(s, thread.interrupt_main)\n timer.start()\n try:\n result = fn(*args, **kwargs)\n except KeyboardInterrupt:\n raise TimeoutError(\"Function '{}' hit the timeout ({}s).\".format(fn.__name__, s))\n finally:\n timer.cancel()\n return result\n return inner\n return outer", - "docstring": "Use as decorator to exit process if\n function takes longer than s seconds.\n\n Direct call is available via exit_after(TIMEOUT_IN_S)(fce)(args).\n\n Inspired by https://stackoverflow.com/a/31667005" - }, - { - "code": "def register(self, extensions):\n for ext in reversed(extensions):\n for name in ext.names:\n try:\n self._extensions[name].appendleft(ext)\n except KeyError:\n self._extensions[name] = deque([ext])", - "docstring": "Registers extensions." 
- }, - { - "code": "def exhaustive_label_check( self,\n ontology:pd.DataFrame,\n label_predicate='rdfs:label',\n diff:bool=True, ) -> Tuple[list]:\n inside, outside = [], []\n header = ['Index'] + list(ontology.columns)\n for row in ontology.itertuples():\n row = {header[i]:val for i, val in enumerate(row)}\n label_obj = row[label_predicate]\n if isinstance(label_obj, list):\n if len(label_obj) != 1:\n exit('Need to have only 1 label in the cell from the onotology.')\n else:\n label_obj = label_obj[0]\n entity_label = self.local_degrade(label_obj)\n ilx_rows = self.label2rows.get(entity_label)\n if ilx_rows:\n inside.append({\n 'external_ontology_row': row,\n 'ilx_rows': ilx_rows,\n })\n else:\n outside.append(row)\n if diff:\n diff = self.__exhaustive_diff(inside)\n return inside, outside, diff\n return inside, outside", - "docstring": "All entities with conflicting labels gets a full diff\n\n Args:\n ontology: pandas DataFrame created from an ontology where the colnames are predicates\n and if classes exist it is also thrown into a the colnames.\n label_predicate: usually in qname form and is the colname of the DataFrame for the label\n diff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2\n Returns:\n inside: entities that are inside of InterLex\n outside: entities NOT in InterLex\n diff (optional): List[List[dict]]... so complicated but usefull diff between matches only" - }, - { - "code": "def get(self, copy=False):\n array = getattr(self.owner, self.name)\n if copy:\n return array.copy()\n else:\n return array", - "docstring": "Return the value of the attribute" - }, - { - "code": "def collect(self):\n cmd = [self.config['bin'], \"list\"]\n if str_to_bool(self.config['reset']):\n cmd.append(\"reset\")\n if str_to_bool(self.config['use_sudo']):\n cmd.insert(0, self.config['sudo_cmd'])\n matcher = re.compile(\"{ pkts = (.*), bytes = (.*) } = (.*);\")\n lines = Popen(cmd, stdout=PIPE).communicate()[0].strip().splitlines()\n for line in lines:\n matches = re.match(matcher, line)\n if matches:\n num_packets = int(matches.group(1))\n num_bytes = int(matches.group(2))\n name = matches.group(3)\n self.publish(name + \".pkts\", num_packets)\n self.publish(name + \".bytes\", num_bytes)", - "docstring": "Collect and publish netfilter counters" - }, - { - "code": "def pool_list_volumes(name, **kwargs):\n conn = __get_conn(**kwargs)\n try:\n pool = conn.storagePoolLookupByName(name)\n return pool.listVolumes()\n finally:\n conn.close()", - "docstring": "List the volumes contained in a defined libvirt storage pool.\n\n :param name: libvirt storage pool name\n :param connection: libvirt connection URI, overriding defaults\n :param username: username to connect with, overriding defaults\n :param password: password to connect with, overriding defaults\n\n .. versionadded:: 2019.2.0\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt \"*\" virt.pool_list_volumes " - }, - { - "code": "def Evaluate(self, client_obj):\n if self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ALL:\n quantifier = all\n elif self.match_mode == ForemanClientRuleSet.MatchMode.MATCH_ANY:\n quantifier = any\n else:\n raise ValueError(\"Unexpected match mode value: %s\" % self.match_mode)\n return quantifier(rule.Evaluate(client_obj) for rule in self.rules)", - "docstring": "Evaluates rules held in the rule set.\n\n Args:\n client_obj: Either an aff4 client object or a client_info dict as returned\n by ReadFullInfoClient if the relational db is used for reading.\n\n Returns:\n A bool value of the evaluation.\n\n Raises:\n ValueError: The match mode is of unknown value." - }, - { - "code": "def load_meta_data(self, path=None):\n if not path:\n path = self.state.file_system_path\n if path is None:\n self.meta = Vividict({})\n return False\n path_meta_data = os.path.join(path, storage.FILE_NAME_META_DATA)\n if not os.path.exists(path_meta_data):\n logger.debug(\"Because meta data was not found in {0} use backup option {1}\"\n \"\".format(path_meta_data, os.path.join(path, storage.FILE_NAME_META_DATA_OLD)))\n path_meta_data = os.path.join(path, storage.FILE_NAME_META_DATA_OLD)\n try:\n tmp_meta = storage.load_data_file(path_meta_data)\n except ValueError as e:\n if not path.startswith(constants.RAFCON_TEMP_PATH_STORAGE) and not os.path.exists(os.path.dirname(path)):\n logger.debug(\"Because '{1}' meta data of {0} was not loaded properly.\".format(self, e))\n tmp_meta = {}\n tmp_meta = Vividict(tmp_meta)\n if tmp_meta:\n self._parse_for_element_meta_data(tmp_meta)\n self.meta = tmp_meta\n self.meta_signal.emit(MetaSignalMsg(\"load_meta_data\", \"all\", True))\n return True\n else:\n return False", - "docstring": "Load meta data of state model from the file system\n\n The meta data of the state model is loaded from the file system and stored in the meta property of the model.\n Existing meta data is removed. Also the meta data of all state elements (data ports, outcomes,\n etc) are loaded, as those stored in the same file as the meta data of the state.\n\n This is either called on the __init__ of a new state model or if a state model for a container state is created,\n which then calls load_meta_data for all its children.\n\n :param str path: Optional file system path to the meta data file. 
If not given, the path will be derived from\n the state's path on the filesystem\n :return: if meta data file was loaded True otherwise False\n :rtype: bool" - }, - { - "code": "def fast_median(a):\n a = checkma(a)\n if a.count() > 0:\n out = np.percentile(a.compressed(), 50)\n else:\n out = np.ma.masked\n return out", - "docstring": "Fast median operation for masked array using 50th-percentile" - }, - { - "code": "def list_workspaces(self,\n page_limit=None,\n include_count=None,\n sort=None,\n cursor=None,\n include_audit=None,\n **kwargs):\n headers = {}\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n sdk_headers = get_sdk_headers('conversation', 'V1', 'list_workspaces')\n headers.update(sdk_headers)\n params = {\n 'version': self.version,\n 'page_limit': page_limit,\n 'include_count': include_count,\n 'sort': sort,\n 'cursor': cursor,\n 'include_audit': include_audit\n }\n url = '/v1/workspaces'\n response = self.request(\n method='GET',\n url=url,\n headers=headers,\n params=params,\n accept_json=True)\n return response", - "docstring": "List workspaces.\n\n List the workspaces associated with a Watson Assistant service instance.\n This operation is limited to 500 requests per 30 minutes. For more information,\n see **Rate limiting**.\n\n :param int page_limit: The number of records to return in each page of results.\n :param bool include_count: Whether to include information about the number of\n records returned.\n :param str sort: The attribute by which returned workspaces will be sorted. To\n reverse the sort order, prefix the value with a minus sign (`-`).\n :param str cursor: A token identifying the page of results to retrieve.\n :param bool include_audit: Whether to include the audit properties (`created` and\n `updated` timestamps) in the response.\n :param dict headers: A `dict` containing the request headers\n :return: A `DetailedResponse` containing the result, headers and HTTP status code.\n :rtype: DetailedResponse" - }, - { - "code": "def token(self, token_address: Address) -> Token:\n if not is_binary_address(token_address):\n raise ValueError('token_address must be a valid address')\n with self._token_creation_lock:\n if token_address not in self.address_to_token:\n self.address_to_token[token_address] = Token(\n jsonrpc_client=self.client,\n token_address=token_address,\n contract_manager=self.contract_manager,\n )\n return self.address_to_token[token_address]", - "docstring": "Return a proxy to interact with a token." - }, - { - "code": "def process_memberdocs(self, docs, codeEl, add=True):\n remainingdocs = []\n expandeddocs = []\n for doc in docs:\n if isinstance(doc, DocGroup):\n kids = self._process_docgroup(doc, codeEl, add)\n expandeddocs.extend(kids)\n else:\n expandeddocs.append(doc)\n for doc in expandeddocs:\n if not self._process_docstrings(doc, codeEl.members, add):\n remainingdocs.append(doc)\n return remainingdocs", - "docstring": "Associates member type DocElements with their corresponding members\n in the specified code element. The element must have a dictionary of\n members already." 
- }, - { - "code": "def changelog():\n versions = [x for x in git.tags() if versioning.is_valid(x[1:])]\n cmd = 'git log --format=%H'\n if versions:\n cmd += ' {}..HEAD'.format(versions[-1])\n hashes = shell.run(cmd, capture=True).stdout.strip().splitlines()\n commits = [git.CommitDetails.get(h) for h in hashes]\n tags = conf.get('changelog.tags', [\n {'header': 'Features', 'tag': 'feature'},\n {'header': 'Changes', 'tag': 'change'},\n {'header': 'Fixes', 'tag': 'fix'},\n ])\n results = OrderedDict((\n (x['header'], []) for x in tags\n ))\n for commit in commits:\n commit_items = extract_changelog_items(commit.desc, tags)\n for header, items in commit_items.items():\n results[header] += items\n lines = [\n '<35>v{}<0>'.format(versioning.current()),\n '',\n ]\n for header, items in results.items():\n if items:\n lines += [\n '',\n '<32>{}<0>'.format(header),\n '<32>{}<0>'.format('-' * len(header)),\n '',\n ]\n for item_text in items:\n item_lines = textwrap.wrap(item_text, 77)\n lines += ['- {}'.format('\\n '.join(item_lines))]\n lines += ['']\n return '\\n'.join(lines)", - "docstring": "Print change log since last release." - }, - { - "code": "def dispatch(self, request, *args, **kwargs):\n self.request = request\n self.args = args\n self.kwargs = kwargs\n response = self.check_permissions(request)\n if response:\n return response\n return super().dispatch(request, *args, **kwargs)", - "docstring": "Dispatches an incoming request." - }, - { - "code": "def _process_parse_dates_argument(parse_dates):\n if parse_dates is True or parse_dates is None or parse_dates is False:\n parse_dates = []\n elif not hasattr(parse_dates, '__iter__'):\n parse_dates = [parse_dates]\n return parse_dates", - "docstring": "Process parse_dates argument for read_sql functions" - }, - { - "code": "def visit_Name(self, node):\n if isinstance(node.ctx, ast.Store):\n self.result[node.id] = True", - "docstring": "Stored variable have new value." - }, - { - "code": "def to(self, jid: str):\n if jid is not None and not isinstance(jid, str):\n raise TypeError(\"'to' MUST be a string\")\n self._to = aioxmpp.JID.fromstr(jid) if jid is not None else None", - "docstring": "Set jid of the receiver.\n\n Args:\n jid (str): the jid of the receiver." - }, - { - "code": "def is_vert_aligned(c):\n return all(\n [\n _to_span(c[i]).sentence.is_visual()\n and bbox_vert_aligned(\n bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))\n )\n for i in range(len(c))\n ]\n )", - "docstring": "Return true if all the components of c are vertically aligned.\n\n Vertical alignment means that the bounding boxes of each Mention of c\n shares a similar x-axis value in the visual rendering of the document.\n\n :param c: The candidate to evaluate\n :rtype: boolean" - }, - { - "code": "def find_pulls(self, testpulls=None):\n result = {}\n for lname, repo in self.repositories.items():\n if lname not in self.archive:\n raise ValueError(\"Trying to find pull requests for a repository \"\n \"that hasn't been installed. 
Use server.install().\")\n if self.runnable is not None and lname not in self.runnable:\n continue\n pulls = testpulls if testpulls is not None else repo.repo.get_pulls(\"open\")\n result[lname] = []\n for pull in pulls:\n newpull = True\n if pull.snumber in self.archive[lname]:\n if self.archive[lname][pull.snumber][\"completed\"] == True:\n newpull = False\n if newpull:\n result[lname].append(PullRequest(self, repo, pull, testpulls is not None))\n return result", - "docstring": "Finds a list of new pull requests that need to be processed.\n\n :arg testpulls: a list of tserver.FakePull instances so we can test the code\n functionality without making live requests to github." - }, - { - "code": "def handle(self, *args, **options):\n self.db = options.get(\"database\", DEFAULT_DB_ALIAS)\n self.current_name = connections[self.db].settings_dict[\"NAME\"]\n self.compare_name = options.get(\"db_name\")\n self.lines = options.get(\"lines\")\n self.ignore = int(options.get('ignore'))\n if not self.compare_name:\n self.compare_name = \"%s_compare\" % self.current_name\n command = NASHVEGAS.get(\"dumpdb\", \"pg_dump -s {dbname}\")\n print \"Getting schema for current database...\"\n current_sql = Popen(\n command.format(dbname=self.current_name),\n shell=True,\n stdout=PIPE\n ).stdout.readlines()\n print \"Getting schema for fresh database...\"\n self.setup_database()\n connections[self.db].close()\n connections[self.db].settings_dict[\"NAME\"] = self.compare_name\n try:\n call_command(\"syncdb\", interactive=False, verbosity=0, migrations=False)\n new_sql = Popen(\n command.format(dbname=self.compare_name).split(),\n stdout=PIPE\n ).stdout.readlines()\n finally:\n connections[self.db].close()\n connections[self.db].settings_dict[\"NAME\"] = self.current_name\n self.teardown_database()\n print \"Outputing diff between the two...\"\n print \"\".join(difflib.unified_diff(normalize_sql(current_sql, self.ignore),\n normalize_sql(new_sql, self.ignore),\n n=int(self.lines)))", - "docstring": "Compares current database with a migrations.\n \n Creates a temporary database, applies all the migrations to it, and\n then dumps the schema from both current and temporary, diffs them,\n then report the diffs to the user." - }, - { - "code": "def admin_tools_render_menu_css(context, menu=None):\n if menu is None:\n menu = get_admin_menu(context)\n context.update({\n 'template': 'admin_tools/menu/css.html',\n 'css_files': menu.Media.css,\n })\n return context", - "docstring": "Template tag that renders the menu css files,, it takes an optional\n ``Menu`` instance as unique argument, if not given, the menu will be\n retrieved with the ``get_admin_menu`` function." 
- }, - { - "code": "def _get_bb_addr_from_instr(self, instr):\n current_method = self.state.addr.method\n try:\n bb = current_method.block_by_label[instr]\n except KeyError:\n l.error(\"Possible jump to a non-existing bb %s --> %d\",\n self.state.addr, instr)\n raise IncorrectLocationException()\n return SootAddressDescriptor(current_method, bb.idx, 0)", - "docstring": "Returns the address of the methods basic block that contains the given\n instruction.\n\n :param instr: The index of the instruction (within the current method).\n :rtype: SootAddressDescriptor" - }, - { - "code": "def show(self):\n try:\n if platform.system().lower().startswith('darwin'):\n subprocess.call(['open', self.pdf])\n elif os.name == 'nt':\n os.startfile(self.pdf)\n elif os.name == 'posix':\n subprocess.call(['xdg-open', self.pdf])\n else:\n raise IOError(\"\")\n except IOError:\n log.info(\"Unable to open the pdf. Try opening it manually:\")\n log.info(self.pdf)", - "docstring": "Show the overfitting PDF summary." - }, - { - "code": "def push(self, item, *, index=None):\n if index is None:\n self.__list.append(item)\n else:\n self.__list.insert(index, item)\n name = getattr(item, 'name', None)\n if name is not None:\n self.__dict[name] = item", - "docstring": "Push item to the chain." - }, - { - "code": "def init_driver(client_id):\n profile_path = CHROME_CACHE_PATH + str(client_id)\n if not os.path.exists(profile_path):\n os.makedirs(profile_path)\n chrome_options = [\n 'window-size=' + CHROME_WINDOW_SIZE,\n '--user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/60.0.3112.78 Chrome/60.0.3112.78 Safari/537.36'\n ]\n if CHROME_IS_HEADLESS:\n chrome_options.append('--headless')\n if CHROME_DISABLE_GPU:\n chrome_options.append('--disable-gpu')\n d = WhatsAPIDriver(\n username=client_id, \n profile=profile_path, \n client='chrome', \n chrome_options=chrome_options\n )\n return d", - "docstring": "Initialises a new driver via webwhatsapi module\n \n @param client_id: ID of user client\n @return webwhatsapi object" - }, - { - "code": "def UpdateSet(self, dataset):\n for data in dataset:\n for hypo in self.Values():\n like = self.Likelihood(data, hypo)\n self.Mult(hypo, like)\n return self.Normalize()", - "docstring": "Updates each hypothesis based on the dataset.\n\n This is more efficient than calling Update repeatedly because\n it waits until the end to Normalize.\n\n Modifies the suite directly; if you want to keep the original, make\n a copy.\n\n dataset: a sequence of data\n\n returns: the normalizing constant" - }, - { - "code": "def build(self, connection, grammar):\n for statement in self.to_sql(connection, grammar):\n connection.statement(statement)", - "docstring": "Execute the blueprint against the database.\n\n :param connection: The connection to use\n :type connection: orator.connections.Connection\n\n :param grammar: The grammar to user\n :type grammar: orator.query.grammars.QueryGrammar" - }, - { - "code": "def start(self):\n self._patcher = mock.patch(target=self.target)\n MockClient = self._patcher.start()\n instance = MockClient.return_value\n instance.model.side_effect = mock.Mock(\n side_effect=self.model\n )", - "docstring": "Start the patch" - }, - { - "code": "def _set_value(self, slot_record):\n if slot_record.status == _SlotRecord.FILLED:\n self.filled = True\n self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(\n slot_record)\n self._fill_datetime = slot_record.fill_time\n self._value = slot_record.value", - "docstring": "Sets 
the value of this slot based on its corresponding _SlotRecord.\n\n Does nothing if the slot has not yet been filled.\n\n Args:\n slot_record: The _SlotRecord containing this Slot's value." - }, - { - "code": "def _build_block_context(template, context):\n if BLOCK_CONTEXT_KEY not in context.render_context:\n context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()\n block_context = context.render_context[BLOCK_CONTEXT_KEY]\n for node in template.nodelist:\n if isinstance(node, ExtendsNode):\n compiled_parent = node.get_parent(context)\n block_context.add_blocks(\n {n.name: n for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)})\n _build_block_context(compiled_parent, context)\n return compiled_parent\n if not isinstance(node, TextNode):\n break", - "docstring": "Populate the block context with BlockNodes from parent templates." - }, - { - "code": "def _node_participation_settings(self):\n try:\n return self.node_participation_settings\n except ObjectDoesNotExist:\n node_participation_settings = NodeParticipationSettings(node=self)\n node_participation_settings.save()\n return node_participation_settings", - "docstring": "Return node_participation_settings record\n or create it if it does not exist\n\n usage:\n node = Node.objects.get(pk=1)\n node.participation_settings" - }, - { - "code": "def returnDepositsWithdrawals(self, start=0, end=2**32-1):\n return self._private('returnDepositsWithdrawals', start=start, end=end)", - "docstring": "Returns your deposit and withdrawal history within a range,\n specified by the \"start\" and \"end\" POST parameters, both of which\n should be given as UNIX timestamps." - }, - { - "code": "def string_to_file(path, input):\n mkdir_p(os.path.dirname(path))\n with codecs.open(path, \"w+\", \"UTF-8\") as file:\n file.write(input)", - "docstring": "Write a file from a given string." - }, - { - "code": "def raw_diff(self):\n udiff_copy = self.copy_iterator()\n if self.__format == 'gitdiff':\n udiff_copy = self._parse_gitdiff(udiff_copy)\n return u''.join(udiff_copy)", - "docstring": "Returns raw string as udiff" - }, - { - "code": "def filenames(self):\n fns = set()\n for fn2var in self._type2filename2variable.values():\n fns.update(fn2var.keys())\n return sorted(fns)", - "docstring": "A list of all handled auxiliary file names.\n\n >>> from hydpy import dummies\n >>> dummies.v2af.filenames\n ['file1', 'file2']" - }, - { - "code": "def get_container_id(self, container_id=None):\n if container_id == None and self.container_id == None:\n bot.exit('You must provide a container_id.')\n container_id = container_id or self.container_id\n return container_id", - "docstring": "a helper function shared between functions that will return a \n container_id. First preference goes to a container_id provided by\n the user at runtime. Second preference goes to the container_id\n instantiated with the client.\n\n Parameters\n ==========\n container_id: image uri to parse (required)" - }, - { - "code": "def AllBalancesZeroOrLess(self):\n for key, fixed8 in self.Balances.items():\n if fixed8.value > 0:\n return False\n return True", - "docstring": "Flag indicating if all balances are 0 or less.\n\n Returns:\n bool: True if all balances are <= 0. False, otherwise." - }, - { - "code": "def connect_db(Repo, database=\":memory:\"):\n Repo.db = sqlite3.connect(database,\n detect_types=sqlite3.PARSE_DECLTYPES)\n return Repo.db", - "docstring": "Connect Repo to a database with path +database+ so all instances can\n interact with the database." 
- }, - { - "code": "def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):\n return len(get_task_trackers(properties, hadoop_conf_dir, offline))", - "docstring": "Get the number of task trackers in the Hadoop cluster.\n\n All arguments are passed to :func:`get_task_trackers`." - }, - { - "code": "def is_valid(self, field_name, value) -> (bool, object):\n if self.has_field(field_name):\n if self.fields_dict[field_name] == FieldType.KG_ID:\n return True, value\n if self.fields_dict[field_name] == FieldType.NUMBER:\n if isinstance(value, numbers.Number):\n return True, value\n else:\n converted_number = self.parse_number(value)\n return (False, value) if not converted_number else (True, value)\n if self.fields_dict[field_name] == FieldType.STRING:\n if isinstance(value, str):\n return True, value.strip()\n else:\n return True, str(value).strip()\n if self.fields_dict[field_name] == FieldType.DATE:\n valid, d = self.is_date(value)\n if valid:\n return True, d.isoformat()\n else:\n return False, value\n if self.fields_dict[field_name] == FieldType.LOCATION:\n valid, l = self.is_location(value)\n if valid:\n return True, l\n else:\n return False, value\n else:\n print('{} not found in KG Schema'.format(field_name))\n return False, value", - "docstring": "Return true if the value type matches or can be coerced to the defined type in schema, otherwise false.\n If field not defined, return none\n\n Args:\n field_name: str\n value:\n\n Returns: bool, value, where the value may have been coerced to the required type." - }, - { - "code": "def namedb_get_account_tokens(cur, address):\n sql = 'SELECT DISTINCT type FROM accounts WHERE address = ?;'\n args = (address,)\n rows = namedb_query_execute(cur, sql, args)\n ret = []\n for row in rows:\n ret.append(row['type'])\n return ret", - "docstring": "Get an account's tokens\n Returns the list of tokens on success\n Returns None if not found" - }, - { - "code": "def extra_info(port):\n extra_items = []\n if port.manufacturer:\n extra_items.append(\"vendor '{}'\".format(port.manufacturer))\n if port.serial_number:\n extra_items.append(\"serial '{}'\".format(port.serial_number))\n if port.interface:\n extra_items.append(\"intf '{}'\".format(port.interface))\n if extra_items:\n return ' with ' + ' '.join(extra_items)\n return ''", - "docstring": "Collects the serial nunber and manufacturer into a string, if\n the fields are available." - }, - { - "code": "def loadFromFile(self, filename):\n file = open(filename, 'rb')\n try:\n wsdl = self.loadFromStream(file)\n finally:\n file.close()\n return wsdl", - "docstring": "Return a WSDL instance loaded from the given file." - }, - { - "code": "def head(self, uuid):\n url = \"%(base)s/%(uuid)s\" % {\n 'base': self.local_base_url,\n 'uuid': uuid\n }\n return self.core.head(url)", - "docstring": "Get one thread." 
- }, - { - "code": "def chgroups(name, groups, append=False):\n uinfo = info(name)\n if not uinfo:\n raise CommandExecutionError('User \\'{0}\\' does not exist'.format(name))\n if isinstance(groups, string_types):\n groups = groups.split(',')\n bad_groups = [x for x in groups if salt.utils.stringutils.contains_whitespace(x)]\n if bad_groups:\n raise SaltInvocationError(\n 'Invalid group name(s): {0}'.format(', '.join(bad_groups))\n )\n ugrps = set(list_groups(name))\n desired = set(six.text_type(x) for x in groups if bool(six.text_type(x)))\n primary_group = __salt__['file.gid_to_group'](uinfo['gid'])\n if primary_group:\n desired.add(primary_group)\n if ugrps == desired:\n return True\n for group in desired - ugrps:\n _dscl(\n ['/Groups/{0}'.format(group), 'GroupMembership', name],\n ctype='append'\n )\n if not append:\n for group in ugrps - desired:\n _dscl(\n ['/Groups/{0}'.format(group), 'GroupMembership', name],\n ctype='delete'\n )\n time.sleep(1)\n return set(list_groups(name)) == desired", - "docstring": "Change the groups to which the user belongs. Note that the user's primary\n group does not have to be one of the groups passed, membership in the\n user's primary group is automatically assumed.\n\n groups\n Groups to which the user should belong, can be passed either as a\n python list or a comma-separated string\n\n append\n Instead of removing user from groups not included in the ``groups``\n parameter, just add user to any groups for which they are not members\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' user.chgroups foo wheel,root" - }, - { - "code": "def del_by_idx(tree, idxs):\n if len(idxs) == 0:\n tree['item'] = None\n tree['subtrees'] = []\n else:\n hidx, tidxs = idxs[0], idxs[1:]\n del_by_idx(tree['subtrees'][hidx][1], tidxs)\n if len(tree['subtrees'][hidx][1]['subtrees']) == 0:\n del tree['subtrees'][hidx]", - "docstring": "Delete a key entry based on numerical indexes into subtree lists." - }, - { - "code": "def _kname(obj):\n if isinstance(obj, dict):\n return [obj.get(\"metadata\", {}).get(\"name\", \"\")]\n elif isinstance(obj, (list, tuple)):\n names = []\n for i in obj:\n names.append(i.get(\"metadata\", {}).get(\"name\", \"\"))\n return names\n else:\n return \"Unknown type\"", - "docstring": "Get name or names out of json result from API server" - }, - { - "code": "def add_drop_down(self, col_number, col_label):\n if col_label in ['magic_method_codes', 'magic_method_codes++']:\n self.add_method_drop_down(col_number, col_label)\n if col_label in vocab.possible_vocabularies:\n if col_number not in list(self.choices.keys()):\n self.grid.SetColLabelValue(col_number, col_label + \"**\")\n url = 'http://api.earthref.org/MagIC/vocabularies/{}.json'.format(col_label)\n controlled_vocabulary = pd.io.json.read_json(url)\n stripped_list = []\n for item in controlled_vocabulary[col_label][0]:\n try:\n stripped_list.append(str(item['item']))\n except UnicodeEncodeError:\n pass\n if len(stripped_list) > 100:\n dictionary = {}\n for item in stripped_list:\n letter = item[0].upper()\n if letter not in list(dictionary.keys()):\n dictionary[letter] = []\n dictionary[letter].append(item)\n stripped_list = dictionary\n two_tiered = True if isinstance(stripped_list, dict) else False\n self.choices[col_number] = (stripped_list, two_tiered)", - "docstring": "Add a correctly formatted drop-down-menu for given col_label, if required.\n Otherwise do nothing." 
- }, - { - "code": "def get_plaintext_citations(bibtex):\n parser = BibTexParser()\n parser.customization = convert_to_unicode\n if os.path.isfile(bibtex):\n with open(bibtex) as fh:\n bib_database = bibtexparser.load(fh, parser=parser)\n else:\n bib_database = bibtexparser.loads(bibtex, parser=parser)\n bibentries = [bibentry_as_plaintext(bibentry)\n for bibentry in bib_database.entries]\n return bibentries", - "docstring": "Parse a BibTeX file to get a clean list of plaintext citations.\n\n :param bibtex: Either the path to the BibTeX file or the content of a \\\n BibTeX file.\n :returns: A list of cleaned plaintext citations." - }, - { - "code": "def _is_temporal_problem(exception):\n try:\n return isinstance(exception, (requests.ConnectionError, requests.Timeout))\n except AttributeError:\n return isinstance(exception, requests.ConnectionError)", - "docstring": "Checks if the obtained exception is temporal and if download attempt should be repeated\n\n :param exception: Exception raised during download\n :type exception: Exception\n :return: True if exception is temporal and False otherwise\n :rtype: bool" - }, - { - "code": "def probabilities(self, angles: List[float]) -> np.ndarray:\n if isinstance(angles, list):\n angles = np.array(angles)\n assert angles.shape[0] == 2 * self.steps, \"angles must be 2 * steps\"\n param_prog = self.get_parameterized_program()\n prog = param_prog(angles)\n wf = WavefunctionSimulator().wavefunction(prog)\n wf = wf.amplitudes.reshape((-1, 1))\n probs = np.zeros_like(wf)\n for xx in range(2 ** len(self.qubits)):\n probs[xx] = np.conj(wf[xx]) * wf[xx]\n return probs", - "docstring": "Computes the probability of each state given a particular set of angles.\n\n :param angles: A concatenated list of angles [betas]+[gammas]\n :return: The probabilities of each outcome given those angles." - }, - { - "code": "def get_pulse_s(self):\n try:\n dwelltime = self.ppg.dwelltime.mean\n beam_on = self.ppg.beam_on.mean\n except AttributeError:\n raise AttributeError(\"Missing logged ppg parameter: dwelltime \"+\\\n \"or beam_on\")\n return dwelltime*beam_on/1000.", - "docstring": "Get pulse duration in seconds, for pulsed measurements." - }, - { - "code": "def _merge_DC_to_base(self, X_DC, X_base, no_DC):\n if X_base is not None:\n reg_sol = np.linalg.lstsq(X_DC, X_base)\n if not no_DC:\n if not np.any(np.isclose(reg_sol[1], 0)):\n X_base = np.concatenate((X_DC, X_base), axis=1)\n idx_DC = np.arange(0, X_DC.shape[1])\n else:\n logger.warning('Provided regressors for uninteresting '\n 'time series already include baseline. '\n 'No additional baseline is inserted.')\n idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]\n else:\n idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]\n else:\n X_base = X_DC\n idx_DC = np.arange(0, X_base.shape[1])\n logger.info('You did not provide time series of no interest '\n 'such as DC component. Trivial regressors of'\n ' DC component are included for further modeling.'\n ' The final covariance matrix won''t '\n 'reflect these components.')\n return X_DC, X_base, idx_DC", - "docstring": "Merge DC components X_DC to the baseline time series\n X_base (By baseline, this means any fixed nuisance\n regressors not updated during fitting, including DC\n components and any nuisance regressors provided by\n the user.\n X_DC is always in the first few columns of X_base." 
- }, - { - "code": "def save_volt(elecs, volt, filename):\n content = np.column_stack((elecs, volt, np.zeros(len(volt))))\n with open(filename, 'w') as fid:\n fid.write('{0}\\n'.format(content.shape[0]))\n with open(filename, 'ab') as fid:\n np.savetxt(fid, np.array(content), fmt='%i %i %f %f')", - "docstring": "Save the values in volt-format." - }, - { - "code": "def _simplify_function(self):\n rd = self.project.analyses.ReachingDefinitions(func=self.function, func_graph=self.graph, observe_all=True)\n simp = self.project.analyses.AILSimplifier(self.function, func_graph=self.graph, reaching_definitions=rd)\n for key in list(self._blocks.keys()):\n old_block = self._blocks[key]\n if old_block in simp.blocks:\n self._blocks[key] = simp.blocks[old_block]\n self._update_graph()", - "docstring": "Simplify the entire function.\n\n :return: None" - }, - { - "code": "def remove_group_roles(request, group, domain=None, project=None):\n client = keystoneclient(request, admin=True)\n roles = client.roles.list(group=group, domain=domain, project=project)\n for role in roles:\n remove_group_role(request, role=role.id, group=group,\n domain=domain, project=project)", - "docstring": "Removes all roles from a group on a domain or project." - }, - { - "code": "def attr_descriptor(action, *names):\n if isinstance(action, AttributeActions):\n action = [action]\n def wrapped(fn):\n if not hasattr(fn, '__jsonapi_action__'):\n fn.__jsonapi_action__ = set()\n fn.__jsonapi_desc_for_attrs__ = set()\n fn.__jsonapi_desc_for_attrs__ |= set(names)\n fn.__jsonapi_action__ |= set(action)\n return fn\n return wrapped", - "docstring": "Wrap a function that allows for getting or setting of an attribute. This\n allows for specific handling of an attribute when it comes to serializing\n and deserializing.\n\n :param action: The AttributeActions that this descriptor performs\n :param names: A list of names of the attributes this references" - }, - { - "code": "def discard_plugin_preset(self):\n if self.has_plugin_preset:\n for name, plugin in list(self._active_plugins.items()):\n if id(plugin) in self._provided_by_preset:\n self.release_plugin(name)\n self._active_preset.deactivate(self)\n self._active_preset = None", - "docstring": "Discard the current active preset. Will release any active plugins that could have come from the old preset." 
- }, - { - "code": "def traverse_preorder(self, leaves=True, internal=True):\n for node in self.root.traverse_preorder(leaves=leaves, internal=internal):\n yield node", - "docstring": "Perform a preorder traversal of the ``Node`` objects in this ``Tree``\n\n Args:\n ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``" - }, - { - "code": "def columns_by_index(self) -> Dict[str, List[Well]]:\n col_dict = self._create_indexed_dictionary(group=2)\n return col_dict", - "docstring": "Accessor function used to navigate through a labware by column name.\n\n With indexing one can treat it as a typical python dictionary.\n To access row A for example,\n simply write: labware.columns_by_index()['1']\n This will output ['A1', 'B1', 'C1', 'D1'...].\n\n :return: Dictionary of Well lists keyed by column name" - }, - { - "code": "def csv_response_from_context(context=None, filename=None, field_names=None, null_string='', eval_python=True):\n filename = filename or context.get('filename') or 'table_download.csv'\n field_names = field_names or context.get('field_names', [])\n if field_names and all(field_names) and all(all(c in (string.letters + string.digits + '_.') for c in s) for s in field_names):\n eval_python=False\n data = context\n if not (isinstance(data, (tuple, list)) and isinstance(data[0], (tuple, list))):\n data = json.loads(data.get('data', {}).get('d3data', '[[]]'))\n if not data or not any(data):\n data = context.get('data', {}).get('cases', [[]])\n if not isinstance(data, (list, tuple)) or not isinstance(data[0], (list, tuple)):\n data = table_generator_from_list_of_instances(data, field_names=field_names, eval_python=eval_python)\n try:\n if len(data) < len(data[0]):\n data = util.transposed_lists(data)\n except TypeError:\n pass\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n writer = csv.writer(response)\n for row in data:\n newrow = []\n for s in row:\n try:\n newrow.append(s.encode('utf-8'))\n except AttributeError:\n newrow.append(s)\n except:\n newrow.append(unicode(s))\n writer.writerow(newrow)\n return response", - "docstring": "Generate the response for a Download CSV button from data within the context dict\n\n The CSV data must be in one of these places/formats:\n\n * context as a list of lists of python values (strings for headers in first list)\n * context['data']['d3data'] as a string in json format (python) for a list of lists of repr(python_value)s\n * context['data']['cases'] as a list of lists of python values (strings for headers in first list)\n * context['data']['cases'] as a django queryset or iterable of model instances (list, tuple, generator)\n\n If the input data is a list of lists (table) that has more columns that rows it will be trasposed before being processed" - }, - { - "code": "def main():\n args = GetArgs()\n if args.password:\n password = args.password\n else:\n password = getpass.getpass(prompt='Enter password for host %s and '\n 'user %s: ' % (args.host,args.user))\n context = None\n if hasattr(ssl, '_create_unverified_context'):\n context = ssl._create_unverified_context()\n si = SmartConnect(host=args.host,\n user=args.user,\n pwd=password,\n port=int(args.port),\n sslContext=context)\n if not si:\n print(\"Could not connect to the specified host using specified \"\n \"username and password\")\n return -1\n atexit.register(Disconnect, si)\n content = 
si.RetrieveContent()\n for child in content.rootFolder.childEntity:\n if hasattr(child, 'vmFolder'):\n datacenter = child\n vmFolder = datacenter.vmFolder\n vmList = vmFolder.childEntity\n for vm in vmList:\n PrintVmInfo(vm)\n return 0", - "docstring": "Simple command-line program for listing the virtual machines on a system." - }, - { - "code": "def get_files(*bases):\n for base in bases:\n basedir, _ = base.split(\".\", 1)\n base = os.path.join(os.path.dirname(__file__), *base.split(\".\"))\n rem = len(os.path.dirname(base)) + len(basedir) + 2\n for root, dirs, files in os.walk(base):\n for name in files:\n yield os.path.join(basedir, root, name)[rem:]", - "docstring": "List all files in a data directory." - }, - { - "code": "def delete_user_login(self, id, user_id):\r\n path = {}\r\n data = {}\r\n params = {}\r\n path[\"user_id\"] = user_id\r\n path[\"id\"] = id\r\n self.logger.debug(\"DELETE /api/v1/users/{user_id}/logins/{id} with query params: {params} and form data: {data}\".format(params=params, data=data, **path))\r\n return self.generic_request(\"DELETE\", \"/api/v1/users/{user_id}/logins/{id}\".format(**path), data=data, params=params, no_data=True)", - "docstring": "Delete a user login.\r\n\r\n Delete an existing login." - }, - { - "code": "def close(self):\r\n if not self.is_opened():\r\n return\r\n self.__open_status = False\r\n if self.__reading_thread and self.__reading_thread.is_alive():\r\n self.__reading_thread.abort()\r\n if self._input_report_queue:\r\n self._input_report_queue.release_events()\r\n if self.__input_processing_thread and \\\r\n self.__input_processing_thread.is_alive():\r\n self.__input_processing_thread.abort()\r\n if self.ptr_preparsed_data:\r\n ptr_preparsed_data = self.ptr_preparsed_data\r\n self.ptr_preparsed_data = None\r\n hid_dll.HidD_FreePreparsedData(ptr_preparsed_data)\r\n if self.__reading_thread:\r\n self.__reading_thread.join()\r\n if self.hid_handle:\r\n winapi.CloseHandle(self.hid_handle)\r\n if self.__input_processing_thread:\r\n self.__input_processing_thread.join()\r\n button_caps_storage = self.__button_caps_storage\r\n self.__reset_vars()\r\n while button_caps_storage:\r\n item = button_caps_storage.pop()\r\n del item", - "docstring": "Release system resources" - }, - { - "code": "def updateCalibration(self):\n if self.samplerate() != self._calibration_fs:\n self.setCalibration(self._attenuationVector, self._calFrequencies, self._calFrange)", - "docstring": "Updates the current calibration according to intenal values. For example, if the stimulus samplerate changes\n the calibration needs to be recalculated." - }, - { - "code": "def get_sources(zone, permanent=True):\n cmd = '--zone={0} --list-sources'.format(zone)\n if permanent:\n cmd += ' --permanent'\n return __firewall_cmd(cmd).split()", - "docstring": "List sources bound to a zone\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' firewalld.get_sources zone" - }, - { - "code": "def list_tag(self, limit=500, offset=0):\n evt = self._client._request_entity_tag_list(self.__lid, limit=limit, offset=offset)\n self._client._wait_and_except_if_failed(evt)\n return evt.payload['tags']", - "docstring": "List `all` the tags for this Thing\n\n Returns lists of tags, as below\n\n #!python\n [\n \"mytag1\",\n \"mytag2\"\n \"ein_name\",\n \"nochein_name\"\n ]\n\n - OR...\n\n Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)\n containing the error if the infrastructure detects a problem\n\n Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)\n if there is a communications problem between you and the infrastructure\n\n `limit` (optional) (integer) Return at most this many tags\n\n `offset` (optional) (integer) Return tags starting at this offset" - }, - { - "code": "def set_brightness(self, brightness, duration=0, rapid=False):\n color = self.get_color()\n color2 = (color[0], color[1], brightness, color[3])\n try:\n if rapid:\n self.fire_and_forget(LightSetColor, {\"color\": color2, \"duration\": duration}, num_repeats=1)\n else:\n self.req_with_ack(LightSetColor, {\"color\": color2, \"duration\": duration})\n except WorkflowException as e:\n raise", - "docstring": "brightness to set\n duration in ms" - }, - { - "code": "def get_symbol(x):\n hdl = SymbolHandle()\n check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))\n return Symbol(hdl)", - "docstring": "Retrieve recorded computation history as `Symbol`.\n\n Parameters\n ----------\n x : NDArray\n Array representing the head of computation graph.\n\n Returns\n -------\n Symbol\n The retrieved Symbol." - }, - { - "code": "def darklyrics(song):\n if not hasattr(song, 'album') or not song.album:\n song.fetch_album_name()\n if not hasattr(song, 'album') or not song.album:\n return ''\n artist = song.artist.lower()\n artist = normalize(artist, URLESCAPES, '')\n album = song.album.lower()\n album = normalize(album, URLESCAPES, '')\n title = song.title\n url = 'http://www.darklyrics.com/lyrics/{}/{}.html'.format(artist, album)\n soup = get_url(url)\n text = ''\n for header in soup.find_all('h3'):\n song = str(header.get_text())\n next_sibling = header.next_sibling\n if song.lower().find(title.lower()) != -1:\n while next_sibling is not None and\\\n (next_sibling.name is None or next_sibling.name != 'h3'):\n if next_sibling.name is None:\n text += str(next_sibling)\n next_sibling = next_sibling.next_sibling\n return text.strip()", - "docstring": "Returns the lyrics found in darklyrics for the specified mp3 file or an\n empty string if not found." 
- }, - { - "code": "def _set_fqdn(self):\n results = self._search(\n 'cn=config',\n '(objectClass=*)',\n ['nsslapd-localhost'],\n scope=ldap.SCOPE_BASE\n )\n if not results and type(results) is not list:\n r = None\n else:\n dn, attrs = results[0]\n r = attrs['nsslapd-localhost'][0].decode('utf-8')\n self._fqdn = r\n log.debug('FQDN: %s' % self._fqdn)", - "docstring": "Get FQDN from LDAP" - }, - { - "code": "def parse_datetime(value):\n match = datetime_re.match(value)\n if match:\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n tzinfo = kw.pop('tzinfo')\n if tzinfo == 'Z':\n tzinfo = utc\n elif tzinfo is not None:\n offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0\n offset = 60 * int(tzinfo[1:3]) + offset_mins\n if tzinfo[0] == '-':\n offset = -offset\n tzinfo = get_fixed_timezone(offset)\n kw = {k: int(v) for k, v in kw.items() if v is not None}\n kw['tzinfo'] = tzinfo\n return datetime.datetime(**kw)", - "docstring": "Parses a string and return a datetime.datetime.\n\n This function supports time zone offsets. When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raises ValueError if the input is well formatted but not a valid datetime.\n Returns None if the input isn't well formatted." - }, - { - "code": "def _setup_stats(self):\n self.stats_dict = {}\n redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],\n port=self.settings['REDIS_PORT'],\n db=self.settings.get('REDIS_DB'))\n try:\n redis_conn.info()\n self.logger.debug(\"Connected to Redis in StatsCollector Setup\")\n self.redis_conn = redis_conn\n except ConnectionError:\n self.logger.warn(\"Failed to connect to Redis in StatsCollector\"\n \" Setup, no stats will be collected\")\n return\n if self.settings['STATS_TOTAL']:\n self._setup_stats_total(redis_conn)\n if self.settings['STATS_PLUGINS']:\n self._setup_stats_plugins(redis_conn)", - "docstring": "Sets up the stats collection" - }, - { - "code": "def supports_undefined(self):\n try:\n yes = self(const.UNDEFINED) is not const.UNDEFINED\n except (Invalid, SchemaError):\n yes = False\n self.__dict__['supports_undefined'] = yes\n return yes", - "docstring": "Test whether this schema supports Undefined.\n\n A Schema that supports `Undefined`, when given `Undefined`, should return some value (other than `Undefined`)\n without raising errors.\n\n This is designed to support a very special case like that:\n\n ```python\n Schema(Default(0)).supports_undefined #-> True\n ```\n\n This way a validator can declare that it has a default in case no value was provided,\n and this case happens when:\n\n 1. A [`Required`](#required) mapping key was not provided, and it's mapped to `Default()`\n 2. .. no more supported cases. Yet.\n\n :rtype: bool" - }, - { - "code": "def dispatch(self, frame):\n if frame.type() == HeartbeatFrame.type():\n self.send_heartbeat()\n elif frame.type() == MethodFrame.type():\n if frame.class_id == 10:\n cb = self._method_map.get(frame.method_id)\n if cb:\n method = self.clear_synchronous_cb(cb)\n method(frame)\n else:\n raise Channel.InvalidMethod(\n \"unsupported method %d on channel %d\",\n frame.method_id, self.channel_id)\n else:\n raise Channel.InvalidClass(\n \"class %d is not supported on channel %d\",\n frame.class_id, self.channel_id)\n else:\n raise Frame.InvalidFrameType(\n \"frame type %d is not supported on channel %d\",\n frame.type(), self.channel_id)", - "docstring": "Override the default dispatch since we don't need the rest of\n the stack." 
- }, - { - "code": "def text_to_data(self, text, elt, ps):\n if self.strip: text = text.strip()\n if self.pyclass is not None:\n return self.pyclass(text.encode(UNICODE_ENCODING))\n return text.encode(UNICODE_ENCODING)", - "docstring": "convert text into typecode specific data.\n Encode all strings as UTF-8, which will be type 'str'\n not 'unicode'" - }, - { - "code": "def _read_isotopedatabase(self, ffname='isotopedatabase.txt'):\n name=self.sldir+ffname\n z_db, a_db, el_db, stable_a_db,logic_db=\\\n np.loadtxt(name,unpack=True,dtype='str')\n z_db=np.array(z_db,dtype='int')\n a_db=np.array(a_db,dtype='int')\n stable_a_db=np.array(stable_a_db,dtype='int')\n charge_from_element_name={}\n for name in self.stable_names:\n if name=='Neutron' or name=='Neut' or name=='NEUT' or name=='N-1':\n name='nn'\n try:\n zz=z_db[np.where(el_db==name)][0]\n charge_from_element_name[name]=zz\n except IndexError:\n print(name+\" does not exist in this run\")\n return z_db, a_db, el_db, stable_a_db,logic_db,charge_from_element_name", - "docstring": "This private method reads the isotopedatabase.txt file in sldir\n run dictory and returns z, a, elements, the cutoff mass for each\n species that delineate beta+ and beta- decay and the logical in\n the last column. Also provides charge_from_element dictionary\n according to isotopedatabase.txt." - }, - { - "code": "def run_on(*, event: str):\n def decorator(callback):\n @functools.wraps(callback)\n def decorator_wrapper():\n RTMClient.on(event=event, callback=callback)\n return decorator_wrapper()\n return decorator", - "docstring": "A decorator to store and link a callback to an event." - }, - { - "code": "def file_name_error(file_name):\n if file_name == '' or file_name[0][0] == '-':\n raise IOError('Input file name not specified.')\n elif not os.path.isfile(file_name):\n raise IOError('Input file name [%s] not found!' % file_name)", - "docstring": "File name error\n\n This method checks if the input file name is valid.\n\n Parameters\n ----------\n file_name : str\n File name string\n\n Raises\n ------\n IOError\n If file name not specified or file not found" - }, - { - "code": "def define_zip_index_for_species(names_ppn_world,\n number_names_ppn_world):\n global cl\n cl={}\n for a,b in zip(names_ppn_world,number_names_ppn_world):\n cl[a] = b", - "docstring": "This just give back cl, that is the original index as it is read from files from a data file." 
- }, - { - "code": "def forward_substitution(matrix_l, matrix_b):\n q = len(matrix_b)\n matrix_y = [0.0 for _ in range(q)]\n matrix_y[0] = float(matrix_b[0]) / float(matrix_l[0][0])\n for i in range(1, q):\n matrix_y[i] = float(matrix_b[i]) - sum([matrix_l[i][j] * matrix_y[j] for j in range(0, i)])\n matrix_y[i] /= float(matrix_l[i][i])\n return matrix_y", - "docstring": "Forward substitution method for the solution of linear systems.\n\n Solves the equation :math:`Ly = b` using forward substitution method\n where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix.\n\n :param matrix_l: L, lower triangular matrix\n :type matrix_l: list, tuple\n :param matrix_b: b, column matrix\n :type matrix_b: list, tuple\n :return: y, column matrix\n :rtype: list" - }, - { - "code": "def perpendicular_vector(n):\n dim = len(n)\n if dim == 2:\n return n[::-1]\n for ix in range(dim):\n _ = N.zeros(dim)\n _[dim-ix-1] = 1\n v1 = N.cross(n,_)\n if N.linalg.norm(v1) != 0:\n return v1\n raise ValueError(\"Cannot find perpendicular vector\")", - "docstring": "Get a random vector perpendicular\n to the given vector" - }, - { - "code": "def pandoc(script=None,\n input=None,\n output=None,\n args='{input:q} --output {output:q}',\n **kwargs):\n if not executable('pandoc').target_exists():\n raise RuntimeError('pandoc not found')\n input = sos_targets(collect_input(script, input))\n output = sos_targets(output)\n if len(output) == 0:\n write_to_stdout = True\n output = sos_targets(\n tempfile.NamedTemporaryFile(\n mode='w+t', suffix='.html', delete=False).name)\n else:\n write_to_stdout = False\n ret = 1\n try:\n p = None\n cmd = interpolate(f'pandoc {args}', {'input': input, 'output': output})\n if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:\n env.log_to_file('ACTION', f'Running command \"{cmd}\"')\n if env.config['run_mode'] == 'interactive':\n from .utils import pexpect_run\n ret = pexpect_run(cmd)\n else:\n p = subprocess.Popen(cmd, shell=True)\n ret = p.wait()\n except Exception as e:\n env.logger.error(e)\n if ret != 0:\n temp_file = os.path.join('.sos', f'pandoc_{os.getpid()}.md')\n shutil.copyfile(input, temp_file)\n cmd = interpolate(f'pandoc {args}', {\n 'input': sos_targets(temp_file),\n 'output': sos_targets(output)\n })\n raise RuntimeError(\n f'Failed to execute script. Please use command \\n{cmd}\\nunder {os.getcwd()} to test it.'\n )\n if write_to_stdout:\n with open(output[0].fullname()) as out:\n sys.stdout.write(out.read())\n else:\n env.logger.info(f'Report saved to {output}')\n try:\n os.remove(input)\n except Exception:\n pass", - "docstring": "Convert input file to output using pandoc\n\n The input can be specified in three ways:\n\n 1. instant script, which is assumed to be in md format\n\n pandoc: output='report.html'\n script\n\n 2. one or more input files. The format is determined by extension of input file\n\n pandoc(input, output='report.html')\n\n 3. input file specified by command line option `-r` .\n pandoc(output='report.html')\n\n If no output is specified, it is assumed to be in html format\n and is written to standard output.\n\n You can specify more options such as \"from\" and \"to\" by customizing\n the args parameter of the action. 
The default value of args is\n `{input:q} --output {output:q}'" - }, - { - "code": "def get_device_activity(self, type_p):\n if not isinstance(type_p, list):\n raise TypeError(\"type_p can only be an instance of type list\")\n for a in type_p[:10]:\n if not isinstance(a, DeviceType):\n raise TypeError(\n \"array can only contain objects of type DeviceType\")\n activity = self._call(\"getDeviceActivity\",\n in_p=[type_p])\n activity = [DeviceActivity(a) for a in activity]\n return activity", - "docstring": "Gets the current activity type of given devices or device groups.\n\n in type_p of type :class:`DeviceType`\n\n return activity of type :class:`DeviceActivity`\n\n raises :class:`OleErrorInvalidarg`\n Invalid device type." - }, - { - "code": "def list_stacks(awsclient):\n client_cf = awsclient.get_client('cloudformation')\n response = client_cf.list_stacks(\n StackStatusFilter=[\n 'CREATE_IN_PROGRESS', 'CREATE_COMPLETE', 'ROLLBACK_IN_PROGRESS',\n 'ROLLBACK_COMPLETE', 'DELETE_IN_PROGRESS', 'DELETE_FAILED',\n 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',\n 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS',\n 'UPDATE_ROLLBACK_FAILED',\n 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',\n 'UPDATE_ROLLBACK_COMPLETE',\n ]\n )\n result = {}\n stack_sum = 0\n for summary in response['StackSummaries']:\n result['StackName'] = summary[\"StackName\"]\n result['CreationTime'] = summary['CreationTime']\n result['StackStatus'] = summary['StackStatus']\n print(json2table(result))\n stack_sum += 1\n print('listed %s stacks' % str(stack_sum))", - "docstring": "Print out the list of stacks deployed at AWS cloud.\n\n :param awsclient:\n :return:" - }, - { - "code": "def _find_impl(cls, role, interface):\n module = _relation_module(role, interface)\n if not module:\n return None\n return cls._find_subclass(module)", - "docstring": "Find relation implementation based on its role and interface." - }, - { - "code": "def collect_table_content(table_bboxes, elems):\n table_contents = [[] for _ in range(len(table_bboxes))]\n prev_content = None\n prev_bbox = None\n for cid, c in enumerate(elems):\n if isinstance(c, LTAnno):\n if prev_content is not None:\n prev_content.append(c)\n continue\n if prev_bbox is not None and intersect(prev_bbox, c.bbox):\n prev_content.append(c)\n continue\n for table_id, table_bbox in enumerate(table_bboxes):\n if intersect(table_bbox, c.bbox):\n prev_bbox = table_bbox\n prev_content = table_contents[table_id]\n prev_content.append(c)\n break\n return table_contents", - "docstring": "Returns a list of elements that are contained inside\n the corresponding supplied bbox." - }, - { - "code": "def tile_bbox(self, tile_indices):\n (z, x, y) = tile_indices\n topleft = (x * self.tilesize, (y + 1) * self.tilesize)\n bottomright = ((x + 1) * self.tilesize, y * self.tilesize)\n nw = self.unproject_pixels(topleft, z)\n se = self.unproject_pixels(bottomright, z)\n return nw + se", - "docstring": "Returns the WGS84 bbox of the specified tile" - }, - { - "code": "def _parse_names_set(feature_names):\r\n feature_collection = OrderedDict()\r\n for feature_name in feature_names:\r\n if isinstance(feature_name, str):\r\n feature_collection[feature_name] = ...\r\n else:\r\n raise ValueError('Failed to parse {}, expected string'.format(feature_name))\r\n return feature_collection", - "docstring": "Helping function of `_parse_feature_names` that parses a set of feature names." 
- }, - { - "code": "def write(self, destination, filename, content):\n if not os.path.exists(destination):\n try:\n os.makedirs(destination)\n except:\n pass\n filepath = \"%s/%s\" % (destination, filename)\n f = open(filepath, \"w+\")\n f.write(content)\n f.close()", - "docstring": "Write a file at the specific destination with the content.\n\n Args:\n destination (string): the destination location\n filename (string): the filename that will be written\n content (string): the content of the filename" - }, - { - "code": "def gen_send_version_url(ip, port):\n return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, VERSION_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID)", - "docstring": "Generate send error url" - }, - { - "code": "def do_video(self, args):\n func = getattr(args, 'func', None)\n if func is not None:\n func(self, args)\n else:\n self.do_help('video')", - "docstring": "Video management command demonstrates multiple layers of sub-commands being handled by AutoCompleter" - }, - { - "code": "def repo_name(self):\n ds = [[x.repo_name] for x in self.repos]\n df = pd.DataFrame(ds, columns=['repository'])\n return df", - "docstring": "Returns a DataFrame of the repo names present in this project directory\n\n :return: DataFrame" - }, - { - "code": "def _view_filter(self):\n view_filter = OsidSession._view_filter(self)\n if self._sequestered_view == SEQUESTERED:\n view_filter['sequestered'] = False\n return view_filter", - "docstring": "Overrides OsidSession._view_filter to add sequestering filter." - }, - { - "code": "def _package_path(package):\n from os import path\n confdir = config_dir()\n return path.join(confdir, \"{}.cfg\".format(package))", - "docstring": "Returns the full path to the default package configuration file.\n\n Args:\n package (str): name of the python package to return a path for." - }, - { - "code": "def qs(schema):\n def wrapper(func):\n setattr(func, QS, schema)\n return func\n return wrapper", - "docstring": "Decorate a function with a query string schema." - }, - { - "code": "def _add_logical_methods(cls):\n _doc =\n _index_shared_docs['index_all'] = dedent(\n)\n _index_shared_docs['index_any'] = dedent(\n)\n def _make_logical_function(name, desc, f):\n @Substitution(outname=name, desc=desc)\n @Appender(_index_shared_docs['index_' + name])\n @Appender(_doc)\n def logical_func(self, *args, **kwargs):\n result = f(self.values)\n if (isinstance(result, (np.ndarray, ABCSeries, Index)) and\n result.ndim == 0):\n return result.dtype.type(result.item())\n else:\n return result\n logical_func.__name__ = name\n return logical_func\n cls.all = _make_logical_function('all', 'Return whether all elements '\n 'are True.',\n np.all)\n cls.any = _make_logical_function('any',\n 'Return whether any element is True.',\n np.any)", - "docstring": "Add in logical methods." 
- }, - { - "code": "def process_input(netIn, allowedformats, outputformat='G'):\n inputtype = checkInput(netIn)\n if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':\n G = netIn.df_to_array()\n netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}\n elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':\n TN = netIn\n elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':\n G = contact2graphlet(netIn)\n netInfo = dict(netIn)\n netInfo.pop('contacts')\n elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':\n TN = TemporalNetwork(from_dict=netIn)\n elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':\n TN = TemporalNetwork(from_array=netIn)\n elif inputtype == 'G' and 'G' in allowedformats:\n netInfo = {}\n netInfo['netshape'] = netIn.shape\n netInfo['nettype'] = gen_nettype(netIn)\n G = netIn\n elif inputtype == 'C' and outputformat == 'C':\n pass\n else:\n raise ValueError('Input invalid.')\n if outputformat == 'TN' and not isinstance(TN.network, str):\n TN.network['i'] = TN.network['i'].astype(int)\n TN.network['j'] = TN.network['j'].astype(int)\n TN.network['t'] = TN.network['t'].astype(int)\n if outputformat == 'C' or outputformat == 'G':\n netInfo['inputtype'] = inputtype\n if inputtype != 'C' and outputformat == 'C':\n C = graphlet2contact(G, netInfo)\n if outputformat == 'G':\n return G, netInfo\n elif outputformat == 'C':\n return C\n elif outputformat == 'TN':\n return TN", - "docstring": "Takes input network and checks what the input is.\n\n Parameters\n ----------\n\n netIn : array, dict, or TemporalNetwork\n Network (graphlet, contact or object)\n allowedformats : str\n Which format of network objects that are allowed. Options: 'C', 'TN', 'G'.\n outputformat: str, default=G\n Target output format. 
Options: 'C' or 'G'.\n\n\n Returns\n -------\n\n C : dict\n\n OR\n\n G : array\n Graphlet representation.\n netInfo : dict\n Metainformation about network.\n\n OR\n\n tnet : object\n object of TemporalNetwork class" - }, - { - "code": "def unregister(self, svc_ref):\n with self.__svc_lock:\n try:\n return self.__pending_services.pop(svc_ref)\n except KeyError:\n pass\n if svc_ref not in self.__svc_registry:\n raise BundleException(\"Unknown service: {0}\".format(svc_ref))\n bundle = svc_ref.get_bundle()\n service = self.__svc_registry.pop(svc_ref)\n for spec in svc_ref.get_property(OBJECTCLASS):\n spec_services = self.__svc_specs[spec]\n idx = bisect.bisect_left(spec_services, svc_ref)\n del spec_services[idx]\n if not spec_services:\n del self.__svc_specs[spec]\n if svc_ref.is_factory():\n factory, svc_reg = self.__svc_factories.pop(svc_ref)\n for counter in self.__factory_usage.values():\n counter.cleanup_service(factory, svc_reg)\n else:\n bundle_services = self.__bundle_svc[bundle]\n bundle_services.remove(svc_ref)\n if not bundle_services:\n del self.__bundle_svc[bundle]\n return service", - "docstring": "Unregisters a service\n\n :param svc_ref: A service reference\n :return: The unregistered service instance\n :raise BundleException: Unknown service reference" - }, - { - "code": "def createAltHistoryPlot(self):\n self.altHistRect = patches.Rectangle((self.leftPos+(self.vertSize/10.0),-0.25),0.5,0.5,facecolor='grey',edgecolor='none',alpha=0.4,zorder=4)\n self.axes.add_patch(self.altHistRect)\n self.altPlot, = self.axes.plot([self.leftPos+(self.vertSize/10.0),self.leftPos+(self.vertSize/10.0)+0.5],[0.0,0.0],color='k',marker=None,zorder=4)\n self.altMarker, = self.axes.plot(self.leftPos+(self.vertSize/10.0)+0.5,0.0,marker='o',color='k',zorder=4)\n self.altText2 = self.axes.text(self.leftPos+(4*self.vertSize/10.0)+0.5,0.0,'%.f m' % self.relAlt,color='k',size=self.fontSize,ha='left',va='center',zorder=4)", - "docstring": "Creates the altitude history plot." - }, - { - "code": "def draw_linecollection(data, obj):\n content = []\n edgecolors = obj.get_edgecolors()\n linestyles = obj.get_linestyles()\n linewidths = obj.get_linewidths()\n paths = obj.get_paths()\n for i, path in enumerate(paths):\n color = edgecolors[i] if i < len(edgecolors) else edgecolors[0]\n style = linestyles[i] if i < len(linestyles) else linestyles[0]\n width = linewidths[i] if i < len(linewidths) else linewidths[0]\n data, options = mypath.get_draw_options(data, obj, color, None, style, width)\n data, cont, _, _ = mypath.draw_path(\n data, path, draw_options=options, simplify=False\n )\n content.append(cont + \"\\n\")\n return data, content", - "docstring": "Returns Pgfplots code for a number of patch objects." 
- }, - { - "code": "def do_help(self, arg):\n if arg:\n try:\n func = getattr(self, 'help_' + arg)\n except AttributeError:\n try:\n doc = getattr(self, 'do_' + arg).__doc__\n if doc:\n self.stdout.write(\"%s\\n\" % str(doc))\n return\n except AttributeError:\n pass\n self.stdout.write(\"%s\\n\" % str(self.nohelp % (arg,)))\n return\n func()\n else:\n names = self.get_names()\n cmds_doc = []\n cmds_undoc = []\n help_page = {}\n for name in names:\n if name[:5] == 'help_':\n help_page[name[5:]] = 1\n names.sort()\n prevname = ''\n for name in names:\n if name[:3] == 'do_':\n if name == prevname:\n continue\n prevname = name\n cmd = name[3:]\n if cmd in help_page:\n cmds_doc.append(cmd)\n del help_page[cmd]\n elif getattr(self, name).__doc__:\n cmds_doc.append(cmd)\n else:\n cmds_undoc.append(cmd)\n self.stdout.write(\"%s\\n\" % str(self.doc_leader))\n self.print_topics(self.doc_header, cmds_doc, 15, 80)\n self.print_topics(self.misc_header, list(help_page.keys()), 15, 80)\n self.print_topics(self.undoc_header, cmds_undoc, 15, 80)\n for topic in self.command_topics:\n topic_cmds = self.command_topics[topic]\n self.print_topics(string.capwords(topic + \" commands\"), topic_cmds, 15, 80)", - "docstring": "List available commands with \"help\" or detailed help with \"help cmd\"." - }, - { - "code": "def spin(self, start_message, end_message, fmt=None, interval=100, values=None):\n spinner = ProgressIndicator(self.io, fmt, interval, values)\n return spinner.auto(start_message, end_message)", - "docstring": "Automatically spin a progress indicator." - }, - { - "code": "def get_orga(self, orgaPk):\n r = self._request('orga/' + str(orgaPk))\n if r:\n o = Orga()\n o.pk = o.id = orgaPk\n o.__dict__.update(r.json())\n return o\n return None", - "docstring": "Return an organization speficied with orgaPk" - }, - { - "code": "def repeat(self, time, function, args = []):\n callback_id = self.tk.after(time, self._call_wrapper, time, function, *args)\n self._callback[function] = [callback_id, True]", - "docstring": "Repeat `function` every `time` milliseconds." - }, - { - "code": "def canBeCollapsed(master, br1, br2):\n if br1['buildsetid'] == br2['buildsetid']:\n return True\n selfBuildsets = yield master.data.get(\n ('buildsets', str(br1['buildsetid'])))\n otherBuildsets = yield master.data.get(\n ('buildsets', str(br2['buildsetid'])))\n selfSources = dict((ss['codebase'], ss)\n for ss in selfBuildsets['sourcestamps'])\n otherSources = dict((ss['codebase'], ss)\n for ss in otherBuildsets['sourcestamps'])\n if set(selfSources) != set(otherSources):\n return False\n for c, selfSS in selfSources.items():\n otherSS = otherSources[c]\n if selfSS['repository'] != otherSS['repository']:\n return False\n if selfSS['branch'] != otherSS['branch']:\n return False\n if selfSS['project'] != otherSS['project']:\n return False\n if selfSS['patch'] or otherSS['patch']:\n return False\n selfChanges = yield master.data.get(('sourcestamps', selfSS['ssid'], 'changes'))\n otherChanges = yield master.data.get(('sourcestamps', otherSS['ssid'], 'changes'))\n if selfChanges and otherChanges:\n continue\n elif selfChanges and not otherChanges:\n return False\n elif not selfChanges and otherChanges:\n return False\n elif selfSS['revision'] != otherSS['revision']:\n return False\n return True", - "docstring": "Returns true if both buildrequest can be merged, via Deferred.\n\n This implements Buildbot's default collapse strategy." 
- }, - { - "code": "def _clean_url(self, url):\n if not url.startswith('http') or url.endswith('}}') or 'nojs_router' in url:\n return None\n if site(norm(url).lower()) in config.NONCANONIC_SITES:\n clean_url = canonicalize_url(url, keep_params=True)\n else:\n clean_url = canonicalize_url(url)\n return clean_url", - "docstring": "Canonicalizes the url, as it is done in Scrapy.\n And keeps only USEFUL_QUERY_KEYS. It also strips the \n trailing slash to help identifying dupes." - }, - { - "code": "def normalize_headers(headers):\n for (k, v) in headers.items():\n headers.pop(k)\n headers[k.lower()] = v\n result = {}\n if 'content-length' in headers:\n result['content-length'] = headers['content-length']\n elif 'contentlength' in headers:\n result['content-length'] = headers['contentlength']\n if 'content-type' in headers:\n result['content-type'] = headers['content-type']\n elif 'contenttype' in headers:\n result['content-type'] = headers['contenttype']\n if 'last-modified' in headers:\n result['last-modified'] = headers['last-modified']\n elif 'lastmodified' in headers:\n result['last-modified'] = headers['lastmodified']\n else:\n result['last-modified'] = datetime.datetime.utcnow().strftime(RFC1123_TIME_FORMAT)\n if isinstance(result['last-modified'], datetime.datetime):\n result['last-modified'] = result['last-modified'].strftime(RFC1123_TIME_FORMAT)\n if 'ETag' in headers:\n result['etag'] = headers['etag'].strip('\"')\n elif 'etag' in headers:\n result['etag'] = headers['etag'].strip('\"')\n return result", - "docstring": "Convert useful headers to a normalized type and return in a new dict\n\n Only processes content-type, content-length, etag, and last-modified\n :param headers:\n :return:" - }, - { - "code": "def update_forward_refs(cls, **localns: Any) -> None:\n globalns = sys.modules[cls.__module__].__dict__\n globalns.setdefault(cls.__name__, cls)\n for f in cls.__fields__.values():\n update_field_forward_refs(f, globalns=globalns, localns=localns)", - "docstring": "Try to update ForwardRefs on fields based on this Model, globalns and localns." - }, - { - "code": "def set(self, name, arg=UNSET):\n self._set(name, arg, self._lineno)", - "docstring": "Modify the instruction in-place.\n\n Replace name and arg attributes. Don't modify lineno." 
- }, - { - "code": "def available_subtypes(format=None):\n subtypes = _available_formats_helper(_snd.SFC_GET_FORMAT_SUBTYPE_COUNT,\n _snd.SFC_GET_FORMAT_SUBTYPE)\n return dict((subtype, name) for subtype, name in subtypes\n if format is None or check_format(format, subtype))", - "docstring": "Return a dictionary of available subtypes.\n\n Parameters\n ----------\n format : str\n If given, only compatible subtypes are returned.\n\n Examples\n --------\n >>> import soundfile as sf\n >>> sf.available_subtypes('FLAC')\n {'PCM_24': 'Signed 24 bit PCM',\n 'PCM_16': 'Signed 16 bit PCM',\n 'PCM_S8': 'Signed 8 bit PCM'}" - }, - { - "code": "def get_repo(self, auth, username, repo_name):\n path = \"/repos/{u}/{r}\".format(u=username, r=repo_name)\n response = self.get(path, auth=auth)\n return GogsRepo.from_json(response.json())", - "docstring": "Returns a the repository with name ``repo_name`` owned by\n the user with username ``username``.\n\n :param auth.Authentication auth: authentication object\n :param str username: username of owner of repository\n :param str repo_name: name of repository\n :return: a representation of the retrieved repository\n :rtype: GogsRepo\n :raises NetworkFailure: if there is an error communicating with the server\n :raises ApiFailure: if the request cannot be serviced" - }, - { - "code": "def autodoc_skip(app, what, name, obj, skip, options):\n if name in config.EXCLUDE_MEMBERS:\n return True\n if name in config.INCLUDE_MEMBERS:\n return False\n return skip", - "docstring": "Hook that tells autodoc to include or exclude certain fields.\n\n Sadly, it doesn't give a reference to the parent object,\n so only the ``name`` can be used for referencing.\n\n :type app: sphinx.application.Sphinx\n :param what: The parent type, ``class`` or ``module``\n :type what: str\n :param name: The name of the child method/attribute.\n :type name: str\n :param obj: The child value (e.g. a method, dict, or module reference)\n :param options: The current autodoc settings.\n :type options: dict\n\n .. 
seealso:: http://www.sphinx-doc.org/en/stable/ext/autodoc.html#event-autodoc-skip-member" - }, - { - "code": "def haversine_distance(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n if not (-90.0 <= lat1 <= 90):\n raise ValueError('lat1={:2.2f}, but must be in [-90,+90]'.format(lat1))\n if not (-90.0 <= lat2 <= 90):\n raise ValueError('lat2={:2.2f}, but must be in [-90,+90]'.format(lat2))\n if not (-180.0 <= lon1 <= 180):\n raise ValueError('lon1={:2.2f}, but must be in [-180,+180]'\n .format(lat1))\n if not (-180.0 <= lon2 <= 180):\n raise ValueError('lon1={:2.2f}, but must be in [-180,+180]'\n .format(lat1))\n radius = 6371\n dlat = math_stl.radians(lat2 - lat1)\n dlon = math_stl.radians(lon2 - lon1)\n a = (math_stl.sin(dlat / 2) * math_stl.sin(dlat / 2) +\n math_stl.cos(math_stl.radians(lat1)) *\n math_stl.cos(math_stl.radians(lat2)) *\n math_stl.sin(dlon / 2) * math_stl.sin(dlon / 2))\n c = 2 * math_stl.atan2(math_stl.sqrt(a), math_stl.sqrt(1 - a))\n d = radius * c\n return d", - "docstring": "Calculate the Haversine distance.\n\n Parameters\n ----------\n origin : tuple of float\n (lat, long)\n destination : tuple of float\n (lat, long)\n\n Returns\n -------\n distance_in_km : float\n\n Examples\n --------\n >>> munich = (48.1372, 11.5756)\n >>> berlin = (52.5186, 13.4083)\n >>> round(haversine_distance(munich, berlin), 1)\n 504.2\n\n >>> new_york_city = (40.712777777778, -74.005833333333) # NYC\n >>> round(haversine_distance(berlin, new_york_city), 1)\n 6385.3" - }, - { - "code": "def get_sigmasqs(self, instruments=None):\n\t\tif len(self):\n\t\t\tif not instruments:\n\t\t\t\tinstruments = map(str, \\\n\t\t\t\t\tinstrument_set_from_ifos(self[0].ifos))\n\t\t\treturn dict((ifo, self.get_sigmasq(ifo))\\\n\t\t\t\t for ifo in instruments)\n\t\telse:\n\t\t\treturn dict()", - "docstring": "Return dictionary of single-detector sigmas for each row in the\n\t\ttable." - }, - { - "code": "def differentiate(self, coord, edge_order=1, datetime_unit=None):\n ds = self._to_temp_dataset().differentiate(\n coord, edge_order, datetime_unit)\n return self._from_temp_dataset(ds)", - "docstring": "Differentiate the array with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: str\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: DataArray\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.differentiate('x')\n \n array([[30. , 30. , 30. ],\n [27.545455, 27.545455, 27.545455],\n [27.545455, 27.545455, 27.545455],\n [30. , 30. , 30. 
]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y" - }, - { - "code": "def __error_callback(self, exception, interval):\n logging.error('Error while publishing {}'.format(\n exception))\n logging.info('Retry in %s seconds.', interval)", - "docstring": "Execute when there is an error while sending a message.\n\n :param exception: Exception which has been thrown while trying to send\n the message.\n :param interval: Interval in which the message delivery will be\n retried." - }, - { - "code": "def atlas_zonefile_push_dequeue( zonefile_queue=None ):\n ret = None\n with AtlasZonefileQueueLocked(zonefile_queue) as zfq:\n if len(zfq) > 0:\n ret = zfq.pop(0)\n return ret", - "docstring": "Dequeue a zonefile's information to replicate\n Return None if there are none queued" - }, - { - "code": "def check(jail=None,\n chroot=None,\n root=None,\n depends=False,\n recompute=False,\n checksum=False):\n if not any((depends, recompute, checksum)):\n return 'One of depends, recompute, or checksum must be set to True'\n opts = ''\n if depends:\n opts += 'dy'\n if recompute:\n opts += 'r'\n if checksum:\n opts += 's'\n cmd = _pkg(jail, chroot, root)\n cmd.append('check')\n if opts:\n cmd.append('-' + opts)\n return __salt__['cmd.run'](\n cmd,\n output_loglevel='trace',\n python_shell=False\n )", - "docstring": "Sanity checks installed packages\n\n jail\n Perform the sanity check in the specified jail\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.check jail=\n\n chroot\n Perform the sanity check in the specified chroot (ignored if ``jail``\n is specified)\n\n root\n Perform the sanity check in the specified root (ignored if ``jail``\n is specified)\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.check chroot=/path/to/chroot\n\n\n Of the below, at least one must be set to ``True``.\n\n depends\n Check for and install missing dependencies.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.check recompute=True\n\n recompute\n Recompute sizes and checksums of installed packages.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.check depends=True\n\n checksum\n Find invalid checksums for installed packages.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.check checksum=True" - }, - { - "code": "def teardown_handler(teardown_fixtures_fn, teardown_fn):\n def handler(obj):\n teardown_fn(obj)\n teardown_fixtures_fn(obj)\n return handler", - "docstring": "Returns a function that adds fixtures handling to the teardown method.\n\n Calls the given teardown method first before calling the fixtures teardown." 
- }, - { - "code": "def check_duplicate_axis(self, ds):\n ret_val = []\n geophysical_variables = self._find_geophysical_vars(ds)\n for name in geophysical_variables:\n no_duplicates = TestCtx(BaseCheck.HIGH, self.section_titles['5'])\n axis_map = cfutil.get_axis_map(ds, name)\n axes = []\n for axis, coordinates in axis_map.items():\n for coordinate in coordinates:\n axis_attr = getattr(ds.variables[coordinate], 'axis', None)\n no_duplicates.assert_true(axis_attr is None or axis_attr not in axes,\n \"'{}' has duplicate axis {} defined by {}\".format(name, axis_attr, coordinate))\n if axis_attr and axis_attr not in axes:\n axes.append(axis_attr)\n ret_val.append(no_duplicates.to_result())\n return ret_val", - "docstring": "Checks that no variable contains two coordinates defining the same\n axis.\n\n Chapter 5 paragraph 6\n\n If an axis attribute is attached to an auxiliary coordinate variable,\n it can be used by applications in the same way the `axis` attribute\n attached to a coordinate variable is used. However, it is not\n permissible for a [geophysical variable] to have both a coordinate\n variable and an auxiliary coordinate variable, or more than one of\n either type of variable, having an `axis` attribute with any given\n value e.g. there must be no more than one axis attribute for X for any\n [geophysical variable].\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n :return: List of results" - }, - { - "code": "def call(self, additional_fields, restriction, shape, depth, max_items, offset):\n from .folders import Folder\n roots = {f.root for f in self.folders}\n if len(roots) != 1:\n raise ValueError('FindFolder must be called with folders in the same root hierarchy (%r)' % roots)\n root = roots.pop()\n for elem in self._paged_call(payload_func=self.get_payload, max_items=max_items, **dict(\n additional_fields=additional_fields,\n restriction=restriction,\n shape=shape,\n depth=depth,\n page_size=self.chunk_size,\n offset=offset,\n )):\n if isinstance(elem, Exception):\n yield elem\n continue\n yield Folder.from_xml(elem=elem, root=root)", - "docstring": "Find subfolders of a folder.\n\n :param additional_fields: the extra fields that should be returned with the folder, as FieldPath objects\n :param shape: The set of attributes to return\n :param depth: How deep in the folder structure to search for folders\n :param max_items: The maximum number of items to return\n :param offset: the offset relative to the first item in the item collection. Usually 0.\n :return: XML elements for the matching folders" - }, - { - "code": "def remove_info_file():\n try:\n os.unlink(_get_info_file_path())\n except OSError as e:\n if e.errno == errno.ENOENT:\n pass\n else:\n raise", - "docstring": "Remove the current process's TensorBoardInfo file, if it exists.\n\n If the file does not exist, no action is taken and no error is raised." - }, - { - "code": "def waveset(self):\n w = get_waveset(self.model)\n if w is not None:\n utils.validate_wavelengths(w)\n w = w * self._internal_wave_unit\n return w", - "docstring": "Optimal wavelengths for sampling the spectrum or bandpass." - }, - { - "code": "def prev(self):\n seg = Segment(segment_t=idaapi.get_prev_seg(self.ea))\n if seg.ea >= self.ea:\n raise exceptions.NoMoreSegments(\"This is the first segment. no segments exist before it.\")\n return seg", - "docstring": "Get the previous segment." 
- }, - { - "code": "def _resolve_value(self, name):\n for instance in self.__instances():\n value = instance._resolve_value(name)\n if value:\n return value\n if name in self.__metadata._meta.elements:\n populate_from = self.__metadata._meta.elements[name].populate_from\n if callable(populate_from):\n return populate_from(None)\n elif isinstance(populate_from, Literal):\n return populate_from.value\n elif populate_from is not NotSet:\n return self._resolve_value(populate_from)", - "docstring": "Returns an appropriate value for the given name. \n This simply asks each of the instances for a value." - }, - { - "code": "def apply_search(queryset, search, schema=None):\n ast = DjangoQLParser().parse(search)\n schema = schema or DjangoQLSchema\n schema_instance = schema(queryset.model)\n schema_instance.validate(ast)\n return queryset.filter(build_filter(ast, schema_instance))", - "docstring": "Applies search written in DjangoQL mini-language to given queryset" - }, - { - "code": "def concatmap(source, func, *more_sources, task_limit=None):\n return concat.raw(\n combine.smap.raw(source, func, *more_sources), task_limit=task_limit)", - "docstring": "Apply a given function that creates a sequence from the elements of one\n or several asynchronous sequences, and generate the elements of the created\n sequences in order.\n\n The function is applied as described in `map`, and must return an\n asynchronous sequence. The returned sequences are awaited concurrently,\n although it's possible to limit the amount of running sequences using\n the `task_limit` argument." - }, - { - "code": "def get_format(self, name):\n formats = [format for format in self.list_formats(self.__root_node) if format in name]\n if not formats:\n return QTextCharFormat()\n name = max(formats)\n format = None\n current_node = self.__root_node\n for selector in name.split(\".\"):\n nodes = [node for node in current_node.children if node.name == selector]\n format_node = nodes and nodes[0] or None\n if not format_node:\n break\n current_node = format_node\n if not format_node.format:\n continue\n format = format_node.format\n return format", - "docstring": "Returns the closest format or closest parent format associated to given name.\n\n :param name: Format name.\n :type name: unicode\n :return: Format.\n :rtype: QTextCharFormat" - }, - { - "code": "def url_for_token(self, token):\n book_url = self.get_config_value(\"pages\", token)\n book, _, url_tail = book_url.partition(':')\n book_base = settings.HELP_TOKENS_BOOKS[book]\n url = book_base\n lang = getattr(settings, \"HELP_TOKENS_LANGUAGE_CODE\", None)\n if lang is not None:\n lang = self.get_config_value(\"locales\", lang)\n url += \"/\" + lang\n version = getattr(settings, \"HELP_TOKENS_VERSION\", None)\n if version is not None:\n url += \"/\" + version\n url += \"/\" + url_tail\n return url", - "docstring": "Find the full URL for a help token." 
- }, - { - "code": "def get_conf(conf, sect, opt):\n argu = getattr(args, \"mambupy_\"+opt.lower())\n if not argu:\n envir = os.environ.get(\"MAMBUPY_\"+opt.upper())\n if not envir:\n try:\n return conf.get(sect,opt)\n except NoSectionError:\n return default_configs[opt]\n return envir\n return argu", - "docstring": "Gets a config 'opt' from 'conf' file, under section 'sect'.\n\n If no 'opt' exists under 'sect', it looks for option on the default_configs\n dictionary\n\n If there exists an environmental variable named MAMBUPY_{upper_case_opt},\n it overrides whatever the conf files or default_configs dict says.\n\n But if you send a command line argument named mambupy_{lower_case_opt},\n it overrides anything else.\n\n Args:\n conf (ConfigParser): ConfigParser that reads from certain config file (INI\n format)\n sect (string): section under the config file\n opt (string): option to read\n\n Returns:\n string: configuration option. If not found on conf, returns a value from\n default_configs dict. If environmental variable exists with name\n MAMBUPY_{upper_case_opt} it overrides anything else" - }, - { - "code": "def clean_stale_refs(self):\n cleaned = []\n cmd_str = 'git remote prune origin'\n env = os.environ.copy()\n env[b\"LANGUAGE\"] = b\"C\"\n env[b\"LC_ALL\"] = b\"C\"\n cmd = subprocess.Popen(\n shlex.split(cmd_str),\n close_fds=not salt.utils.platform.is_windows(),\n cwd=os.path.dirname(self.gitdir),\n env=env,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = cmd.communicate()[0]\n if six.PY3:\n output = output.decode(__salt_system_encoding__)\n if cmd.returncode != 0:\n log.warning(\n 'Failed to prune stale branches for %s remote \\'%s\\'. '\n 'Output from \\'%s\\' follows:\\n%s',\n self.role, self.id, cmd_str, output\n )\n else:\n marker = ' * [pruned] '\n for line in salt.utils.itertools.split(output, '\\n'):\n if line.startswith(marker):\n cleaned.append(line[len(marker):].strip())\n if cleaned:\n log.debug(\n '%s pruned the following stale refs: %s',\n self.role, ', '.join(cleaned)\n )\n return cleaned", - "docstring": "Remove stale refs so that they are no longer seen as fileserver envs" - }, - { - "code": "def _initialize_progress_bar(self):\n widgets = ['Download: ', Percentage(), ' ', Bar(),\n ' ', AdaptiveETA(), ' ', FileTransferSpeed()]\n self._downloadProgressBar = ProgressBar(\n widgets=widgets, max_value=self._imageCount).start()", - "docstring": "Initializes the progress bar" - }, - { - "code": "def _init_nt_hdr(self, line):\n line = line.replace('.', '_')\n line = line.replace(' ', '_')\n line = line.replace('\n line = line.replace('-', '_')\n line = line.replace('\"', '')\n hdrs = re.split(self.sep, line)\n if '' in hdrs:\n hdrs = NCBIgeneFileReader.replace_nulls(hdrs)\n self.idxs_int = [idx for idx, hdr in enumerate(hdrs) if hdr in self.int_hdrs]\n self.idxs_float = [idx for idx, hdr in enumerate(hdrs) if hdr in self.float_hdrs]\n assert hdrs[6] == 'Aliases'\n return namedtuple('ntncbi', ' '.join(hdrs))", - "docstring": "Convert headers into valid namedtuple fields." - }, - { - "code": "def saveWeightsToFile(self, filename, mode='pickle', counter=None):\n self.saveWeights(filename, mode, counter)", - "docstring": "Deprecated. Use saveWeights instead." 
- }, - { - "code": "def add_catalogue_cluster(self, catalogue, vcl, flagvector,\n cluster_id=None, overlay=True):\n self.add_size_scaled_points(catalogue.data['longitude'],\n catalogue.data['latitude'],\n catalogue.data['magnitude'],\n shape=\"o\",\n alpha=0.8,\n colour=(0.5, 0.5, 0.5),\n smin=1.0,\n sscale=1.5,\n overlay=True)\n if cluster_id is None:\n idx = flagvector == 0\n self.add_size_scaled_points(catalogue.data['longitude'][idx],\n catalogue.data['latitude'][idx],\n catalogue.data['magnitude'][idx],\n shape=\"o\",\n colour=\"r\",\n smin=1.0,\n sscale=1.5,\n overlay=overlay)\n return\n if not isinstance(cluster_id, collections.Iterable):\n cluster_id = [cluster_id]\n for iloc, clid in enumerate(cluster_id):\n if iloc == (len(cluster_id) - 1):\n temp_overlay = overlay\n else:\n temp_overlay = True\n idx = vcl == clid\n self.add_size_scaled_points(\n catalogue.data[\"longitude\"][idx],\n catalogue.data[\"latitude\"][idx],\n catalogue.data[\"magnitude\"][idx],\n shape=\"o\",\n colour=DISSIMILAR_COLOURLIST[(iloc + 1) % NCOLS],\n smin=1.0,\n sscale=1.5,\n overlay=temp_overlay)", - "docstring": "Creates a plot of a catalogue showing where particular clusters exist" - }, - { - "code": "def clear_all(self):\n keys = self._analytics_backend.keys()\n for key in itertools.chain(*keys):\n with self._analytics_backend.map() as conn:\n if key.startswith(self._prefix):\n conn.delete(key)", - "docstring": "Deletes all ``sandsnake`` related data from redis.\n\n .. warning::\n\n Very expensive and destructive operation. Use with causion" - }, - { - "code": "def __is_json_error(self, status, headers):\n content_header = headers.get('content-type', '')\n content_type, unused_params = cgi.parse_header(content_header)\n return (status.startswith('400') and\n content_type.lower() in _ALL_JSON_CONTENT_TYPES)", - "docstring": "Determine if response is an error.\n\n Args:\n status: HTTP status code.\n headers: Dictionary of (lowercase) header name to value.\n\n Returns:\n True if the response was an error, else False." - }, - { - "code": "def rollback(self):\n with self.native(writeAccess=True) as conn:\n return self._rollback(conn)", - "docstring": "Rolls back changes to this database." - }, - { - "code": "def get_mnist():\n np.random.seed(1234)\n mnist_data = mx.test_utils.get_mnist()\n X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])\n Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])\n p = np.random.permutation(X.shape[0])\n X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5\n Y = Y[p]\n return X, Y", - "docstring": "Gets MNIST dataset" - }, - { - "code": "def ascent(self):\n total_ascent = 0.0\n altitude_data = self.altitude_points()\n for i in range(len(altitude_data) - 1):\n diff = altitude_data[i+1] - altitude_data[i]\n if diff > 0.0:\n total_ascent += diff\n return total_ascent", - "docstring": "Returns ascent of workout in meters" - }, - { - "code": "def disk_param(name, size, snapshot_profile, cmdline=None, kernel=None):\n disk_params = {}\n if cmdline:\n disk_params['cmdline'] = cmdline\n if kernel:\n disk_params['kernel'] = kernel\n if name:\n disk_params['name'] = name\n if snapshot_profile is not None:\n disk_params['snapshot_profile'] = snapshot_profile\n if size:\n disk_params['size'] = size\n return disk_params", - "docstring": "Return disk parameter structure." 
- }, - { - "code": "def remove_interface_router(self, router, body=None):\n return self.put((self.router_path % router) +\n \"/remove_router_interface\", body=body)", - "docstring": "Removes an internal network interface from the specified router." - }, - { - "code": "def normalize_url(base_url, rel_url):\n if not rel_url:\n return None\n if not is_absolute_url(rel_url):\n rel_url = rel_url.replace(\"../\", \"/\")\n if (not base_url.endswith(\"/\")) and (not rel_url.startswith(\"/\")):\n return base_url + \"/\" + rel_url.replace(\"../\", \"/\")\n return base_url + rel_url.replace(\"../\", \"/\")\n return rel_url", - "docstring": "Normalize the `url` - from relative, create absolute URL.\n\n Args:\n base_url (str): Domain with ``protocol://`` string\n rel_url (str): Relative or absolute url.\n\n Returns:\n str/None: Normalized URL or None if `url` is blank." - }, - { - "code": "def from_config(cls, cp, variable_params):\n if not cp.has_section('sampling_params'):\n raise ValueError(\"no sampling_params section found in config file\")\n sampling_params, replace_parameters = \\\n read_sampling_params_from_config(cp)\n sampling_transforms = transforms.read_transforms_from_config(\n cp, 'sampling_transforms')\n logging.info(\"Sampling in {} in place of {}\".format(\n ', '.join(sampling_params), ', '.join(replace_parameters)))\n return cls(variable_params, sampling_params,\n replace_parameters, sampling_transforms)", - "docstring": "Gets sampling transforms specified in a config file.\n\n Sampling parameters and the parameters they replace are read from the\n ``sampling_params`` section, if it exists. Sampling transforms are\n read from the ``sampling_transforms`` section(s), using\n ``transforms.read_transforms_from_config``.\n\n An ``AssertionError`` is raised if no ``sampling_params`` section\n exists in the config file.\n\n Parameters\n ----------\n cp : WorkflowConfigParser\n Config file parser to read.\n variable_params : list\n List of parameter names of the original variable params.\n\n Returns\n -------\n SamplingTransforms\n A sampling transforms class." 
- }, - { - "code": "def to_str(delta, extended=False):\n total_seconds = delta.total_seconds()\n sign = \"-\" if total_seconds < 0 else \"\"\n nanoseconds = abs(total_seconds * _second_size)\n if total_seconds < 1:\n result_str = _to_str_small(nanoseconds, extended)\n else:\n result_str = _to_str_large(nanoseconds, extended)\n return \"{}{}\".format(sign, result_str)", - "docstring": "Format a datetime.timedelta to a duration string" - }, - { - "code": "def parse_fingerprint(self, cmdline, key=None, sep=None):\n key = key or self.FINGERPRINT_CMD_KEY\n if key:\n sep = sep or self.FINGERPRINT_CMD_SEP\n cmdline = cmdline or []\n for cmd_part in cmdline:\n if cmd_part.startswith('{}{}'.format(key, sep)):\n return cmd_part.split(sep)[1]", - "docstring": "Given a psutil.Process.cmdline, parse and return a fingerprint.\n\n :param list cmdline: The psutil.Process.cmdline of the current process.\n :param string key: The key for fingerprint discovery.\n :param string sep: The key/value separator for fingerprint discovery.\n :returns: The parsed fingerprint or `None`.\n :rtype: string or `None`" - }, - { - "code": "def check_permissions(self):\n changed_permissions = []\n changed_users = []\n warnings = []\n for model, perms in ASSIGNED_PERMISSIONS.items():\n if model == 'profile':\n model_obj = get_profile_model()\n else: model_obj = get_user_model()\n model_content_type = ContentType.objects.get_for_model(model_obj)\n for perm in perms:\n try:\n Permission.objects.get(codename=perm[0],\n content_type=model_content_type)\n except Permission.DoesNotExist:\n changed_permissions.append(perm[1])\n Permission.objects.create(name=perm[1],\n codename=perm[0],\n content_type=model_content_type)\n for user in get_user_model().objects.exclude(username=settings.ANONYMOUS_USER_NAME):\n try:\n user_profile = get_user_profile(user=user)\n except ObjectDoesNotExist:\n warnings.append(_(\"No profile found for %(username)s\") \\\n % {'username': user.username})\n else:\n all_permissions = get_perms(user, user_profile) + get_perms(user, user)\n for model, perms in ASSIGNED_PERMISSIONS.items():\n if model == 'profile':\n perm_object = get_user_profile(user=user)\n else: perm_object = user\n for perm in perms:\n if perm[0] not in all_permissions:\n assign_perm(perm[0], user, perm_object)\n changed_users.append(user)\n return (changed_permissions, changed_users, warnings)", - "docstring": "Checks that all permissions are set correctly for the users.\n\n :return: A set of users whose permissions was wrong." - }, - { - "code": "def count_generated_adv_examples(self):\n result = {}\n for v in itervalues(self.data):\n s_id = v['submission_id']\n result[s_id] = result.get(s_id, 0) + len(v['images'])\n return result", - "docstring": "Returns total number of all generated adversarial examples." 
- }, - { - "code": "def parse_xml_node(self, node):\n self.connector_id = node.getAttributeNS(RTS_NS, 'connectorId')\n self.name = node.getAttributeNS(RTS_NS, 'name')\n if node.hasAttributeNS(RTS_NS, 'transMethod'):\n self.trans_method = node.getAttributeNS(RTS_NS,\n 'transMethod')\n else:\n self.trans_method = ''\n self.comment = node.getAttributeNS(RTS_EXT_NS, 'comment')\n if node.hasAttributeNS(RTS_EXT_NS, 'visible'):\n visible = node.getAttributeNS(RTS_EXT_NS, 'visible')\n if visible == 'true' or visible == '1':\n self.visible = True\n else:\n self.visible = False\n if node.getElementsByTagNameNS(RTS_NS, 'sourceServicePort').length != 1:\n raise InvalidServicePortConnectorNodeError\n self.source_service_port = TargetPort().parse_xml_node(\\\n node.getElementsByTagNameNS(RTS_NS, 'sourceServicePort')[0])\n if node.getElementsByTagNameNS(RTS_NS, 'targetServicePort').length != 1:\n raise InvalidServicePortConnectorNodeError\n self.target_service_port = TargetPort().parse_xml_node(\\\n node.getElementsByTagNameNS(RTS_NS, 'targetServicePort')[0])\n for c in get_direct_child_elements_xml(node, prefix=RTS_EXT_NS,\n local_name='Properties'):\n name, value = parse_properties_xml(c)\n self._properties[name] = value\n return self", - "docstring": "Parse an xml.dom Node object representing a service port connector into\n this object." - }, - { - "code": "def process_email(ctx, param, value):\n user = User.query.filter(User.email == value).first()\n if not user:\n raise click.BadParameter('User with email \\'%s\\' not found.', value)\n return user", - "docstring": "Return an user if it exists." - }, - { - "code": "def lighting(im, b, c):\n if b==0 and c==1: return im\n mu = np.average(im)\n return np.clip((im-mu)*c+mu+b,0.,1.).astype(np.float32)", - "docstring": "Adjust image balance and contrast" - }, - { - "code": "def logparse(*args, **kwargs):\n from clitool.cli import clistream\n from clitool.processor import SimpleDictReporter\n lst = [parse] + args\n reporter = SimpleDictReporter()\n stats = clistream(reporter, *lst, **kwargs)\n return stats, reporter.report()", - "docstring": "Parse access log on the terminal application.\n If list of files are given, parse each file. Otherwise, parse standard\n input.\n\n :param args: supporting functions after processed raw log line\n :type: list of callables\n :rtype: tuple of (statistics, key/value report)" - }, - { - "code": "def _summary(self, name, tensor):\n if tensor.shape.ndims == 0:\n return tf.summary.scalar(name, tensor)\n else:\n return tf.summary.histogram(name, tensor)", - "docstring": "Create a scalar or histogram summary matching the rank of the tensor.\n\n Args:\n name: Name for the summary.\n tensor: Tensor to summarize.\n\n Returns:\n Summary tensor." 
- }, - { - "code": "def _consume_callback(consumers):\n for consumer in consumers:\n def errback(failure):\n global _exit_code\n if failure.check(exceptions.HaltConsumer):\n _exit_code = failure.value.exit_code\n if _exit_code:\n _log.error(\n \"Consumer halted with non-zero exit code (%d): %s\",\n _exit_code,\n str(failure.value.reason),\n )\n elif failure.check(exceptions.ConsumerCanceled):\n _exit_code = 12\n _log.error(\n \"The consumer was canceled server-side, check with system administrators.\"\n )\n elif failure.check(exceptions.PermissionException):\n _exit_code = 15\n _log.error(\n \"The consumer could not proceed because of a permissions problem: %s\",\n str(failure.value),\n )\n else:\n _exit_code = 13\n _log.error(\n \"Unexpected error occurred in consumer %r: %r\", consumer, failure\n )\n try:\n reactor.stop()\n except error.ReactorNotRunning:\n pass\n def callback(consumer):\n _log.info(\"The %r consumer halted.\", consumer)\n if all([c.result.called for c in consumers]):\n _log.info(\"All consumers have stopped; shutting down.\")\n try:\n reactor.stop()\n except error.ReactorNotRunning:\n pass\n consumer.result.addCallbacks(callback, errback)", - "docstring": "Callback when consumers are successfully registered.\n\n This simply registers callbacks for consumer.result deferred object which\n fires when the consumer stops.\n\n Args\n consumers (list of fedora_messaging.api.Consumer):\n The list of consumers that were successfully created." - }, - { - "code": "def _get_hanging_wall_coeffs_rx(self, C, rup, r_x):\n r_1 = rup.width * cos(radians(rup.dip))\n r_2 = 62.0 * rup.mag - 350.0\n fhngrx = np.zeros(len(r_x))\n idx = np.logical_and(r_x >= 0., r_x < r_1)\n fhngrx[idx] = self._get_f1rx(C, r_x[idx], r_1)\n idx = r_x >= r_1\n f2rx = self._get_f2rx(C, r_x[idx], r_1, r_2)\n f2rx[f2rx < 0.0] = 0.0\n fhngrx[idx] = f2rx\n return fhngrx", - "docstring": "Returns the hanging wall r-x caling term defined in equation 7 to 12" - }, - { - "code": "def candidate(cls):\n return relationship(\n \"Candidate\",\n backref=backref(\n camel_to_under(cls.__name__) + \"s\",\n cascade=\"all, delete-orphan\",\n cascade_backrefs=False,\n ),\n cascade_backrefs=False,\n )", - "docstring": "The ``Candidate``." - }, - { - "code": "def _find_conda():\n if 'CONDA_EXE' in os.environ:\n conda = os.environ['CONDA_EXE']\n else:\n conda = util.which('conda')\n return conda", - "docstring": "Find the conda executable robustly across conda versions.\n\n Returns\n -------\n conda : str\n Path to the conda executable.\n\n Raises\n ------\n IOError\n If the executable cannot be found in either the CONDA_EXE environment\n variable or in the PATH.\n\n Notes\n -----\n In POSIX platforms in conda >= 4.4, conda can be set up as a bash function\n rather than an executable. (This is to enable the syntax\n ``conda activate env-name``.) In this case, the environment variable\n ``CONDA_EXE`` contains the path to the conda executable. In other cases,\n we use standard search for the appropriate name in the PATH.\n\n See https://github.com/airspeed-velocity/asv/issues/645 for more details." - }, - { - "code": "def update_extent_location(self, extent_loc):\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Path Table Record not yet initialized')\n self.extent_location = extent_loc", - "docstring": "A method to update the extent location for this Path Table Record.\n\n Parameters:\n extent_loc - The new extent location.\n Returns:\n Nothing." 
- }, - { - "code": "def get_all_templates(server, token):\n method = 'GET'\n uri = 'https://' + server + '/template'\n connect.tonicdns_client(uri, method, token, data=False)", - "docstring": "Retrieve all templates.\n\n Argument:\n\n server: TonicDNS API server\n token: TonicDNS API authentication token\n\n x-authentication-token: token" - }, - { - "code": "def remove_on_exception(dirname, remove=True):\n os.makedirs(dirname)\n try:\n yield\n except:\n if remove:\n shutil.rmtree(dirname, ignore_errors=True)\n raise", - "docstring": "Creates a directory, yields to the caller, and removes that directory\n if an exception is thrown." - }, - { - "code": "def add_node(self, node, **kwargs):\n if not isinstance(node, (list, set, tuple)):\n raise TypeError('Node can only be a list, set or tuple of nodes forming a clique')\n node = tuple(node)\n super(ClusterGraph, self).add_node(node, **kwargs)", - "docstring": "Add a single node to the cluster graph.\n\n Parameters\n ----------\n node: node\n A node should be a collection of nodes forming a clique. It can be\n a list, set or tuple of nodes\n\n Examples\n --------\n >>> from pgmpy.models import ClusterGraph\n >>> G = ClusterGraph()\n >>> G.add_node(('a', 'b', 'c'))" - }, - { - "code": "def log_entries(self, time_zone='UTC', is_overview=False,\n include=None, fetch_all=True):\n endpoint = '/'.join((self.endpoint, self.id, 'log_entries'))\n query_params = {\n 'time_zone': time_zone,\n 'is_overview': json.dumps(is_overview),\n }\n if include:\n query_params['include'] = include\n result = self.logEntryFactory.find(\n endpoint=endpoint,\n api_key=self.api_key,\n fetch_all=fetch_all,\n **query_params\n )\n return result", - "docstring": "Query for log entries on an incident instance." - }, - { - "code": "def check_new_videos(self):\n resp = api.request_videos(self.blink,\n time=self.blink.last_refresh,\n page=0)\n for camera in self.cameras.keys():\n self.motion[camera] = False\n try:\n info = resp['videos']\n except (KeyError, TypeError):\n _LOGGER.warning(\"Could not check for motion. Response: %s\", resp)\n return False\n for entry in info:\n try:\n name = entry['camera_name']\n clip = entry['address']\n timestamp = entry['created_at']\n self.motion[name] = True\n self.last_record[name] = {'clip': clip, 'time': timestamp}\n except KeyError:\n _LOGGER.debug(\"No new videos since last refresh.\")\n return True", - "docstring": "Check if new videos since last refresh." - }, - { - "code": "def build_projection_kwargs(cls, source, mapping):\n return cls._map_arg_names(source, cls._default_attr_mapping + mapping)", - "docstring": "Handle mapping a dictionary of metadata to keyword arguments." - }, - { - "code": "def char_pad_trunc(data, maxlen):\n new_dataset = []\n for sample in data:\n if len(sample) > maxlen:\n new_data = sample[:maxlen]\n elif len(sample) < maxlen:\n pads = maxlen - len(sample)\n new_data = sample + ['PAD'] * pads\n else:\n new_data = sample\n new_dataset.append(new_data)\n return new_dataset", - "docstring": "We truncate to maxlen or add in PAD tokens" - }, - { - "code": "def analysis_title_header_element(feature, parent):\n _ = feature, parent\n header = analysis_title_header['string_format']\n return header.capitalize()", - "docstring": "Retrieve analysis title header string from definitions." - }, - { - "code": "def read_string(self, string, source=''):\n sfile = io.StringIO(string)\n self.read_file(sfile, source)", - "docstring": "Read configuration from a given string." 
- }, - { - "code": "def weld_cast_scalar(scalar, to_weld_type):\n if _not_possible_to_cast(scalar, to_weld_type):\n raise TypeError('Cannot cast scalar of type={} to type={}'.format(type(scalar), to_weld_type))\n weld_obj = create_empty_weld_object()\n if isinstance(scalar, WeldObject):\n scalar = get_weld_obj_id(weld_obj, scalar)\n weld_template = '{type}({scalar})'\n weld_obj.weld_code = weld_template.format(scalar=scalar,\n type=to_weld_type)\n return weld_obj", - "docstring": "Returns the scalar casted to the request Weld type.\n\n Parameters\n ----------\n scalar : {int, float, WeldObject}\n Input array.\n to_weld_type : WeldType\n Type of each element in the input array.\n\n Returns\n -------\n WeldObject\n Representation of this computation." - }, - { - "code": "def fill_with_defaults(process_input, input_schema):\n for field_schema, fields, path in iterate_schema(process_input, input_schema):\n if 'default' in field_schema and field_schema['name'] not in fields:\n dict_dot(process_input, path, field_schema['default'])", - "docstring": "Fill empty optional fields in input with default values." - }, - { - "code": "def _forceInt(x,y,z,dens,b2,c2,i,glx=None,glw=None):\n def integrand(s):\n t= 1/s**2.-1.\n return dens(numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)))\\\n *(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2))\\\n /numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))\n if glx is None:\n return integrate.quad(integrand,0.,1.)[0] \n else:\n return numpy.sum(glw*integrand(glx))", - "docstring": "Integral that gives the force in x,y,z" - }, - { - "code": "def getDigitalMinimum(self, chn=None):\n if chn is not None:\n if 0 <= chn < self.signals_in_file:\n return self.digital_min(chn)\n else:\n return 0\n else:\n digMin = np.zeros(self.signals_in_file)\n for i in np.arange(self.signals_in_file):\n digMin[i] = self.digital_min(i)\n return digMin", - "docstring": "Returns the minimum digital value of signal edfsignal.\n\n Parameters\n ----------\n chn : int\n channel number\n\n Examples\n --------\n >>> import pyedflib\n >>> f = pyedflib.data.test_generator()\n >>> f.getDigitalMinimum(0)\n -32768\n >>> f._close()\n >>> del f" - }, - { - "code": "def srem(self, key, *values):\n redis_set = self._get_set(key, 'SREM')\n if not redis_set:\n return 0\n before_count = len(redis_set)\n for value in values:\n redis_set.discard(self._encode(value))\n after_count = len(redis_set)\n if before_count > 0 and len(redis_set) == 0:\n self.delete(key)\n return before_count - after_count", - "docstring": "Emulate srem." 
- }, - { - "code": "def get_comment_form(self, *args, **kwargs):\n if isinstance(args[-1], list) or 'comment_record_types' in kwargs:\n return self.get_comment_form_for_create(*args, **kwargs)\n else:\n return self.get_comment_form_for_update(*args, **kwargs)", - "docstring": "Pass through to provider CommentAdminSession.get_comment_form_for_update" - }, - { - "code": "def plot(self, key=None,\n cmap=None, ms=4, vmin=None, vmax=None,\n vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False,\n ntMax=None, nchMax=None, nlbdMax=3,\n lls=None, lct=None, lcch=None, lclbd=None, cbck=None,\n inct=[1,10], incX=[1,5], inclbd=[1,10],\n fmt_t='06.3f', fmt_X='01.0f',\n invert=True, Lplot='In', dmarker=None,\n Bck=True, fs=None, dmargin=None, wintit=None, tit=None,\n fontsize=None, labelpad=None, draw=True, connect=True):\n kh = _plot.Data_plot(self, key=key, indref=0,\n cmap=cmap, ms=ms, vmin=vmin, vmax=vmax,\n vmin_map=vmin_map, vmax_map=vmax_map,\n cmap_map=cmap_map, normt_map=normt_map,\n ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax,\n lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck,\n inct=inct, incX=incX, inclbd=inclbd,\n fmt_t=fmt_t, fmt_X=fmt_X, Lplot=Lplot,\n invert=invert, dmarker=dmarker, Bck=Bck,\n fs=fs, dmargin=dmargin, wintit=wintit, tit=tit,\n fontsize=fontsize, labelpad=labelpad,\n draw=draw, connect=connect)\n return kh", - "docstring": "Plot the data content in a generic interactive figure" - }, - { - "code": "def delete_database(self, database):\n url = \"db/{0}\".format(database)\n self.request(\n url=url,\n method='DELETE',\n expected_response_code=204\n )\n return True", - "docstring": "Drop a database on the InfluxDB server.\n\n :param database: the name of the database to delete\n :type database: string\n :rtype: boolean" - }, - { - "code": "async def __handle_ping(self, _ : Ping):\n self.__last_ping = time.time()\n await ZMQUtils.send(self.__backend_socket, Pong())", - "docstring": "Handle a Ping message. Pong the backend" - }, - { - "code": "def average_dtu_configurations(list_of_objects):\n result = DtuConfiguration()\n if len(list_of_objects) == 0:\n return result\n list_of_members = result.__dict__.keys()\n for member in list_of_members:\n result.__dict__[member] = np.mean(\n [tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects]\n )\n return result", - "docstring": "Return DtuConfiguration instance with averaged values.\n\n Parameters\n ----------\n list_of_objects : python list\n List of DtuConfiguration instances to be averaged.\n\n Returns\n -------\n result : DtuConfiguration instance\n Object with averaged values." 
- }, - { - "code": "def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout):\n timeout_expiration = datetime.datetime.now() + timeout\n print(\"Monitoring all tasks for 'Completed' state, timeout in {}...\".format(timeout), end='')\n while datetime.datetime.now() < timeout_expiration:\n print('.', end='')\n sys.stdout.flush()\n for (job_id, _) in job_ids:\n tasks = batch_service_client.task.list(job_id)\n incomplete_tasks = [task for task in tasks if\n task.state != batchmodels.TaskState.completed]\n if incomplete_tasks:\n break\n if not incomplete_tasks:\n print()\n return True\n else:\n time.sleep(1)\n raise RuntimeError(\"ERROR: Tasks did not reach 'Completed' state within \"\n \"timeout period of \" + str(timeout))", - "docstring": "Returns when all tasks in the specified job reach the Completed state.\n\n :param batch_service_client: A Batch service client.\n :type batch_service_client: `azure.batch.BatchServiceClient`\n :param str job_id: The id of the job whose tasks should be to monitored.\n :param timedelta timeout: The duration to wait for task completion. If all\n tasks in the specified job do not reach Completed state within this time\n period, an exception will be raised." - }, - { - "code": "def _align_sequences_to_hmm(self, hmm_file, sequences_file, output_alignment_file):\n ss = SequenceSearcher(hmm_file)\n with tempfile.NamedTemporaryFile(prefix='graftm', suffix='.aln.fasta') as tempalign:\n ss.hmmalign_sequences(hmm_file, sequences_file, tempalign.name)\n ss.alignment_correcter([tempalign.name], output_alignment_file)", - "docstring": "Align sequences to an HMM, and write an alignment of\n these proteins after cleanup so that they can be used for tree-making\n\n Parameters\n ----------\n sequences_file: str\n path to file of unaligned protein sequences\n hmm_file: str\n path to hmm file\n output_alignment_file: str\n write alignment to this file\n\n Returns\n -------\n nothing" - }, - { - "code": "def get_minimum():\n r\n d = dict(min_freq=_min_freq,\n min_time=_min_time,\n min_off=_min_off,\n min_res=_min_res,\n min_angle=_min_angle)\n return d", - "docstring": "r\"\"\"\n Return the current minimum values.\n\n Returns\n -------\n min_vals : dict\n Dictionary of current minimum values with keys\n\n - min_freq : float\n - min_time : float\n - min_off : float\n - min_res : float\n - min_angle : float\n\n For a full description of these options, see `set_minimum`.\n\n Note\n ----\n set_minimum and get_minimum are derived after set_printoptions and\n get_printoptions from arrayprint.py in numpy." - }, - { - "code": "def mathTransformToMatrix(mathTransform):\n m = MathTransform().compose(mathTransform.offset, mathTransform.scale, mathTransform.rotation)\n return tuple(m)", - "docstring": "Take a ShallowTransform object and return a 6-tuple." - }, - { - "code": "def put(self, key, value):\n if key == None or key == '':\n return None\n elif key.find('.') > 0:\n RecursiveObjectWriter.set_property(self, key, value)\n return value\n else:\n self[key] = value\n return value", - "docstring": "Puts a new value into map element specified by its key.\n\n The key can be defined using dot notation\n and allows to recursively access elements of elements.\n\n :param key: a key of the element to put.\n\n :param value: a new value for map element." 
- }, - { - "code": "def createReference(self, fromnode, tonode, edge_data=None):\n if fromnode is None:\n fromnode = self\n fromident, toident = self.getIdent(fromnode), self.getIdent(tonode)\n if fromident is None or toident is None:\n return\n self.msg(4, \"createReference\", fromnode, tonode, edge_data)\n self.graph.add_edge(fromident, toident, edge_data=edge_data)", - "docstring": "Create a reference from fromnode to tonode" - }, - { - "code": "def average_balance_recharges(user, **kwargs):\n balance = 0\n for r1, r2 in pairwise(user.recharges):\n balance += r1.amount * min(1, (r2.datetime - r1.datetime).days) / 2\n first_recharge = user.recharges[0]\n last_recharge = user.recharges[-1]\n duration = (last_recharge.datetime - first_recharge.datetime).days\n return balance / min(1, duration)", - "docstring": "Return the average daily balance estimated from all recharges. We assume a\n linear usage between two recharges, and an empty balance before a recharge.\n\n The average balance can be seen as the area under the curve delimited by\n all recharges." - }, - { - "code": "def convert_raw_tuple(value_tuple, format_string):\n values = []\n for v, c in zip(value_tuple, format_string):\n if v is None:\n values.append(v)\n elif c == u\"s\":\n values.append(v)\n elif c == u\"S\":\n values.append([s for s in v.split(u\" \") if len(s) > 0])\n elif c == u\"i\":\n values.append(int(v))\n elif c == u\"U\":\n values.append(convert_unicode_field(v))\n elif c == u\"A\":\n values.append(convert_ascii_field(v))\n return tuple(values)", - "docstring": "Convert a tuple of raw values, according to the given line format.\n\n :param tuple value_tuple: the tuple of raw values\n :param str format_string: the format of the tuple\n :rtype: list of tuples" - }, - { - "code": "def run(self, exp_config=None, app_id=None, bot=False, **kwargs):\n import dallinger as dlgr\n app_id = self.make_uuid(app_id)\n if bot:\n kwargs[\"recruiter\"] = \"bots\"\n self.app_id = app_id\n self.exp_config = exp_config or kwargs\n self.update_status(\"Starting\")\n try:\n if self.exp_config.get(\"mode\") == \"debug\":\n dlgr.command_line.debug.callback(\n verbose=True, bot=bot, proxy=None, exp_config=self.exp_config\n )\n else:\n dlgr.deployment.deploy_sandbox_shared_setup(\n dlgr.command_line.log,\n app=app_id,\n verbose=self.verbose,\n exp_config=self.exp_config,\n )\n except Exception:\n self.update_status(\"Errored\")\n raise\n else:\n self.update_status(\"Running\")\n self._await_completion()\n self.update_status(\"Retrieving data\")\n data = self.retrieve_data()\n self.update_status(\"Completed\")\n return data", - "docstring": "Deploy and run an experiment.\n\n The exp_config object is either a dictionary or a\n ``localconfig.LocalConfig`` object with parameters\n specific to the experiment run grouped by section." - }, - { - "code": "def attribute_result(cls, sprites):\n retval = dict((x, True) for x in cls.ATTRIBUTES)\n for properties in sprites.values():\n for attribute, state in properties.items():\n retval[attribute] &= state != cls.STATE_MODIFIED\n return retval", - "docstring": "Return mapping of attributes to if they were initialized or not." 
- }, - { - "code": "def get_context_for_image(self, zoom):\n cairo_context = Context(self.__image)\n cairo_context.scale(zoom * self.multiplicator, zoom * self.multiplicator)\n return cairo_context", - "docstring": "Creates a temporary cairo context for the image surface\n\n :param zoom: The current scaling factor\n :return: Cairo context to draw on" - }, - { - "code": "def _find_path_between(self, p: GridQubit, q: GridQubit,\n used: Set[GridQubit]) -> Optional[List[GridQubit]]:\n def assemble_path(n: GridQubit, parent: Dict[GridQubit, GridQubit]):\n path = [n]\n while n in parent:\n n = parent[n]\n path.append(n)\n return path\n other = {p: q, q: p}\n parents = {p: dict(), q: dict()} \\\n visited = {p: set(), q: set()}\n queue = collections.deque([(p, p), (q, q)])\n while queue:\n n, s = queue.popleft()\n for n_adj in self._c_adj[n]:\n if n_adj in visited[other[s]]:\n path_s = assemble_path(n, parents[s])[-2::-1]\n path_other = assemble_path(n_adj, parents[other[s]])[:-1]\n path = path_s + path_other\n if s == q:\n path.reverse()\n return path\n elif n_adj not in used and n_adj not in visited[s]:\n queue.append((n_adj, s))\n visited[s].add(n_adj)\n parents[s][n_adj] = n\n return None", - "docstring": "Searches for continuous sequence between two qubits.\n\n This method runs two BFS algorithms in parallel (alternating variable s\n in each iteration); the first one starting from qubit p, and the second\n one starting from qubit q. If at some point a qubit reachable from p is\n found to be on the set of qubits already reached from q (or vice versa),\n the search is stopped and new path returned.\n\n Args:\n p: The first qubit, start of the sequence.\n q: The second qubit, end of the sequence.\n used: Set of forbidden qubits which cannot appear on the sequence.\n\n Returns:\n Continues sequence of qubits with new path between p and q, or None\n if no path was found." - }, - { - "code": "def p0(self):\n if self._p0 is None:\n raise ValueError(\"initial positions not set; run set_p0\")\n p0 = {param: self._p0[..., k]\n for (k, param) in enumerate(self.sampling_params)}\n return p0", - "docstring": "A dictionary of the initial position of the walkers.\n\n This is set by using ``set_p0``. If not set yet, a ``ValueError`` is\n raised when the attribute is accessed." - }, - { - "code": "def convert(self, infile, item=None):\n if not os.path.isfile(infile):\n raise IOError('{} is not a valid file'.format(infile))\n if item is None:\n item = os.path.splitext(infile)[0]\n ext = os.path.splitext(infile)[1]\n if ext == '.npz':\n self.npz_convert(infile, item)\n elif ext == '.mat':\n self.mat_convert(infile, item)\n elif ext == '.h5':\n self.h5features_convert(infile)\n else:\n raise IOError('Unknown file format for {}'.format(infile))", - "docstring": "Convert an input file to h5features based on its extension.\n\n :raise IOError: if `infile` is not a valid file.\n :raise IOError: if `infile` extension is not supported." - }, - { - "code": "def hits(self):\n self.quality_sort()\n hits = dict((query, list(blines)) for (query, blines) in \\\n groupby(self, lambda x: x.query))\n self.ref_sort()\n return hits", - "docstring": "returns a dict with query => blastline" - }, - { - "code": "def get_experiment_from_key(self, experiment_key):\n experiment = self.experiment_key_map.get(experiment_key)\n if experiment:\n return experiment\n self.logger.error('Experiment key \"%s\" is not in datafile.' 
% experiment_key)\n self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n return None", - "docstring": "Get experiment for the provided experiment key.\n\n Args:\n experiment_key: Experiment key for which experiment is to be determined.\n\n Returns:\n Experiment corresponding to the provided experiment key." - }, - { - "code": "def fetch_url(src, dst):\n if sys.version_info[0] > 2:\n import urllib.request\n class URLopener(urllib.request.FancyURLopener):\n def http_error_default(self, url, fp, errcode, errmsg, headers):\n sys.stderr.write(\"ERROR: could not fetch {0}\\n\".format(url))\n sys.exit(-1)\n else:\n import urllib\n class URLopener(urllib.FancyURLopener):\n def http_error_default(self, url, fp, errcode, errmsg, headers):\n sys.stderr.write(\"ERROR: could not fetch {0}\\n\".format(url))\n sys.exit(-1)\n dirname = os.path.dirname(dst)\n if dirname != '':\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n opener = URLopener()\n opener.retrieve(src, dst)", - "docstring": "Fetch file from URL src and save it to dst." - }, - { - "code": "def Remove(self, *descriptor_names):\n new_descriptor_map = self.descriptor_map.copy()\n for name in descriptor_names:\n new_descriptor_map.pop(name, None)\n new_descriptors = [\n desc for desc in self.descriptors\n if desc in itervalues(new_descriptor_map)\n ]\n return TypeDescriptorSet(*new_descriptors)", - "docstring": "Returns a copy of this set without elements with given names." - }, - { - "code": "def _validate_alias_command(alias_command):\n if not alias_command:\n raise CLIError(EMPTY_ALIAS_ERROR)\n split_command = shlex.split(alias_command)\n boundary_index = len(split_command)\n for i, subcommand in enumerate(split_command):\n if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:\n boundary_index = i\n break\n command_to_validate = ' '.join(split_command[:boundary_index]).lower()\n for command in azext_alias.cached_reserved_commands:\n if re.match(r'([a-z\\-]*\\s)*{}($|\\s)'.format(command_to_validate), command):\n return\n _validate_positional_arguments(shlex.split(alias_command))", - "docstring": "Check if the alias command is valid.\n\n Args:\n alias_command: The command to validate." - }, - { - "code": "def continuous_frequency(self, data_frame):\n tap_timestamps = data_frame.td[data_frame.action_type==1]\n cont_freq = 1.0/(np.array(tap_timestamps[1:-1])-np.array(tap_timestamps[0:-2]))\n duration = math.ceil(data_frame.td[-1])\n return cont_freq, duration", - "docstring": "This method returns continuous frequency\n\n :param data_frame: the data frame\n :type data_frame: pandas.DataFrame\n :return cont_freq: frequency\n :rtype cont_freq: float" - }, - { - "code": "def _ycbcr2l(self, mode):\n self._check_modes((\"YCbCr\", \"YCbCrA\"))\n self.channels = [self.channels[0]] + self.channels[3:]\n if self.fill_value is not None:\n self.fill_value = [self.fill_value[0]] + self.fill_value[3:]\n self.mode = mode", - "docstring": "Convert from YCbCr to L." 
- }, - { - "code": "def parse(self):\n for section_name, section_options in self.sections.items():\n method_postfix = ''\n if section_name:\n method_postfix = '_%s' % section_name\n section_parser_method = getattr(\n self,\n ('parse_section%s' % method_postfix).replace('.', '__'),\n None)\n if section_parser_method is None:\n raise DistutilsOptionError(\n 'Unsupported distribution option section: [%s.%s]' % (\n self.section_prefix, section_name))\n section_parser_method(section_options)", - "docstring": "Parses configuration file items from one\n or more related sections." - }, - { - "code": "def message_checksum(msg):\n from .mavcrc import x25crc\n crc = x25crc()\n crc.accumulate_str(msg.name + ' ')\n crc_end = msg.base_fields()\n for i in range(crc_end):\n f = msg.ordered_fields[i]\n crc.accumulate_str(f.type + ' ')\n crc.accumulate_str(f.name + ' ')\n if f.array_length:\n crc.accumulate([f.array_length])\n return (crc.crc&0xFF) ^ (crc.crc>>8)", - "docstring": "calculate a 8-bit checksum of the key fields of a message, so we\n can detect incompatible XML changes" - }, - { - "code": "def generate_read_batches(\n self,\n table,\n columns,\n keyset,\n index=\"\",\n partition_size_bytes=None,\n max_partitions=None,\n ):\n partitions = self._get_snapshot().partition_read(\n table=table,\n columns=columns,\n keyset=keyset,\n index=index,\n partition_size_bytes=partition_size_bytes,\n max_partitions=max_partitions,\n )\n read_info = {\n \"table\": table,\n \"columns\": columns,\n \"keyset\": keyset._to_dict(),\n \"index\": index,\n }\n for partition in partitions:\n yield {\"partition\": partition, \"read\": read_info.copy()}", - "docstring": "Start a partitioned batch read operation.\n\n Uses the ``PartitionRead`` API request to initiate the partitioned\n read. Returns a list of batch information needed to perform the\n actual reads.\n\n :type table: str\n :param table: name of the table from which to fetch data\n\n :type columns: list of str\n :param columns: names of columns to be retrieved\n\n :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`\n :param keyset: keys / ranges identifying rows to be retrieved\n\n :type index: str\n :param index: (Optional) name of index to use, rather than the\n table's primary key\n\n :type partition_size_bytes: int\n :param partition_size_bytes:\n (Optional) desired size for each partition generated. The service\n uses this as a hint, the actual partition size may differ.\n\n :type max_partitions: int\n :param max_partitions:\n (Optional) desired maximum number of partitions generated. The\n service uses this as a hint, the actual number of partitions may\n differ.\n\n :rtype: iterable of dict\n :returns:\n mappings of information used peform actual partitioned reads via\n :meth:`process_read_batch`." 
- }, - { - "code": "def _process_tz(self, dt, naive, tz):\n\t\tdef _tz(t):\n\t\t\tif t in (None, 'naive'):\n\t\t\t\treturn t\n\t\t\tif t == 'local':\n\t\t\t\tif __debug__ and not localtz:\n\t\t\t\t\traise ValueError(\"Requested conversion to local timezone, but `localtz` not installed.\")\n\t\t\t\tt = localtz\n\t\t\tif not isinstance(t, tzinfo):\n\t\t\t\tif __debug__ and not localtz:\n\t\t\t\t\traise ValueError(\"The `pytz` package must be installed to look up timezone: \" + repr(t))\n\t\t\t\tt = get_tz(t)\n\t\t\tif not hasattr(t, 'normalize') and get_tz:\n\t\t\t\tt = get_tz(t.tzname(dt))\n\t\t\treturn t\n\t\tnaive = _tz(naive)\n\t\ttz = _tz(tz)\n\t\tif not dt.tzinfo and naive:\n\t\t\tif hasattr(naive, 'localize'):\n\t\t\t\tdt = naive.localize(dt)\n\t\t\telse:\n\t\t\t\tdt = dt.replace(tzinfo=naive)\n\t\tif not tz:\n\t\t\treturn dt\n\t\tif hasattr(tz, 'normalize'):\n\t\t\tdt = tz.normalize(dt.astimezone(tz))\n\t\telif tz == 'naive':\n\t\t\tdt = dt.replace(tzinfo=None)\n\t\telse:\n\t\t\tdt = dt.astimezone(tz)\n\t\treturn dt", - "docstring": "Process timezone casting and conversion." - }, - { - "code": "def name2rgb(hue):\n r, g, b = colorsys.hsv_to_rgb(hue / 360.0, .8, .7)\n return tuple(int(x * 256) for x in [r, g, b])", - "docstring": "Originally used to calculate color based on module name." - }, - { - "code": "def storages(self):\n grids = [self.network.mv_grid] + list(self.network.mv_grid.lv_grids)\n storage_results = {}\n storage_results['storage_id'] = []\n storage_results['nominal_power'] = []\n storage_results['voltage_level'] = []\n storage_results['grid_connection_point'] = []\n for grid in grids:\n for storage in grid.graph.nodes_by_attribute('storage'):\n storage_results['storage_id'].append(repr(storage))\n storage_results['nominal_power'].append(storage.nominal_power)\n storage_results['voltage_level'].append(\n 'mv' if isinstance(grid, MVGrid) else 'lv')\n storage_results['grid_connection_point'].append(\n grid.graph.neighbors(storage)[0])\n return pd.DataFrame(storage_results).set_index('storage_id')", - "docstring": "Gathers relevant storage results.\n\n Returns\n -------\n :pandas:`pandas.DataFrame`\n\n Dataframe containing all storages installed in the MV grid and\n LV grids. Index of the dataframe are the storage representatives,\n columns are the following:\n\n nominal_power : :obj:`float`\n Nominal power of the storage in kW.\n\n voltage_level : :obj:`str`\n Voltage level the storage is connected to. Can either be 'mv'\n or 'lv'." - }, - { - "code": "def leaveEvent(self, event):\n super(FoldingPanel, self).leaveEvent(event)\n QtWidgets.QApplication.restoreOverrideCursor()\n self._highlight_runner.cancel_requests()\n if not self.highlight_caret_scope:\n self._clear_scope_decos()\n self._mouse_over_line = None\n self._current_scope = None\n else:\n self._block_nbr = -1\n self._highlight_caret_scope()\n self.editor.repaint()", - "docstring": "Removes scope decorations and background from the editor and the panel\n if highlight_caret_scope, else simply update the scope decorations to\n match the caret scope." 
- }, - { - "code": "def setup_lookup_table( self, hamiltonian='nearest-neighbour' ):\n expected_hamiltonian_values = [ 'nearest-neighbour', 'coordination_number' ]\n if hamiltonian not in expected_hamiltonian_values:\n raise ValueError\n self.lattice.jump_lookup_table = lookup_table.LookupTable( self.lattice, hamiltonian )", - "docstring": "Create a jump-probability look-up table corresponding to the appropriate Hamiltonian.\n\n Args:\n hamiltonian (Str, optional): String specifying the simulation Hamiltonian.\n valid values are 'nearest-neighbour' (default) and 'coordination_number'.\n\n Returns:\n None" - }, - { - "code": "def run(self, run_config, controller, max_game_steps=0, max_episodes=0,\n game_steps_per_episode=0, save_replay=False):\n is_replay = (controller.status == remote_controller.Status.in_replay)\n total_game_steps = 0\n start_time = time.time()\n num_episodes = 0\n try:\n while True:\n self.init(controller.game_info(), controller.data())\n episode_steps = 0\n num_episodes += 1\n controller.step()\n while True:\n total_game_steps += self._step_mul\n episode_steps += self._step_mul\n frame_start_time = time.time()\n obs = controller.observe()\n self.render(obs)\n if obs.player_result:\n break\n cmd = self.get_actions(run_config, controller)\n if cmd == ActionCmd.STEP:\n pass\n elif cmd == ActionCmd.QUIT:\n if not is_replay and save_replay:\n self.save_replay(run_config, controller)\n return\n elif cmd == ActionCmd.RESTART:\n break\n else:\n raise Exception(\"Unexpected command: %s\" % cmd)\n controller.step(self._step_mul)\n if max_game_steps and total_game_steps >= max_game_steps:\n return\n if game_steps_per_episode and episode_steps >= game_steps_per_episode:\n break\n with sw(\"sleep\"):\n elapsed_time = time.time() - frame_start_time\n time.sleep(max(0, 1 / self._fps - elapsed_time))\n if is_replay:\n break\n if save_replay:\n self.save_replay(run_config, controller)\n if max_episodes and num_episodes >= max_episodes:\n break\n print(\"Restarting\")\n controller.restart()\n except KeyboardInterrupt:\n pass\n finally:\n self.close()\n elapsed_time = time.time() - start_time\n print(\"took %.3f seconds for %s steps: %.3f fps\" %\n (elapsed_time, total_game_steps, total_game_steps / elapsed_time))", - "docstring": "Run loop that gets observations, renders them, and sends back actions." - }, - { - "code": "def update_params(self, parameters):\n self.url_paramaters.update(parameters)\n self.response = requests.get(self.base_url, params=self.url_paramaters,\n headers=HEADERS)\n self.response.raise_for_status()\n return self", - "docstring": "Pass in a dictionary to update url parameters for NBA stats API\n\n Parameters\n ----------\n parameters : dict\n A dict containing key, value pairs that correspond with NBA stats\n API parameters.\n\n Returns\n -------\n self : TeamLog\n The TeamLog object containing the updated NBA stats API\n parameters." 
- }, - { - "code": "def get(self, sid):\n return ChallengeContext(\n self._version,\n service_sid=self._solution['service_sid'],\n identity=self._solution['identity'],\n factor_sid=self._solution['factor_sid'],\n sid=sid,\n )", - "docstring": "Constructs a ChallengeContext\n\n :param sid: A string that uniquely identifies this Challenge, or `latest`.\n\n :returns: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeContext\n :rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeContext" - }, - { - "code": "def disconnect_signals(self):\n for work in self:\n work.disconnect_signals()\n for cbk in self._callbacks:\n cbk.disable()", - "docstring": "Disable the signals within the `Flow`." - }, - { - "code": "def move_safe(origin, target):\n if origin == target:\n return origin\n if file_exists(target):\n return target\n shutil.move(origin, target)\n return target", - "docstring": "Move file, skip if exists" - }, - { - "code": "def time_between_updates(self):\n if 'last_updated' not in self._original:\n return 0\n last_update = self._original['last_updated']\n this_update = self.last_updated\n return this_update - last_update", - "docstring": "Time between current `last_updated` and previous `last_updated`" - }, - { - "code": "def workspace_backup_restore(ctx, choose_first, bak):\n backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup))\n backup_manager.restore(bak, choose_first)", - "docstring": "Restore backup BAK" - }, - { - "code": "def play(self):\n if self._proc.state() == QProcess.Running:\n if self.isPlaying is False:\n self._execute(\"pause\")\n self._changePlayingState(True)\n elif self._filePath is not None:\n self._kill()\n self._run(self._filePath)\n self._changePlayingState(True)", - "docstring": "Starts a playback" - }, - { - "code": "def get_log_entries(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.LogEntryList(self._results, runtime=self._runtime)", - "docstring": "Gets the log entry list resulting from a search.\n\n return: (osid.logging.LogEntryList) - the log entry list\n raise: IllegalState - list already retrieved\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def start(self):\n if os.isatty(self.fd.fileno()) and self.israw():\n self.original_attributes = termios.tcgetattr(self.fd)\n tty.setraw(self.fd)", - "docstring": "Saves the current terminal attributes and makes the tty raw.\n\n This method returns None immediately." 
- }, - { - "code": "def _get_db():\n from .settings import settings\n mongo = settings.MONGODB\n if 'URI' in mongo and mongo['URI']:\n uri = mongo['URI']\n else:\n uri = 'mongodb://'\n if all(mongo.get(key) for key in ('USERNAME', 'PASSWORD')):\n uri += '{0}:{1}@'.format(mongo['USERNAME'], mongo['PASSWORD'])\n if 'HOSTS' in mongo and mongo['HOSTS']:\n uri += ','.join(\n '{0}:{1}'.format(host, port)\n for (host, port) in zip(mongo['HOSTS'], mongo['PORTS']),\n )\n else:\n uri += '{0}:{1}'.format(mongo['HOST'], mongo.get('PORT', 27017))\n uri += '/' + mongo['DATABASE']\n if 'OPTIONS' in mongo and mongo['OPTIONS']:\n uri += '?{0}'.format('&'.join(mongo['OPTIONS']))\n client = ConnectionFailureProxy(MongoClient(uri, connect=False))\n database = client[parse_uri(uri)['database']]\n return database", - "docstring": "Returns the connection to the database using the settings.\n This function should not be called outside of this file.\n Use db instead." - }, - { - "code": "def keys(self):\n keys = []\n for series in self._get_series():\n keys.append(\n (series.get('measurement',\n series.get('name', 'results')),\n series.get('tags', None))\n )\n return keys", - "docstring": "Return the list of keys in the ResultSet.\n\n :return: List of keys. Keys are tuples (series_name, tags)" - }, - { - "code": "def on_api_error_14(self, request):\n request.method_params['captcha_key'] = self.get_captcha_key(request)\n request.method_params['captcha_sid'] = request.api_error.captcha_sid\n return self.send(request)", - "docstring": "14. Captcha needed" - }, - { - "code": "def body(self):\n if self._body is None:\n if self._body_reader is None:\n self._body = self.input.read(self.content_length or 0)\n else:\n self._body = self._body_reader(self.input)\n return self._body", - "docstring": "Reads and returns the entire request body.\n\n On first access, reads `content_length` bytes from `input` and stores\n the result on the request object. On subsequent access, returns the\n cached value." 
- }, - { - "code": "def knapsack_ilp(items, maxweight, verbose=False):\n import pulp\n values = [t[0] for t in items]\n weights = [t[1] for t in items]\n indices = [t[2] for t in items]\n prob = pulp.LpProblem(\"Knapsack\", pulp.LpMaximize)\n x = pulp.LpVariable.dicts(name='x', indexs=indices,\n lowBound=0, upBound=1, cat=pulp.LpInteger)\n prob.objective = sum(v * x[i] for v, i in zip(values, indices))\n prob.add(sum(w * x[i] for w, i in zip(weights, indices)) <= maxweight)\n pulp.PULP_CBC_CMD().solve(prob)\n flags = [x[i].varValue for i in indices]\n total_value = sum([val for val, flag in zip(values, flags) if flag])\n items_subset = [item for item, flag in zip(items, flags) if flag]\n if verbose:\n print(prob)\n print('OPT:')\n print('\\n'.join([' %s = %s' % (x[i].name, x[i].varValue) for i in indices]))\n print('total_value = %r' % (total_value,))\n return total_value, items_subset", - "docstring": "solves knapsack using an integer linear program\n\n CommandLine:\n python -m utool.util_alg knapsack_ilp\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_alg import * # NOQA\n >>> import utool as ut\n >>> # Solve https://xkcd.com/287/\n >>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]\n >>> values = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]\n >>> indices = ['mixed fruit', 'french fries', 'side salad',\n >>> 'hot wings', 'mozzarella sticks', 'sampler plate',\n >>> 'barbecue']\n >>> items = [(v, w, i) for v, w, i in zip(values, weights, indices)]\n >>> #items += [(3.95, 3.95, 'mystery plate')]\n >>> maxweight = 15.05\n >>> verbose = True\n >>> total_value, items_subset = knapsack_ilp(items, maxweight, verbose)\n >>> print('items_subset = %s' % (ut.repr3(items_subset, nl=1),))" - }, - { - "code": "def get_epub_opf_xml(filepath):\n if not zipfile.is_zipfile(filepath):\n raise EPubException('Unknown file')\n zf = zipfile.ZipFile(filepath, 'r', compression=zipfile.ZIP_DEFLATED, allowZip64=True)\n container = zf.read('META-INF/container.xml')\n container_xmldoc = minidom.parseString(container)\n opf_filepath = container_xmldoc.getElementsByTagName('rootfile')[0].attributes['full-path'].value\n return zf.read(opf_filepath)", - "docstring": "Returns the file.OPF contents of the ePub file" - }, - { - "code": "def list_skus(access_token, subscription_id, location, publisher, offer):\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/providers/Microsoft.Compute/',\n 'locations/', location,\n '/publishers/', publisher,\n '/artifacttypes/vmimage/offers/', offer,\n '/skus?api-version=', COMP_API])\n return do_get(endpoint, access_token)", - "docstring": "List available VM image skus for a publisher offer.\n\n Args:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n location (str): Azure data center location. E.g. westus.\n publisher (str): VM image publisher. E.g. MicrosoftWindowsServer.\n offer (str): VM image offer. E.g. WindowsServer.\n\n Returns:\n HTTP response with JSON list of skus." 
- }, - { - "code": "def threaded_start(self, no_init=False):\n thread = Thread(target=self.init_connections, kwargs={\n 'no_init': no_init})\n thread.setDaemon(True)\n thread.start()\n thread.join()", - "docstring": "Spawns a worker thread to set up the zookeeper connection" - }, - { - "code": "def get_skeleton(self):\n src_groups = []\n for grp in self.src_groups:\n sg = copy.copy(grp)\n sg.sources = []\n src_groups.append(sg)\n return self.__class__(self.names, self.weight, self.path, src_groups,\n self.num_gsim_paths, self.ordinal, self.samples)", - "docstring": "Return an empty copy of the source model, i.e. without sources,\n but with the proper attributes for each SourceGroup contained within." - }, - { - "code": "def token(self, adata, load):\n try:\n token = self.loadauth.get_tok(load['token'])\n except Exception as exc:\n log.error('Exception occurred when generating auth token: %s', exc)\n yield {}\n if not token:\n log.warning('Authentication failure of type \"token\" occurred.')\n yield {}\n for sub_auth in adata:\n for sub_adata in adata:\n if token['eauth'] not in adata:\n continue\n if not ((token['name'] in adata[token['eauth']]) |\n ('*' in adata[token['eauth']])):\n continue\n yield {'sub_auth': sub_auth, 'token': token}\n yield {}", - "docstring": "Determine if token auth is valid and yield the adata" - }, - { - "code": "def main():\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"h:v\", [\"help\", \"nack=\",\n \"servers=\", \"queues=\"])\n except getopt.GetoptError as err:\n print str(err)\n usage()\n sys.exit()\n nack = 0.0\n verbose = False\n servers = \"localhost:7712,localhost:7711\"\n queues = \"test\"\n for o, a in opts:\n if o == \"-v\":\n verbose = True\n elif o in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif o in (\"--nack\"):\n nack = float(a)\n elif o in (\"--servers\"):\n servers = a\n elif o in (\"--queues\"):\n queues = a\n else:\n assert False, \"unhandled option\"\n servers = servers.split(\",\")\n queues = queues.split(\",\")\n c = Client(servers)\n c.connect()\n while True:\n jobs = c.get_job(queues)\n for queue_name, job_id, job in jobs:\n rnd = random.random()\n if rnd >= nack:\n print \">>> received job:\", job_id\n c.ack_job(job_id)\n else:\n print \">>> bouncing job:\", job_id\n c.nack_job(job_id)", - "docstring": "Start the poor_consumer." - }, - { - "code": "def abs_contact_2_coo_file(abs_contact_file, coo_file):\n sparse_dict = dict()\n h = open(abs_contact_file, \"r\")\n all_lines = h.readlines()\n n_lines = len(all_lines)\n for i in range(1, n_lines):\n line = all_lines[i]\n dat = line.split()\n mates = [int(dat[0]), int(dat[1])]\n mates.sort()\n f1 = mates[0] - 1\n f2 = mates[1] - 1\n if f1 in sparse_dict:\n if f2 in sparse_dict[f1]:\n sparse_dict[f1][f2] += 1\n else:\n sparse_dict[f1][f2] = 1\n else:\n sparse_dict[f1] = dict()\n sparse_dict[f1][f2] = 1\n keys = list(sparse_dict.keys())\n keys.sort()\n h.close()\n h_coo = open(coo_file, \"w\")\n h_coo.write(\"%s\\t%s\\t%s\\n\" % (\"id_frag_a\", \"id_frag_b\", \"n_contact\"))\n for fa in keys:\n d_fb = sparse_dict[fa]\n keys_b = list(d_fb.keys())\n keys_b.sort()\n for fb in keys_b:\n nc = d_fb[fb]\n h_coo.write(\"%s\\t%s\\t%s\\n\" % (str(fa), str(fb), str(nc)))\n h_coo.close()\n h.close()", - "docstring": "Convert contact maps between old-style and new-style formats.\n\n A legacy function that converts contact maps from the older GRAAL format to\n the simpler instaGRAAL format. 
This is useful with datasets generated by\n Hi-C box.\n\n Parameters\n ----------\n abs_contact_file : str, file or pathlib.Path\n The input old-style contact map.\n coo_file : str, file, or pathlib.Path\n The output path to the generated contact map; must be writable." - }, - { - "code": "def include_file(self, path, include_dirs = []):\n if self.include_includes:\n if self.debug: print(\"------------------ Including a file: %s\"%path)\n inc_dirs = include_dirs if include_dirs else self.include_dirs\n parser = LEMSFileParser(self, inc_dirs, self.include_includes)\n if os.access(path, os.F_OK):\n if not path in self.included_files:\n parser.parse(open(path).read()) \n self.included_files.append(path)\n return\n else:\n if self.debug: print(\"Already included: %s\"%path)\n return\n else:\n for inc_dir in inc_dirs:\n new_path = (inc_dir + '/' + path)\n if os.access(new_path, os.F_OK):\n if not new_path in self.included_files:\n parser.parse(open(new_path).read())\n self.included_files.append(new_path)\n return\n else:\n if self.debug: print(\"Already included: %s\"%path)\n return\n msg = 'Unable to open ' + path\n if self.fail_on_missing_includes:\n raise Exception(msg)\n elif self.debug: \n print(msg)", - "docstring": "Includes a file into the current model.\n\n @param path: Path to the file to be included.\n @type path: str\n\n @param include_dirs: Optional alternate include search path.\n @type include_dirs: list(str)" - }, - { - "code": "def send(self, peer_id, message):\n assert isinstance(message, beans.RawMessage)\n peer = self._directory.get_peer(peer_id)\n link = self._get_link(peer)\n assert isinstance(link, beans.AbstractLink)\n return link.send(message)", - "docstring": "Synchronously sends a message\n\n :param peer_id: UUID of a peer\n :param message: Message to send to the peer\n :raise KeyError: Unknown peer\n :raise ValueError: No link to the peer" - }, - { - "code": "def list(region, profile):\n ini_data = {}\n environment = {}\n if region:\n environment['region'] = region\n else:\n environment['region'] = find_myself()\n if profile:\n environment['profile'] = profile\n ini_data['environment'] = environment\n if start_list(ini_data):\n sys.exit(0)\n else:\n sys.exit(1)", - "docstring": "List all the CloudFormation stacks in the given region." - }, - { - "code": "def _ssweek_of_month(date_value):\n \"0-starting index which Sundaystarting-week in the month this date is\"\n weekday_of_first = (date_value.replace(day=1).weekday() + 1) % 7\n return (date_value.day + weekday_of_first - 1) // 7", - "docstring": "0-starting index which Sundaystarting-week in the month this date is" - }, - { - "code": "def _decode_all_selective(data, codec_options, fields):\n if not codec_options.type_registry._decoder_map:\n return decode_all(data, codec_options)\n if not fields:\n return decode_all(data, codec_options.with_options(type_registry=None))\n from bson.raw_bson import RawBSONDocument\n internal_codec_options = codec_options.with_options(\n document_class=RawBSONDocument, type_registry=None)\n _doc = _bson_to_dict(data, internal_codec_options)\n return [_decode_selective(_doc, fields, codec_options,)]", - "docstring": "Decode BSON data to a single document while using user-provided\n custom decoding logic.\n\n `data` must be a string representing a valid, BSON-encoded document.\n\n :Parameters:\n - `data`: BSON data\n - `codec_options`: An instance of\n :class:`~bson.codec_options.CodecOptions` with user-specified type\n decoders. 
If no decoders are found, this method is the same as\n ``decode_all``.\n - `fields`: Map of document namespaces where data that needs\n to be custom decoded lives or None. For example, to custom decode a\n list of objects in 'field1.subfield1', the specified value should be\n ``{'field1': {'subfield1': 1}}``. If ``fields`` is an empty map or\n None, this method is the same as ``decode_all``.\n\n :Returns:\n - `document_list`: Single-member list containing the decoded document.\n\n .. versionadded:: 3.8" - }, - { - "code": "def save_asset(self, asset_form, *args, **kwargs):\n if asset_form.is_for_update():\n return self.update_asset(asset_form, *args, **kwargs)\n else:\n return self.create_asset(asset_form, *args, **kwargs)", - "docstring": "Pass through to provider AssetAdminSession.update_asset" - }, - { - "code": "def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):\n request = self.mk_request(defines.Codes.POST, path)\n request.token = generate_random_token(2)\n request.payload = payload\n if no_response:\n request.add_no_response()\n request.type = defines.Types[\"NON\"]\n for k, v in kwargs.items():\n if hasattr(request, k):\n setattr(request, k, v)\n return self.send_request(request, callback, timeout, no_response=no_response)", - "docstring": "Perform a POST on a certain path.\n\n :param path: the path\n :param payload: the request payload\n :param callback: the callback function to invoke upon response\n :param timeout: the timeout of the request\n :return: the response" - }, - { - "code": "def GetCoinAssets(self):\n assets = set()\n for coin in self.GetCoins():\n assets.add(coin.Output.AssetId)\n return list(assets)", - "docstring": "Get asset ids of all coins present in the wallet.\n\n Returns:\n list: of UInt256 asset id's." - }, - { - "code": "def get_comments_of_confirmation_per_page(self, confirmation_id, per_page=1000, page=1):\n return self._get_resource_per_page(\n resource=CONFIRMATION_COMMENTS,\n per_page=per_page,\n page=page,\n params={'confirmation_id': confirmation_id},\n )", - "docstring": "Get comments of confirmation per page\n\n :param confirmation_id: the confirmation id\n :param per_page: How many objects per page. Default: 1000\n :param page: Which page. Default: 1\n :return: list" - }, - { - "code": "def cli(env):\n username, secret, endpoint_url, timeout = get_user_input(env)\n new_client = SoftLayer.Client(username=username, api_key=secret, endpoint_url=endpoint_url, timeout=timeout)\n api_key = get_api_key(new_client, username, secret)\n path = '~/.softlayer'\n if env.config_file:\n path = env.config_file\n config_path = os.path.expanduser(path)\n env.out(env.fmt(config.config_table({'username': username,\n 'api_key': api_key,\n 'endpoint_url': endpoint_url,\n 'timeout': timeout})))\n if not formatting.confirm('Are you sure you want to write settings '\n 'to \"%s\"?' 
% config_path, default=True):\n raise exceptions.CLIAbort('Aborted.')\n parsed_config = utils.configparser.RawConfigParser()\n parsed_config.read(config_path)\n try:\n parsed_config.add_section('softlayer')\n except utils.configparser.DuplicateSectionError:\n pass\n parsed_config.set('softlayer', 'username', username)\n parsed_config.set('softlayer', 'api_key', api_key)\n parsed_config.set('softlayer', 'endpoint_url', endpoint_url)\n parsed_config.set('softlayer', 'timeout', timeout)\n config_fd = os.fdopen(os.open(config_path,\n (os.O_WRONLY | os.O_CREAT | os.O_TRUNC),\n 0o600),\n 'w')\n try:\n parsed_config.write(config_fd)\n finally:\n config_fd.close()\n env.fout(\"Configuration Updated Successfully\")", - "docstring": "Edit configuration." - }, - { - "code": "def generate_uuid():\n r_uuid = base64.urlsafe_b64encode(uuid.uuid4().bytes)\n return r_uuid.decode().replace('=', '')", - "docstring": "Generate a UUID." - }, - { - "code": "def protocol_str(protocol):\n if protocol == const.PROTOCOL_MRP:\n return 'MRP'\n if protocol == const.PROTOCOL_DMAP:\n return 'DMAP'\n if protocol == const.PROTOCOL_AIRPLAY:\n return 'AirPlay'\n return 'Unknown'", - "docstring": "Convert internal API protocol to string." - }, - { - "code": "def union_rectangles(R):\n if R == []:\n return 0\n X = []\n Y = []\n for j in range(len(R)):\n (x1, y1, x2, y2) = R[j]\n assert x1 <= x2 and y1 <= y2\n X.append(x1)\n X.append(x2)\n Y.append((y1, +1, j))\n Y.append((y2, -1, j))\n X.sort()\n Y.sort()\n X2i = {X[i]: i for i in range(len(X))}\n L = [X[i + 1] - X[i] for i in range(len(X) - 1)]\n C = Cover_query(L)\n area = 0\n last = 0\n for (y, delta, j) in Y:\n area += (y - last) * C.cover()\n last = y\n (x1, y1, x2, y2) = R[j]\n i = X2i[x1]\n k = X2i[x2]\n C.change(i, k, delta)\n return area", - "docstring": "Area of union of rectangles\n\n :param R: list of rectangles defined by (x1, y1, x2, y2)\n where (x1, y1) is top left corner and (x2, y2) bottom right corner\n :returns: area\n :complexity: :math:`O(n^2)`" - }, - { - "code": "def get_lastfm(method, lastfm_key='', **kwargs):\n if not lastfm_key:\n if 'lastfm_key' not in CONFIG or not CONFIG['lastfm_key']:\n logger.warning('No lastfm key configured')\n return ''\n else:\n lastfm_key = CONFIG['lastfm_key']\n url = 'http://ws.audioscrobbler.com/2.0/?method={}&api_key={}&format=json'\n url = url.format(method, lastfm_key)\n for key in kwargs:\n url += '&{}={}'.format(key, kwargs[key])\n response = get_url(url, parser='json')\n if 'error' in response:\n logger.error('Error number %d in lastfm query: %s',\n response['error'], response['message'])\n return ''\n return response", - "docstring": "Request the specified method from the lastfm api." 
- }, - { - "code": "def resolve_path(file_path, calling_function):\n if not file_path:\n resolved = os.path.join(os.getcwd(), calling_function)\n elif file_path.count(os.sep) == 0:\n resolved = os.path.join(os.getcwd(), file_path)\n else:\n resolved = file_path\n if not resolved.endswith('.csv'):\n resolved = resolved + '.csv'\n return resolved", - "docstring": "Conditionally set a path to a CSV file.\n\n Option 1 - Join working directory and calling function name (file_name)\n Option 2 - Join working directory and provided file_path string\n Option 3 - Return provided file_path\n\n :param file_path: None, filename string or Full file path\n :param calling_function: Name of the function that initialized the CSV class\n :return:" - }, - { - "code": "def daemons_start(self, run_daemons=True):\n result = True\n if run_daemons:\n logger.info(\"Alignak configured daemons start:\")\n else:\n logger.info(\"Alignak configured daemons check:\")\n for satellites_list in [self.conf.arbiters, self.conf.receivers, self.conf.reactionners,\n self.conf.pollers, self.conf.brokers, self.conf.schedulers]:\n for satellite in satellites_list:\n logger.info(\"- found %s, to be launched: %s, address: %s\",\n satellite.name, satellite.alignak_launched, satellite.uri)\n if satellite == self.link_to_myself:\n continue\n if satellite.alignak_launched and \\\n satellite.address not in ['127.0.0.1', 'localhost']:\n logger.error(\"Alignak is required to launch a daemon for %s %s \"\n \"but the satelitte is defined on an external address: %s\",\n satellite.type, satellite.name, satellite.address)\n result = False\n continue\n if not run_daemons:\n continue\n if not satellite.alignak_launched:\n logger.debug(\"Alignak will not launch '%s'\")\n continue\n if not satellite.active:\n logger.warning(\"- daemon '%s' is declared but not set as active, \"\n \"do not start...\", satellite.name)\n continue\n if satellite.name in self.my_daemons:\n logger.warning(\"- daemon '%s' is already running\", satellite.name)\n continue\n started = self.start_daemon(satellite)\n result = result and started\n return result", - "docstring": "Manage the list of the daemons in the configuration\n\n Check if the daemon needs to be started by the Arbiter.\n\n If so, starts the daemon if `run_daemons` is True\n\n :param run_daemons: run the daemons or make a simple check\n :type run_daemons: bool\n\n :return: True if all daemons are running, else False. always True for a simple check" - }, - { - "code": "def show_bandwidth_limit_rule(self, rule, policy, body=None):\n return self.get(self.qos_bandwidth_limit_rule_path %\n (policy, rule), body=body)", - "docstring": "Fetches information of a certain bandwidth limit rule." 
- }, - { - "code": "def make_merged_name(self, source_name, galkey, fullpath):\n format_dict = self.__dict__.copy()\n format_dict['sourcekey'] = self._name_factory.galprop_sourcekey(source_name=source_name,\n galpropkey=galkey)\n format_dict['fullpath'] = fullpath\n return self._name_factory.merged_gasmap(**format_dict)", - "docstring": "Make the name of a gasmap file for a set of merged rings\n\n Parameters\n ----------\n\n source_name : str\n The galprop component, used to define path to gasmap files\n galkey : str\n A short key identifying the galprop parameters\n fullpath : bool\n Return the full path name" - }, - { - "code": "def run_defenses(self):\n logging.info('******** Start evaluation of defenses ********')\n prev_submission_id = None\n need_reload_work = True\n while True:\n if need_reload_work:\n if self.num_defense_shards:\n shard_with_work = self.defense_work.read_undone_from_datastore(\n shard_id=(self.worker_id % self.num_defense_shards),\n num_shards=self.num_defense_shards)\n else:\n shard_with_work = self.defense_work.read_undone_from_datastore()\n logging.info('Loaded %d records of undone work from shard %s',\n len(self.defense_work), str(shard_with_work))\n if not self.defense_work.work:\n logging.info('Work is not populated, waiting...')\n time.sleep(SLEEP_TIME)\n continue\n if self.defense_work.is_all_work_competed():\n logging.info('All defense work completed.')\n break\n self.fetch_defense_data()\n need_reload_work = False\n work_id = self.defense_work.try_pick_piece_of_work(\n self.worker_id, submission_id=prev_submission_id)\n if not work_id:\n need_reload_work = True\n logging.info('Failed to pick work, waiting...')\n time.sleep(SLEEP_TIME_SHORT)\n continue\n logging.info('Selected work_id: %s', work_id)\n try:\n elapsed_time_sec, prev_submission_id, batch_result = (\n self.run_defense_work(work_id))\n logging.info('Work %s is done', work_id)\n is_work_update = self.defense_work.update_work_as_completed(\n self.worker_id, work_id,\n other_values={'elapsed_time': elapsed_time_sec,\n 'stat_correct': batch_result[0],\n 'stat_error': batch_result[1],\n 'stat_target_class': batch_result[2],\n 'stat_num_images': batch_result[3]})\n except WorkerError as e:\n logging.info('Failed to run work:\\n%s', str(e))\n if str(e).startswith('Docker returned non-zero retval'):\n logging.info('Running nvidia-docker to ensure that GPU works')\n shell_call(['nvidia-docker', 'run', '--rm', 'nvidia/cuda',\n 'nvidia-smi'])\n is_work_update = self.defense_work.update_work_as_completed(\n self.worker_id, work_id, error=str(e))\n if not is_work_update:\n logging.warning('Can''t update work \"%s\" as completed by worker %d',\n work_id, self.worker_id)\n need_reload_work = True\n logging.info('******** Finished evaluation of defenses ********')", - "docstring": "Method which evaluates all defense work.\n\n In a loop this method queries not completed defense work,\n picks one defense work and runs it." - }, - { - "code": "def safe_repr(self, obj):\n try:\n return repr(obj)\n except Exception as e:\n return '??? 
Broken repr (%s: %s)' % (type(e).__name__, e)", - "docstring": "Like a repr but without exception" - }, - { - "code": "def _create_linked_clone(self):\n gns3_snapshot_exists = False\n vm_info = yield from self._get_vm_info()\n for entry, value in vm_info.items():\n if entry.startswith(\"SnapshotName\") and value == \"GNS3 Linked Base for clones\":\n gns3_snapshot_exists = True\n if not gns3_snapshot_exists:\n result = yield from self.manager.execute(\"snapshot\", [self._vmname, \"take\", \"GNS3 Linked Base for clones\"])\n log.debug(\"GNS3 snapshot created: {}\".format(result))\n args = [self._vmname,\n \"--snapshot\",\n \"GNS3 Linked Base for clones\",\n \"--options\",\n \"link\",\n \"--name\",\n self.name,\n \"--basefolder\",\n self.working_dir,\n \"--register\"]\n result = yield from self.manager.execute(\"clonevm\", args)\n log.debug(\"VirtualBox VM: {} cloned\".format(result))\n self._vmname = self._name\n yield from self.manager.execute(\"setextradata\", [self._vmname, \"GNS3/Clone\", \"yes\"])\n try:\n args = [self._vmname, \"take\", \"reset\"]\n result = yield from self.manager.execute(\"snapshot\", args)\n log.debug(\"Snapshot 'reset' created: {}\".format(result))\n except VirtualBoxError:\n log.warn(\"Snapshot 'reset' not created\")\n os.makedirs(os.path.join(self.working_dir, self._vmname), exist_ok=True)", - "docstring": "Creates a new linked clone." - }, - { - "code": "def _get_status_code(self, http_status):\n try:\n return int(http_status.split(' ', 1)[0])\n except TypeError:\n _logger.warning('Unable to find status code in HTTP status %r.',\n http_status)\n return 500", - "docstring": "Get the HTTP status code from an HTTP status string.\n\n Args:\n http_status: A string containing a HTTP status code and reason.\n\n Returns:\n An integer with the status code number from http_status." - }, - { - "code": "def comicPageLink(self, comic, url, prevUrl):\n pageInfo = self.getPageInfo(comic, url)\n pageInfo['prev'] = prevUrl", - "docstring": "Write previous link into JSON." - }, - { - "code": "def transcribe(decoder, audio_file, libdir=None):\n decoder = get_decoder()\n decoder.start_utt()\n stream = open(audio_file, 'rb')\n while True:\n buf = stream.read(1024)\n if buf:\n decoder.process_raw(buf, False, False)\n else:\n break\n decoder.end_utt()\n return evaluate_results(decoder)", - "docstring": "Decode streaming audio data from raw binary file on disk." - }, - { - "code": "def SetTimezone(self, timezone):\n if not timezone:\n return\n try:\n self._timezone = pytz.timezone(timezone)\n except pytz.UnknownTimeZoneError:\n raise ValueError('Unsupported timezone: {0:s}'.format(timezone))", - "docstring": "Sets the timezone.\n\n Args:\n timezone (str): timezone.\n\n Raises:\n ValueError: if the timezone is not supported." - }, - { - "code": "def local_decorated_likelihoods(obj):\n for name, like in six.iteritems(likelihoods):\n obj[name + '_like'] = gofwrapper(like, snapshot)", - "docstring": "New interface likelihoods" - }, - { - "code": "def update(self, byte_arr):\n if byte_arr:\n self.value = self.calculate(byte_arr, self.value)", - "docstring": "Read bytes and update the CRC computed." 
- }, - { - "code": "def read_string(source, offset, length):\n end = offset + length\n try:\n return (codecs.decode(source[offset:end], aws_encryption_sdk.internal.defaults.ENCODING), end)\n except Exception:\n raise SerializationError(\"Bad format of serialized context.\")", - "docstring": "Reads a string from a byte string.\n\n :param bytes source: Source byte string\n :param int offset: Point in byte string to start reading\n :param int length: Length of string to read\n :returns: Read string and offset at point after read data\n :rtype: tuple of str and int\n :raises SerializationError: if unable to unpack" - }, - { - "code": "def get_gpus_in_use(max_devices=None):\n from turicreate.util import _get_cuda_gpus\n gpu_indices = get_gpu_ids_in_use(max_devices=max_devices)\n gpus = _get_cuda_gpus()\n return [gpus[index] for index in gpu_indices]", - "docstring": "Like get_num_gpus_in_use, but returns a list of dictionaries with just\n queried GPU information." - }, - { - "code": "def _ensure_tuple_or_list(arg_name, tuple_or_list):\n if not isinstance(tuple_or_list, (tuple, list)):\n raise TypeError(\n \"Expected %s to be a tuple or list. \"\n \"Received %r\" % (arg_name, tuple_or_list)\n )\n return list(tuple_or_list)", - "docstring": "Ensures an input is a tuple or list.\n\n This effectively reduces the iterable types allowed to a very short\n whitelist: list and tuple.\n\n :type arg_name: str\n :param arg_name: Name of argument to use in error message.\n\n :type tuple_or_list: sequence of str\n :param tuple_or_list: Sequence to be verified.\n\n :rtype: list of str\n :returns: The ``tuple_or_list`` passed in cast to a ``list``.\n :raises TypeError: if the ``tuple_or_list`` is not a tuple or list." - }, - { - "code": "def gaussian(x, a, b, c, d=0):\r\n return a * np.exp( -(((x-b)**2 )/ (2*(c**2))) ) + d", - "docstring": "a -> height of the curve's peak\r\n b -> position of the center of the peak\r\n c -> standard deviation or Gaussian RMS width\r\n d -> offset" - }, - { - "code": "def to_dict(self):\n d = {\n 'id': self.id,\n 'start': self.start,\n 'end': self.end,\n 'form': self.form\n }\n if self.lnk is not None:\n cfrom, cto = self.lnk.data\n d['from'] = cfrom\n d['to'] = cto\n if self.surface is not None:\n d['surface'] = self.surface\n if self.pos:\n d['tags'] = [ps[0] for ps in self.pos]\n d['probabilities'] = [ps[1] for ps in self.pos]\n return d", - "docstring": "Encode the token as a dictionary suitable for JSON serialization." 
- }, - { - "code": "def eligible_cost(self, column=None, value=None, **kwargs):\n return self._resolve_call('GIC_ELIGIBLE_COST', column, value, **kwargs)", - "docstring": "The assistance dollar amounts by eligible cost category.\n\n >>> GICS().eligible_cost('amount', 100000)" - }, - { - "code": "def _convert_and_assert_per_example_weights_compatible(\n input_, per_example_weights, dtype):\n per_example_weights = tf.convert_to_tensor(\n per_example_weights, name='per_example_weights', dtype=dtype)\n if input_.get_shape().ndims:\n expected_length = input_.get_shape().dims[0]\n message = ('per_example_weights must have rank 1 and length %s, but was: %s'\n % (expected_length, per_example_weights.get_shape()))\n else:\n expected_length = None\n message = ('per_example_weights must have rank 1 and length equal to the '\n 'first dimension of inputs (unknown), but was: %s'\n % per_example_weights.get_shape())\n if per_example_weights.get_shape().ndims not in (1, None):\n raise ValueError(message)\n if not per_example_weights.get_shape().is_compatible_with((expected_length,)):\n raise ValueError(message)\n return per_example_weights", - "docstring": "Converts per_example_weights to a tensor and validates the shape." - }, - { - "code": "def on_dt_changed(self, dt):\n self.datetime = dt\n self.blotter.set_date(dt)", - "docstring": "Callback triggered by the simulation loop whenever the current dt\n changes.\n\n Any logic that should happen exactly once at the start of each datetime\n group should happen here." - }, - { - "code": "def safe_delete(filename):\n try:\n os.unlink(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise", - "docstring": "Delete a file safely. If it's not present, no-op." - }, - { - "code": "def morphology(image, operation, radius, mtype='binary', value=1,\n shape='ball', radius_is_parametric=False, thickness=1,\n lines=3, include_center=False):\n if image.components > 1:\n raise ValueError('multichannel images not yet supported')\n _sflag_dict = {'ball': 1, 'box': 2, 'cross': 3, 'annulus': 4, 'polygon': 5}\n sFlag = _sflag_dict.get(shape, 0)\n if sFlag == 0:\n raise ValueError('invalid element shape')\n radius_is_parametric = radius_is_parametric * 1\n include_center = include_center * 1\n if (mtype == 'binary'):\n if (operation == 'dilate'):\n if (sFlag == 5):\n ret = iMath(image, 'MD', radius, value, sFlag, lines)\n else:\n ret = iMath(image, 'MD', radius, value, sFlag, radius_is_parametric, thickness, include_center)\n elif (operation == 'erode'):\n if (sFlag == 5):\n ret = iMath(image, 'ME', radius, value, sFlag, lines)\n else:\n ret = iMath(image, 'ME', radius, value, sFlag, radius_is_parametric, thickness, include_center)\n elif (operation == 'open'):\n if (sFlag == 5):\n ret = iMath(image, 'MO', radius, value, sFlag, lines)\n else:\n ret = iMath(image, 'MO', radius, value, sFlag, radius_is_parametric, thickness, include_center)\n elif (operation == 'close'):\n if (sFlag == 5):\n ret = iMath(image, 'MC', radius, value, sFlag, lines)\n else:\n ret = iMath(image, 'MC', radius, value, sFlag, radius_is_parametric, thickness, include_center)\n else:\n raise ValueError('Invalid morphology operation')\n elif (mtype == 'grayscale'):\n if (operation == 'dilate'):\n ret = iMath(image, 'GD', radius)\n elif (operation == 'erode'):\n ret = iMath(image, 'GE', radius)\n elif (operation == 'open'):\n ret = iMath(image, 'GO', radius)\n elif (operation == 'close'):\n ret = iMath(image, 'GC', radius)\n else:\n raise ValueError('Invalid morphology operation')\n else:\n 
raise ValueError('Invalid morphology type')\n return ret", - "docstring": "Apply morphological operations to an image\n\n ANTsR function: `morphology`\n\n Arguments\n ---------\n input : ANTsImage\n input image\n\n operation : string\n operation to apply\n \"close\" Morpholgical closing\n \"dilate\" Morpholgical dilation\n \"erode\" Morpholgical erosion\n \"open\" Morpholgical opening\n\n radius : scalar\n radius of structuring element\n\n mtype : string\n type of morphology\n \"binary\" Binary operation on a single value\n \"grayscale\" Grayscale operations\n\n value : scalar\n value to operation on (type='binary' only)\n\n shape : string\n shape of the structuring element ( type='binary' only )\n \"ball\" spherical structuring element\n \"box\" box shaped structuring element\n \"cross\" cross shaped structuring element\n \"annulus\" annulus shaped structuring element\n \"polygon\" polygon structuring element\n\n radius_is_parametric : boolean\n used parametric radius boolean (shape='ball' and shape='annulus' only)\n\n thickness : scalar\n thickness (shape='annulus' only)\n\n lines : integer\n number of lines in polygon (shape='polygon' only)\n\n include_center : boolean\n include center of annulus boolean (shape='annulus' only)\n\n Returns\n -------\n ANTsImage\n\n Example\n -------\n >>> import ants\n >>> fi = ants.image_read( ants.get_ants_data('r16') , 2 )\n >>> mask = ants.get_mask( fi )\n >>> dilated_ball = ants.morphology( mask, operation='dilate', radius=3, mtype='binary', shape='ball')\n >>> eroded_box = ants.morphology( mask, operation='erode', radius=3, mtype='binary', shape='box')\n >>> opened_annulus = ants.morphology( mask, operation='open', radius=5, mtype='binary', shape='annulus', thickness=2)" - }, - { - "code": "def finder_for_path(path):\n result = None\n pkgutil.get_importer(path)\n loader = sys.path_importer_cache.get(path)\n finder = _finder_registry.get(type(loader))\n if finder:\n module = _dummy_module\n module.__file__ = os.path.join(path, '')\n module.__loader__ = loader\n result = finder(module)\n return result", - "docstring": "Return a resource finder for a path, which should represent a container.\n\n :param path: The path.\n :return: A :class:`ResourceFinder` instance for the path." - }, - { - "code": "def make_valid_polygon(shape):\n assert shape.geom_type == 'Polygon'\n shape = make_valid_pyclipper(shape)\n assert shape.is_valid\n return shape", - "docstring": "Make a polygon valid. Polygons can be invalid in many ways, such as\n self-intersection, self-touching and degeneracy. This process attempts to\n make a polygon valid while retaining as much of its extent or area as\n possible.\n\n First, we call pyclipper to robustly union the polygon. Using this on its\n own appears to be good for \"cleaning\" the polygon.\n\n This might result in polygons which still have degeneracies according to\n the OCG standard of validity - as pyclipper does not consider these to be\n invalid. Therefore we follow by using the `buffer(0)` technique to attempt\n to remove any remaining degeneracies." 
- }, - { - "code": "def put(self, requirement, handle):\n filename = self.generate_filename(requirement)\n for backend in list(self.backends):\n handle.seek(0)\n try:\n backend.put(filename, handle)\n except CacheBackendDisabledError as e:\n logger.debug(\"Disabling %s because it requires configuration: %s\", backend, e)\n self.backends.remove(backend)\n except Exception as e:\n logger.exception(\"Disabling %s because it failed: %s\", backend, e)\n self.backends.remove(backend)", - "docstring": "Store a distribution archive in all of the available caches.\n\n :param requirement: A :class:`.Requirement` object.\n :param handle: A file-like object that provides access to the\n distribution archive." - }, - { - "code": "def get_library_instance(self, library_path, library_name):\n if self.is_library_in_libraries(library_path, library_name):\n from rafcon.core.states.library_state import LibraryState\n return LibraryState(library_path, library_name, \"0.1\")\n else:\n logger.warning(\"Library manager will not create a library instance which is not in the mounted libraries.\")", - "docstring": "Generate a Library instance from within libraries dictionary tree." - }, - { - "code": "def query_tensor_store(self,\n watch_key,\n time_indices=None,\n slicing=None,\n mapping=None):\n return self._tensor_store.query(watch_key,\n time_indices=time_indices,\n slicing=slicing,\n mapping=mapping)", - "docstring": "Query tensor store for a given debugged tensor value.\n\n Args:\n watch_key: The watch key of the debugged tensor being sought. Format:\n ::\n E.g., Dense_1/MatMul:0:DebugIdentity.\n time_indices: Optional time indices string By default, the lastest time\n index ('-1') is returned.\n slicing: Optional slicing string.\n mapping: Optional mapping string, e.g., 'image/png'.\n\n Returns:\n If mapping is `None`, the possibly sliced values as a nested list of\n values or its mapped format. A `list` of nested `list` of values,\n If mapping is not `None`, the format of the return value will depend on\n the mapping." - }, - { - "code": "def sh3(cmd):\n p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True,\n env=sub_environment())\n out, err = p.communicate()\n retcode = p.returncode\n if retcode:\n raise CalledProcessError(retcode, cmd)\n else:\n return out.rstrip(), err.rstrip()", - "docstring": "Execute command in a subshell, return stdout, stderr\n\n If anything appears in stderr, print it out to sys.stderr" - }, - { - "code": "def execute_command(self, command, tab=None):\n if not self.get_notebook().has_page():\n self.add_tab()\n if command[-1] != '\\n':\n command += '\\n'\n terminal = self.get_notebook().get_current_terminal()\n terminal.feed_child(command)", - "docstring": "Execute the `command' in the `tab'. If tab is None, the\n command will be executed in the currently selected\n tab. Command should end with '\\n', otherwise it will be\n appended to the string." - }, - { - "code": "def _parse_pkg_string(pkg):\n pkg_name, separator, pkg_ver = pkg.partition('-')\n return (pkg_name.strip(), separator, pkg_ver.strip())", - "docstring": "Parse pkg string and return a tuple of package name, separator, and\n package version.\n\n Cabal support install package with following format:\n\n * foo-1.0\n * foo < 1.2\n * foo > 1.3\n\n For the sake of simplicity only the first form is supported,\n support for other forms can be added later." 
- }, - { - "code": "def last_component_continued(self):\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')\n if not self.symlink_components:\n raise pycdlibexception.PyCdlibInternalError('Trying to get continued on a non-existent component!')\n return self.symlink_components[-1].is_continued()", - "docstring": "Determines whether the previous component of this SL record is a\n continued one or not.\n\n Parameters:\n None.\n Returns:\n True if the previous component of this SL record is continued, False otherwise." - }, - { - "code": "async def remove_listener(self, channel, callback):\n if self.is_closed():\n return\n if channel not in self._listeners:\n return\n if callback not in self._listeners[channel]:\n return\n self._listeners[channel].remove(callback)\n if not self._listeners[channel]:\n del self._listeners[channel]\n await self.fetch('UNLISTEN {}'.format(utils._quote_ident(channel)))", - "docstring": "Remove a listening callback on the specified channel." - }, - { - "code": "def first_image(self):\n for model_field in self._meta.fields:\n if isinstance(model_field, ImageField):\n if model_field.name is not 'thumbnail_override':\n field_value = getattr(self, model_field.name)\n if field_value.id is not None:\n return field_value\n return None", - "docstring": "Ready-only attribute that provides the value of the first non-none image that's\n not the thumbnail override field." - }, - { - "code": "def write(self, data, timeout_s=None):\n self.connected.wait(timeout_s)\n self.protocol.transport.write(data)", - "docstring": "Write to serial port.\n\n Waits for serial connection to be established before writing.\n\n Parameters\n ----------\n data : str or bytes\n Data to write to serial port.\n timeout_s : float, optional\n Maximum number of seconds to wait for serial connection to be\n established.\n\n By default, block until serial connection is ready." 
- }, - { - "code": "def local_accuracy(X, y, model_generator, method_name):\n def score_map(true, pred):\n v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))\n if v < 1e-6:\n return 1.0\n elif v < 0.01:\n return 0.9\n elif v < 0.05:\n return 0.75\n elif v < 0.1:\n return 0.6\n elif v < 0.2:\n return 0.4\n elif v < 0.3:\n return 0.3\n elif v < 0.5:\n return 0.2\n elif v < 0.7:\n return 0.1\n else:\n return 0.0\n def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state):\n return measures.local_accuracy(\n X_train, y_train, X_test, y_test, attr_function(X_test),\n model_generator, score_map, trained_model\n )\n return None, __score_method(X, y, None, model_generator, score_function, method_name)", - "docstring": "Local Accuracy\n transform = \"identity\"\n sort_order = 2" - }, - { - "code": "def _load(self, value: Any):\n if value in self.empty_values:\n if self.default is not None:\n default = self.default\n value = default() if callable(default) else default\n return value\n elif self.required:\n self.fail('required')\n else:\n return None\n if self.choices:\n value_list = value\n if not isinstance(value, (list, tuple)):\n value_list = [value]\n for v in value_list:\n if v not in self.choice_dict:\n self.fail(\n 'invalid_choice', value=v,\n choices=list(self.choice_dict))\n value = self._cast_to_type(value)\n self._run_validators(value)\n return value", - "docstring": "Load the value for the field, run validators and return the value.\n Subclasses can override this to provide custom load logic.\n\n :param value: value of the field" - }, - { - "code": "def validate_custom_interpreters_list(self):\r\n custom_list = self.get_option('custom_interpreters_list')\r\n valid_custom_list = []\r\n for value in custom_list:\r\n if (osp.isfile(value) and programs.is_python_interpreter(value)\r\n and value != get_python_executable()):\r\n valid_custom_list.append(value)\r\n self.set_option('custom_interpreters_list', valid_custom_list)", - "docstring": "Check that the used custom interpreters are still valid." - }, - { - "code": "def _reset_errors(self, msg=None):\n if msg is not None and msg in self._errors:\n del self._errors[msg]\n else:\n self._errors = {}", - "docstring": "Resets the logging throttle cache, so the next error is emitted\n regardless of the value in `self.server_error_interval`\n\n :param msg: if present, only this key is reset. Otherwise, the whole\n cache is cleaned." - }, - { - "code": "def file_ns_handler(importer, path_item, packageName, module):\n subpath = os.path.join(path_item, packageName.split('.')[-1])\n normalized = _normalize_cached(subpath)\n for item in module.__path__:\n if _normalize_cached(item) == normalized:\n break\n else:\n return subpath", - "docstring": "Compute an ns-package subpath for a filesystem or zipfile importer" - }, - { - "code": "def generate(env):\n try:\n bld = env['BUILDERS']['Rpm']\n except KeyError:\n bld = RpmBuilder\n env['BUILDERS']['Rpm'] = bld\n env.SetDefault(RPM = 'LC_ALL=C rpmbuild')\n env.SetDefault(RPMFLAGS = SCons.Util.CLVar('-ta'))\n env.SetDefault(RPMCOM = rpmAction)\n env.SetDefault(RPMSUFFIX = '.rpm')", - "docstring": "Add Builders and construction variables for rpm to an Environment." 
- }, - { - "code": "def _node_has_variant(node: BaseEntity, variant: str) -> bool:\n return VARIANTS in node and any(\n variant_dict[KIND] == variant\n for variant_dict in node[VARIANTS]\n )", - "docstring": "Return true if the node has at least one of the given variant.\n\n :param variant: :data:`PMOD`, :data:`HGVS`, :data:`GMOD`, or :data:`FRAGMENT`" - }, - { - "code": "def version_from_branch(branch):\n try:\n return tuple(\n map(\n int,\n re.match(r\"^.*(?P\\d+(\\.\\d+)+).*$\", branch)\n .groupdict()[\"version\"]\n .split(\".\"),\n )\n )\n except AttributeError as attr_err:\n raise ValueError(\n f\"Branch {branch} seems to not have a version in its name.\"\n ) from attr_err", - "docstring": "return version information from a git branch name" - }, - { - "code": "def _get_filename(request, item):\n if request.keep_image_names:\n filename = OgcImageService.finalize_filename(item['niceName'].replace(' ', '_'))\n else:\n filename = OgcImageService.finalize_filename(\n '_'.join([str(GeopediaService._parse_layer(request.layer)), item['objectPath'].rsplit('/', 1)[-1]]),\n request.image_format\n )\n LOGGER.debug(\"filename=%s\", filename)\n return filename", - "docstring": "Creates a filename" - }, - { - "code": "def insert_injfilterrejector_option_group(parser):\n injfilterrejector_group = \\\n parser.add_argument_group(_injfilterrejector_group_help)\n curr_arg = \"--injection-filter-rejector-chirp-time-window\"\n injfilterrejector_group.add_argument(curr_arg, type=float, default=None,\n help=_injfilterer_cthresh_help)\n curr_arg = \"--injection-filter-rejector-match-threshold\"\n injfilterrejector_group.add_argument(curr_arg, type=float, default=None,\n help=_injfilterer_mthresh_help)\n curr_arg = \"--injection-filter-rejector-coarsematch-deltaf\"\n injfilterrejector_group.add_argument(curr_arg, type=float, default=1.,\n help=_injfilterer_deltaf_help)\n curr_arg = \"--injection-filter-rejector-coarsematch-fmax\"\n injfilterrejector_group.add_argument(curr_arg, type=float, default=256.,\n help=_injfilterer_fmax_help)\n curr_arg = \"--injection-filter-rejector-seg-buffer\"\n injfilterrejector_group.add_argument(curr_arg, type=int, default=10,\n help=_injfilterer_buffer_help)\n curr_arg = \"--injection-filter-rejector-f-lower\"\n injfilterrejector_group.add_argument(curr_arg, type=int, default=None,\n help=_injfilterer_flower_help)", - "docstring": "Add options for injfilterrejector to executable." - }, - { - "code": "def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2):\n return GuinierPorodMulti(q, G, Rg1, alpha, Rg2)", - "docstring": "Empirical Guinier-Porod-Guinier scattering\n\n Inputs:\n -------\n ``q``: independent variable\n ``G``: factor for the first Guinier-branch\n ``Rg1``: the first radius of gyration\n ``alpha``: the power-law exponent\n ``Rg2``: the second radius of gyration\n\n Formula:\n --------\n ``G*exp(-q^2*Rg1^2/3)`` if ``q` of the sent message\n :raises: FBchatException if request failed" - }, - { - "code": "def to_parquet(df, path, engine='auto', compression='snappy', index=None,\n partition_cols=None, **kwargs):\n impl = get_engine(engine)\n return impl.write(df, path, compression=compression, index=index,\n partition_cols=partition_cols, **kwargs)", - "docstring": "Write a DataFrame to the parquet format.\n\n Parameters\n ----------\n path : str\n File path or Root Directory path. Will be used as Root Directory path\n while writing a partitioned dataset.\n\n .. versionchanged:: 0.24.0\n\n engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'\n Parquet library to use. 
If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'\n Name of the compression to use. Use ``None`` for no compression.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output. If\n ``False``, they will not be written to the file. If ``None``, the\n engine's default behavior will be used.\n\n .. versionadded 0.24.0\n\n partition_cols : list, optional, default None\n Column names by which to partition the dataset\n Columns are partitioned in the order they are given\n\n .. versionadded:: 0.24.0\n\n kwargs\n Additional keyword arguments passed to the engine" - }, - { - "code": "def _check_classifier(classifier):\r\n predict = getattr(classifier, \"predict\", None)\r\n if not callable(predict):\r\n raise ValueError('Classifier does not have predict method!')\r\n predict_proba = getattr(classifier, \"predict_proba\", None)\r\n if not callable(predict_proba):\r\n raise ValueError('Classifier does not have predict_proba method!')", - "docstring": "Check if the classifier implements predict and predict_proba methods." - }, - { - "code": "def load_plugin_factories(self):\n for plugin in self.get_plugins(group='enaml_native_android_factories'):\n get_factories = plugin.load()\n PLUGIN_FACTORIES = get_factories()\n factories.ANDROID_FACTORIES.update(PLUGIN_FACTORIES)", - "docstring": "Add any plugin toolkit widgets to the ANDROID_FACTORIES" - }, - { - "code": "def ptmsiReallocationCommand(PTmsiSignature_presence=0):\n a = TpPd(pd=0x3)\n b = MessageType(mesType=0x10)\n c = MobileId()\n d = RoutingAreaIdentification()\n e = ForceToStandbyAndSpareHalfOctets()\n packet = a / b / c / d / e\n if PTmsiSignature_presence is 1:\n g = PTmsiSignature(ieiPTS=0x19)\n packet = packet / g\n return packet", - "docstring": "P-TMSI REALLOCATION COMMAND Section 9.4.7" - }, - { - "code": "def create_seq(self, ):\n name = self.name_le.text()\n desc = self.desc_pte.toPlainText()\n try:\n seq = djadapter.models.Sequence(name=name, project=self._project, description=desc)\n seq.save()\n self.sequence = seq\n self.accept()\n except:\n log.exception(\"Could not create new sequence\")", - "docstring": "Create a sequence and store it in the self.sequence\n\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def restart(name, timeout=90, with_deps=False, with_parents=False):\n if 'salt-minion' in name:\n create_win_salt_restart_task()\n return execute_salt_restart_task()\n ret = set()\n ret.add(stop(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents))\n ret.add(start(name=name, timeout=timeout, with_deps=with_deps, with_parents=with_parents))\n return False not in ret", - "docstring": "Restart the named service. This issues a stop command followed by a start.\n\n Args:\n name: The name of the service to restart.\n\n .. note::\n If the name passed is ``salt-minion`` a scheduled task is\n created and executed to restart the salt-minion service.\n\n timeout (int):\n The time in seconds to wait for the service to stop and start before\n returning. Default is 90 seconds\n\n .. note::\n The timeout is cumulative meaning it is applied to the stop and\n then to the start command. A timeout of 90 could take up to 180\n seconds if the service is long in stopping and starting\n\n .. 
versionadded:: 2017.7.9,2018.3.4\n\n with_deps (bool):\n If enabled restart the given service and the services\n the current service depends on.\n\n with_parents (bool):\n If enabled and in case other running services depend on the to be\n restarted service, this flag indicates that those other services\n will be restarted as well.\n If disabled, the service restart will fail in case other running\n services depend on the to be restarted service.\n\n Returns:\n bool: ``True`` if successful, otherwise ``False``\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.restart " - }, - { - "code": "def read(self, n):\n while len(self.buf) < n:\n chunk = self.f.recv(4096)\n if not chunk:\n raise EndOfStreamError()\n self.buf += chunk\n res, self.buf = self.buf[:n], self.buf[n:]\n return res", - "docstring": "Consume `n` characters from the stream." - }, - { - "code": "def _field_controller_generator(self):\n stored_instance = self._ipopo_instance\n def get_value(self, name):\n return stored_instance.get_controller_state(name)\n def set_value(self, name, new_value):\n old_value = stored_instance.get_controller_state(name)\n if new_value != old_value:\n stored_instance.set_controller_state(name, new_value)\n return new_value\n return get_value, set_value", - "docstring": "Generates the methods called by the injected controller" - }, - { - "code": "def ingest_containers(self, containers=None):\n containers = containers or self.stream or {}\n output_containers = []\n for container_name, definition in containers.items():\n container = definition.copy()\n container['name'] = container_name\n output_containers.append(container)\n return output_containers", - "docstring": "Transform the YAML into a dict with normalized keys" - }, - { - "code": "def diff_bisectSplit(self, text1, text2, x, y, deadline):\n text1a = text1[:x]\n text2a = text2[:y]\n text1b = text1[x:]\n text2b = text2[y:]\n diffs = self.diff_main(text1a, text2a, False, deadline)\n diffsb = self.diff_main(text1b, text2b, False, deadline)\n return diffs + diffsb", - "docstring": "Given the location of the 'middle snake', split the diff in two parts\n and recurse.\n\n Args:\n text1: Old string to be diffed.\n text2: New string to be diffed.\n x: Index of split point in text1.\n y: Index of split point in text2.\n deadline: Time at which to bail if not yet complete.\n\n Returns:\n Array of diff tuples." - }, - { - "code": "def arch(self):\n if self.method in ('buildArch', 'createdistrepo', 'livecd'):\n return self.params[2]\n if self.method in ('createrepo', 'runroot'):\n return self.params[1]\n if self.method == 'createImage':\n return self.params[3]\n if self.method == 'indirectionimage':\n return self.params[0]['arch']", - "docstring": "Return an architecture for this task.\n\n :returns: an arch string (eg \"noarch\", or \"ppc64le\"), or None this task\n has no architecture associated with it." 
- }, - { - "code": "def _metadata_from_video(self, video):\n long_desc = video['long_description']\n if long_desc is not None:\n long_desc = long_desc[:MAX_METADATA_STRING_LEN]\n tags = video.get('tags')\n metadata = {\n 'name': default_to_empty_string(video.get('name')),\n 'description': default_to_empty_string(video.get('description')),\n 'long_description': default_to_empty_string(long_desc),\n 'tags': tags if tags is not None else [],\n 'updated_at': video.get('updated_at'),\n 'created_at': video.get('created_at'),\n 'state': video.get('state')\n }\n return metadata", - "docstring": "Generate the searchable metadata that we'll store in the bundle for the video" - }, - { - "code": "def map_predict(interface, state, label, inp):\n import numpy as np\n out = interface.output(0)\n continuous = [j for i, j in enumerate(state[\"X_indices\"]) if\n state[\"X_meta\"][i] == \"c\"]\n discrete = [j for i, j in enumerate(state[\"X_indices\"]) if\n state[\"X_meta\"][i] == \"d\"]\n cont = True if len(continuous) > 0 else False\n disc = True if len(discrete) > 0 else False\n for row in inp:\n row = row.strip().split(state[\"delimiter\"])\n if len(row) > 1:\n x_id = \"\" if state[\"id_index\"] == -1 else row[state[\"id_index\"]]\n probs = state[\"fit_model\"][\"prior_log\"]\n if cont:\n x = np.array([(0 if row[j] in state[\"missing_vals\"] else float(row[j])) for j in\n continuous])\n probs = probs - 0.5 * np.sum(\n np.true_divide((x - state[\"fit_model\"][\"mean\"]) ** 2, state[\"fit_model\"][\"var\"]) +\n state[\"fit_model\"][\"var_log\"], axis=1)\n if disc:\n probs = probs + np.sum(\n [(0 if row[i] in state[\"missing_vals\"] else state[\"fit_model\"].get((str(i), row[i]), np.zeros(1)))\n for i in discrete], axis=0)\n log_prob_x = np.log(np.sum(np.exp(probs)))\n probs = np.exp(np.array(probs) - log_prob_x)\n y_predicted = max(zip(probs, state[\"fit_model\"][\"y_labels\"]))[1]\n out.add(x_id, (y_predicted, probs.tolist()))", - "docstring": "Function makes a predictions of samples with given model. It calculates probabilities with multinomial and Gaussian distribution." - }, - { - "code": "def get_sshconfig():\n r\n with open(os.path.expanduser('~/.ssh/config')) as f:\n cfg = paramiko.SSHConfig()\n cfg.parse(f)\n ret_dict = {}\n for d in cfg._config:\n _copy = dict(d)\n del _copy['host']\n for host in d['host']:\n ret_dict[host] = _copy['config']\n return ret_dict", - "docstring": "r'''\n Read user's SSH configuration file" - }, - { - "code": "def execute(self):\n def is_cc(source):\n _, ext = os.path.splitext(source)\n return ext in self.get_options().cc_extensions\n targets = self.context.targets(self.is_cpp)\n with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:\n obj_mapping = self.context.products.get('objs')\n for vt in invalidation_check.all_vts:\n for source in vt.target.sources_relative_to_buildroot():\n if is_cc(source):\n if not vt.valid:\n with self.context.new_workunit(name='cpp-compile', labels=[WorkUnitLabel.MULTITOOL]):\n self._compile(vt.target, vt.results_dir, source)\n objpath = self._objpath(vt.target, vt.results_dir, source)\n obj_mapping.add(vt.target, vt.results_dir).append(objpath)", - "docstring": "Compile all sources in a given target to object files." 
- }, - { - "code": "def hypermedia_out():\n request = cherrypy.serving.request\n request._hypermedia_inner_handler = request.handler\n if request.handler is not None:\n request.handler = hypermedia_handler", - "docstring": "Determine the best handler for the requested content type\n\n Wrap the normal handler and transform the output from that handler into the\n requested content type" - }, - { - "code": "def build_report(self):\n thresholds = self.thresholds\n lower_quantile = self.config['lower_quantile']\n upper_quantile = self.config['upper_quantile']\n if self.n_current_results > self.n_cached_curves:\n colnames = ['_'.join([metric, stat])\n for metric in [self.metric1.name, self.metric2.name]\n for stat in ['Mean', 'Median',\n '%d_Percentile' % (100*lower_quantile),\n '%d_Percentile' % (upper_quantile*100)]]\n self.ret = pd.DataFrame(columns=colnames, index=thresholds, dtype='float64')\n for threshold in thresholds:\n m1s = Series([self.metric1.score(result, threshold) for result in self.results])\n m2s = Series([self.metric2.score(result, threshold) for result in self.results])\n self.ret.loc[threshold] = (m1s.mean(), m1s.quantile(.5), m1s.quantile(.05), m1s.quantile(.95),\n m2s.mean(), m2s.quantile(.5), m2s.quantile(.05), m2s.quantile(.95))\n self.build_curves()\n self.summary_df = self.ret\n return self.ret", - "docstring": "Calculates the pair of metrics for each threshold for each result." - }, - { - "code": "def reproject(rasterobject, reference, outname, targetres=None, resampling='bilinear', format='GTiff'):\n if isinstance(rasterobject, str):\n rasterobject = Raster(rasterobject)\n if not isinstance(rasterobject, Raster):\n raise RuntimeError('rasterobject must be of type Raster or str')\n if isinstance(reference, (Raster, Vector)):\n projection = reference.projection\n if targetres is not None:\n xres, yres = targetres\n elif hasattr(reference, 'res'):\n xres, yres = reference.res\n else:\n raise RuntimeError('parameter targetres is missing and cannot be read from the reference')\n elif isinstance(reference, (int, str, osr.SpatialReference)):\n try:\n projection = crsConvert(reference, 'proj4')\n except TypeError:\n raise RuntimeError('reference projection cannot be read')\n if targetres is None:\n raise RuntimeError('parameter targetres is missing and cannot be read from the reference')\n else:\n xres, yres = targetres\n else:\n raise TypeError('reference must be of type Raster, Vector, osr.SpatialReference, str or int')\n options = {'format': format,\n 'resampleAlg': resampling,\n 'xRes': xres,\n 'yRes': yres,\n 'srcNodata': rasterobject.nodata,\n 'dstNodata': rasterobject.nodata,\n 'dstSRS': projection}\n gdalwarp(rasterobject, outname, options)", - "docstring": "reproject a raster file\n\n Parameters\n ----------\n rasterobject: Raster or str\n the raster image to be reprojected\n reference: Raster, Vector, str, int or osr.SpatialReference\n either a projection string or a spatial object with an attribute 'projection'\n outname: str\n the name of the output file\n targetres: tuple\n the output resolution in the target SRS; a two-entry tuple is required: (xres, yres)\n resampling: str\n the resampling algorithm to be used\n format: str\n the output file format\n\n Returns\n -------" - }, - { - "code": "def register_computer_view(request):\n if request.method == \"POST\":\n form = ComputerRegistrationForm(request.POST)\n logger.debug(form)\n if form.is_valid():\n obj = form.save()\n obj.user = request.user\n obj.save()\n messages.success(request, \"Successfully added 
computer.\")\n return redirect(\"itemreg\")\n else:\n messages.error(request, \"Error adding computer.\")\n else:\n form = ComputerRegistrationForm()\n return render(request, \"itemreg/register_form.html\", {\"form\": form, \"action\": \"add\", \"type\": \"computer\", \"form_route\": \"itemreg_computer\"})", - "docstring": "Register a computer." - }, - { - "code": "def register_views(*args):\n config = args[0]\n settings = config.get_settings()\n pages_config = settings[CONFIG_MODELS]\n resources = resources_of_config(pages_config)\n for resource in resources:\n if hasattr(resource, '__table__')\\\n and not hasattr(resource, 'model'):\n continue\n resource.model.pyramid_pages_template = resource.template\n config.add_view(resource.view,\n attr=resource.attr,\n route_name=PREFIX_PAGE,\n renderer=resource.template,\n context=resource,\n permission=PREFIX_PAGE)", - "docstring": "Registration view for each resource from config." - }, - { - "code": "def _xor_block(a, b):\n return ''.join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])", - "docstring": "XOR two blocks of equal length." - }, - { - "code": "def userinfo_in_id_token_claims(endpoint_context, session, def_itc=None):\n if def_itc:\n itc = def_itc\n else:\n itc = {}\n itc.update(id_token_claims(session))\n if not itc:\n return None\n _claims = by_schema(endpoint_context.id_token_schema, **itc)\n if _claims:\n return collect_user_info(endpoint_context, session, _claims)\n else:\n return None", - "docstring": "Collect user info claims that are to be placed in the id token.\n\n :param endpoint_context: Endpoint context\n :param session: Session information\n :param def_itc: Default ID Token claims\n :return: User information or None" - }, - { - "code": "def apply_transform(self, matrix):\n matrix = np.asanyarray(matrix, dtype=np.float64)\n if matrix.shape != (4, 4):\n raise ValueError('shape must be 4,4')\n center = np.dot(matrix,\n np.append(self.primitive.center, 1.0))[:3]\n self.primitive.center = center", - "docstring": "Apply a transform to the sphere primitive\n\n Parameters\n ------------\n matrix: (4,4) float, homogenous transformation" - }, - { - "code": "def prepare(self):\n SCons.Node.Node.prepare(self)\n if self.get_state() != SCons.Node.up_to_date:\n if self.exists():\n if self.is_derived() and not self.precious:\n self._rmv_existing()\n else:\n try:\n self._createDir()\n except SCons.Errors.StopError as drive:\n raise SCons.Errors.StopError(\"No drive `{}' for target `{}'.\".format(drive, self))", - "docstring": "Prepare for this file to be created." 
- }, - { - "code": "def _get_factor(self, belief_prop, evidence):\n final_factor = factor_product(*belief_prop.junction_tree.get_factors())\n if evidence:\n for var in evidence:\n if var in final_factor.scope():\n final_factor.reduce([(var, evidence[var])])\n return final_factor", - "docstring": "Extracts the required factor from the junction tree.\n\n Parameters:\n ----------\n belief_prop: Belief Propagation\n Belief Propagation which needs to be updated.\n\n evidence: dict\n a dict key, value pair as {var: state_of_var_observed}" - }, - { - "code": "def wait_for(self, timeout=3000):\n results = [None]\n results_called = [False]\n def results_callback(val):\n results[0] = val\n results_called[0] = True\n self.then(results_callback)\n start = time.time()\n while not results_called[0]:\n if time.time() - start > timeout / 1000.:\n raise Exception('Timeout of %d ms reached' % timeout)\n ip.kernel.do_one_iteration()\n return results[0]", - "docstring": "Hault execution until self resolves." - }, - { - "code": "def select_by_key(self, key):\n self._selected_key = None\n self._selected_item = None\n for item in self.children.values():\n item.attributes['selected'] = False\n if key in self.children:\n self.children[key].attributes['selected'] = True\n self._selected_key = key\n self._selected_item = self.children[key]", - "docstring": "Selects an item by its key.\n\n Args:\n key (str): The unique string identifier of the item that have to be selected." - }, - { - "code": "def setup_errors(app, error_template=\"error.html\"):\n def error_handler(error):\n if isinstance(error, HTTPException):\n description = error.get_description(request.environ)\n code = error.code\n name = error.name\n else:\n description = error\n code = 500\n name = \"Internal Server Error\"\n return render_template(error_template,\n error=error,\n code=code,\n name=Markup(name),\n description=Markup(description)), code\n for exception in default_exceptions:\n app.register_error_handler(exception, error_handler)", - "docstring": "Add a handler for each of the available HTTP error responses." - }, - { - "code": "def _determine_os_workload_status(\n configs, required_interfaces, charm_func=None,\n services=None, ports=None):\n state, message = _ows_check_if_paused(services, ports)\n if state is None:\n state, message = _ows_check_generic_interfaces(\n configs, required_interfaces)\n if state != 'maintenance' and charm_func:\n state, message = _ows_check_charm_func(\n state, message, lambda: charm_func(configs))\n if state is None:\n state, message = _ows_check_services_running(services, ports)\n if state is None:\n state = 'active'\n message = \"Unit is ready\"\n juju_log(message, 'INFO')\n return state, message", - "docstring": "Determine the state of the workload status for the charm.\n\n This function returns the new workload status for the charm based\n on the state of the interfaces, the paused state and whether the\n services are actually running and any specified ports are open.\n\n This checks:\n\n 1. if the unit should be paused, that it is actually paused. If so the\n state is 'maintenance' + message, else 'broken'.\n 2. that the interfaces/relations are complete. If they are not then\n it sets the state to either 'broken' or 'waiting' and an appropriate\n message.\n 3. If all the relation data is set, then it checks that the actual\n services really are running. 
If not it sets the state to 'broken'.\n\n If everything is okay then the state returns 'active'.\n\n @param configs: a templating.OSConfigRenderer() object\n @param required_interfaces: {generic: [specific, specific2, ...]}\n @param charm_func: a callable function that returns state, message. The\n signature is charm_func(configs) -> (state, message)\n @param services: list of strings OR dictionary specifying services/ports\n @param ports: OPTIONAL list of port numbers.\n @returns state, message: the new workload status, user message" - }, - { - "code": "def start_task(self, task_type_str, current_task_index=None):\n assert (\n task_type_str in self._task_dict\n ), \"Task type has not been started yet: {}\".format(task_type_str)\n if current_task_index is not None:\n self._task_dict[task_type_str][\"task_idx\"] = current_task_index\n else:\n self._task_dict[task_type_str][\"task_idx\"] += 1\n self._log_progress_if_interval_elapsed()", - "docstring": "Call when processing is about to start on a single task of the given task\n type, typically at the top inside of the loop that processes the tasks.\n\n Args:\n task_type_str (str):\n The name of the task, used as a dict key and printed in the progress\n updates.\n\n current_task_index (int):\n If the task processing loop may skip or repeat tasks, the index of the\n current task must be provided here. This parameter can normally be left\n unset." - }, - { - "code": "def _get_msge_with_gradient(data, delta, xvschema, skipstep, p):\n t, m, l = data.shape\n n = (l - p) * t\n underdetermined = n < m * p\n if underdetermined:\n return _msge_with_gradient_underdetermined(data, delta, xvschema,\n skipstep, p)\n else:\n return _msge_with_gradient_overdetermined(data, delta, xvschema,\n skipstep, p)", - "docstring": "Calculate mean squared generalization error and its gradient,\n automatically selecting the best function." 
- }, - { - "code": "def playlist_song_add(\n\t\tself,\n\t\tsong,\n\t\tplaylist,\n\t\t*,\n\t\tafter=None,\n\t\tbefore=None,\n\t\tindex=None,\n\t\tposition=None\n\t):\n\t\tprev, next_ = get_ple_prev_next(\n\t\t\tself.playlist_songs(playlist),\n\t\t\tafter=after,\n\t\t\tbefore=before,\n\t\t\tindex=index,\n\t\t\tposition=position\n\t\t)\n\t\tif 'storeId' in song:\n\t\t\tsong_id = song['storeId']\n\t\telif 'trackId' in song:\n\t\t\tsong_id = song['trackId']\n\t\telse:\n\t\t\tsong_id = song['id']\n\t\tmutation = mc_calls.PlaylistEntriesBatch.create(\n\t\t\tsong_id, playlist['id'],\n\t\t\tpreceding_entry_id=prev.get('id'),\n\t\t\tfollowing_entry_id=next_.get('id')\n\t\t)\n\t\tself._call(mc_calls.PlaylistEntriesBatch, mutation)\n\t\treturn self.playlist(playlist['id'], include_songs=True)", - "docstring": "Add a song to a playlist.\n\n\t\tNote:\n\t\t\t* Provide no optional arguments to add to end.\n\t\t\t* Provide playlist song dicts for ``after`` and/or ``before``.\n\t\t\t* Provide a zero-based ``index``.\n\t\t\t* Provide a one-based ``position``.\n\n\t\t\tSongs are inserted *at* given index or position.\n\t\t\tIt's also possible to add to the end by using\n\t\t\t``len(songs)`` for index or ``len(songs) + 1`` for position.\n\n\t\tParameters:\n\t\t\tsong (dict): A song dict.\n\t\t\tplaylist (dict): A playlist dict.\n\t\t\tafter (dict, Optional): A playlist song dict ``songs`` will follow.\n\t\t\tbefore (dict, Optional): A playlist song dict ``songs`` will precede.\n\t\t\tindex (int, Optional): The zero-based index position to insert ``song``.\n\t\t\tposition (int, Optional): The one-based position to insert ``song``.\n\n\t\tReturns:\n\t\t\tdict: Playlist dict including songs." - }, - { - "code": "def image_summary(predictions, targets, hparams):\n del hparams\n results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)\n gold = tf.cast(targets, tf.uint8)\n summary1 = tf.summary.image(\"prediction\", results, max_outputs=2)\n summary2 = tf.summary.image(\"data\", gold, max_outputs=2)\n summary = tf.summary.merge([summary1, summary2])\n return summary, tf.zeros_like(predictions)", - "docstring": "Reshapes predictions and passes it to tensorboard.\n\n Args:\n predictions : The predicted image (logits).\n targets : The ground truth.\n hparams: model hparams.\n\n Returns:\n summary_proto: containing the summary images.\n weights: A Tensor of zeros of the same shape as predictions." - }, - { - "code": "def install_handle_input(self):\n self.pointer = self.get_fptr()\n self.hooked = ctypes.windll.user32.SetWindowsHookExA(\n 13,\n self.pointer,\n ctypes.windll.kernel32.GetModuleHandleW(None),\n 0\n )\n if not self.hooked:\n return False\n return True", - "docstring": "Install the hook." - }, - { - "code": "def tabify_plugins(self, first, second):\r\n self.tabifyDockWidget(first.dockwidget, second.dockwidget)", - "docstring": "Tabify plugin dockwigdets" - }, - { - "code": "def clear_all(self):\n for urlpath in self._metadata.keys():\n self.clear_cache(urlpath)\n if not os.path.isdir(self._cache_dir):\n return\n for subdir in os.listdir(self._cache_dir):\n try:\n fn = posixpath.join(self._cache_dir, subdir)\n if os.path.isdir(fn):\n shutil.rmtree(fn)\n if os.path.isfile(fn):\n os.remove(fn)\n except (OSError, IOError) as e:\n logger.warning(str(e))", - "docstring": "Clears all cache and metadata." 
- }, - { - "code": "def curvature(self, curv_type='mean'):\n curv_type = curv_type.lower()\n curvefilter = vtk.vtkCurvatures()\n curvefilter.SetInputData(self)\n if curv_type == 'mean':\n curvefilter.SetCurvatureTypeToMean()\n elif curv_type == 'gaussian':\n curvefilter.SetCurvatureTypeToGaussian()\n elif curv_type == 'maximum':\n curvefilter.SetCurvatureTypeToMaximum()\n elif curv_type == 'minimum':\n curvefilter.SetCurvatureTypeToMinimum()\n else:\n raise Exception('Curv_Type must be either \"Mean\", ' +\n '\"Gaussian\", \"Maximum\", or \"Minimum\"')\n curvefilter.Update()\n curv = _get_output(curvefilter)\n return vtk_to_numpy(curv.GetPointData().GetScalars())", - "docstring": "Returns the pointwise curvature of a mesh\n\n Parameters\n ----------\n mesh : vtk.polydata\n vtk polydata mesh\n\n curvature string, optional\n One of the following strings\n Mean\n Gaussian\n Maximum\n Minimum\n\n Returns\n -------\n curvature : np.ndarray\n Curvature values" - }, - { - "code": "def get_v_total_stress_at_depth(self, z):\n if not hasattr(z, \"__len__\"):\n return self.one_vertical_total_stress(z)\n else:\n sigma_v_effs = []\n for value in z:\n sigma_v_effs.append(self.one_vertical_total_stress(value))\n return np.array(sigma_v_effs)", - "docstring": "Determine the vertical total stress at depth z, where z can be a number or an array of numbers." - }, - { - "code": "def preconstrain(self, value, variable):\n if not isinstance(value, claripy.ast.Base):\n value = self.state.solver.BVV(value, len(variable))\n elif value.op != 'BVV':\n raise ValueError(\"Passed a value to preconstrain that was not a BVV or a string\")\n if variable.op not in claripy.operations.leaf_operations:\n l.warning(\"The variable %s to preconstrain is not a leaf AST. This may cause replacement failures in the \"\n \"claripy replacement backend.\", variable)\n l.warning(\"Please use a leaf AST as the preconstraining variable instead.\")\n constraint = variable == value\n l.debug(\"Preconstraint: %s\", constraint)\n self.variable_map[next(iter(variable.variables))] = constraint\n self.preconstraints.append(constraint)\n if o.REPLACEMENT_SOLVER in self.state.options:\n self.state.solver._solver.add_replacement(variable, value, invalidate_cache=False)\n else:\n self.state.add_constraints(*self.preconstraints)\n if not self.state.satisfiable():\n l.warning(\"State went unsat while adding preconstraints\")", - "docstring": "Add a preconstraint that ``variable == value`` to the state.\n\n :param value: The concrete value. Can be a bitvector or a bytestring or an integer.\n :param variable: The BVS to preconstrain." - }, - { - "code": "def _trim_dictionary_parameters(self, dict_param):\n keys = re.findall('(?:[^%]|^)?%\\((\\w*)\\)[a-z]', self.msgid)\n if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):\n params = self._copy_param(dict_param)\n else:\n params = {}\n src = {}\n if isinstance(self.params, dict):\n src.update(self.params)\n src.update(dict_param)\n for key in keys:\n params[key] = self._copy_param(src[key])\n return params", - "docstring": "Return a dict that only has matching entries in the msgid." 
- }, - { - "code": "def add_repositories():\n if not env.overwrite and env.LINUX_PACKAGE_REPOSITORIES == server_state('linux_package_repositories'): return\n if env.verbosity:\n print env.host, \"UNCOMMENTING SOURCES in /etc/apt/sources.list and adding PPAs\"\n if contains(filename='/etc/apt/sources.list',text='\n _backup_file('/etc/apt/sources.list')\n uncomment('/etc/apt/sources.list','\n install_package('python-software-properties')\n for p in env.LINUX_PACKAGE_REPOSITORIES:\n sudo('add-apt-repository %s'% p)\n if env.verbosity:\n print 'added source', p\n set_server_state('linux_package_repositories',env.LINUX_PACKAGE_REPOSITORIES)", - "docstring": "Adds additional sources as defined in LINUX_PACKAGE_REPOSITORIES." - }, - { - "code": "def addNamespace(self, namespace, **context):\n self.connection().addNamespace(namespace, orb.Context(**context))", - "docstring": "Creates a new namespace within this database.\n\n :param namespace: " - }, - { - "code": "def readiter(d):\n da = ms.getdata([d['datacol'],'axis_info','u','v','w','flag','data_desc_id'], ifraxis=True)\n good = n.where((da['data_desc_id']) == d['spwlist'][0])[0]\n time0 = da['axis_info']['time_axis']['MJDseconds'][good]\n data0 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]\n flag0 = n.transpose(da['flag'], axes=[3,2,1,0])[good]\n u0 = da['u'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)\n v0 = da['v'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)\n w0 = da['w'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)\n if len(d['spwlist']) > 1:\n for spw in d['spwlist'][1:]:\n good = n.where((da['data_desc_id']) == spw)[0]\n data1 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]\n data0 = n.concatenate( (data0, data1), axis=2 )\n flag0 = n.concatenate( (flag0, n.transpose(da['flag'], axes=[3,2,1,0])[good]), axis=2 )\n del da\n data0 = data0[:,:,d['chans'],:] * n.invert(flag0[:,:,d['chans'],:])\n iterstatus = ms.iternext() \n return data0.astype('complex64'), u0.astype('float32'), v0.astype('float32'), w0.astype('float32'), time0.astype('float32')", - "docstring": "Read iteration of size iterint" - }, - { - "code": "def from_jwe(self, msg, keys):\n jwe = JWE()\n _res = jwe.decrypt(msg, keys)\n return self.from_json(_res.decode())", - "docstring": "Decrypt an encrypted JWT and load the JSON object that was the body\n of the JWT into this object.\n\n :param msg: An encrypted JWT\n :param keys: Possibly usable keys.\n :type keys: list or KeyJar instance\n :return: The decrypted message. If decryption failed an exception\n will be raised." 
- }, - { - "code": "def _validate_header(self, hed):\n if not bool(hed):\n return False\n length = -1\n for row in hed:\n if not bool(row):\n return False\n elif length == -1:\n length = len(row)\n elif len(row) != length:\n return False\n return True", - "docstring": "Validate the list that represents the table header.\n\n :param hed: The list that represents the table header.\n :type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))\n :return: True if the table header is valid or False if the table header\n is not valid.\n :rtype: bool" - }, - { - "code": "def detectFileEncoding(self, fileName):\n\t\ttry:\n\t\t\timport chardet\n\t\texcept ImportError:\n\t\t\treturn\n\t\twith open(fileName, 'rb') as inputFile:\n\t\t\traw = inputFile.read(2048)\n\t\tresult = chardet.detect(raw)\n\t\tif result['confidence'] > 0.9:\n\t\t\tif result['encoding'].lower() == 'ascii':\n\t\t\t\treturn 'utf-8'\n\t\t\treturn result['encoding']", - "docstring": "Detect content encoding of specific file.\n\n\t\tIt will return None if it can't determine the encoding." - }, - { - "code": "def changed(self, *value):\n if self._last_checked_value != value:\n self._last_checked_value = value\n return True\n return False", - "docstring": "Checks whether the value has changed since the last call." - }, - { - "code": "def renormalize(self, modelparams):\n norm = modelparams[:, 0] * np.sqrt(self._dim)\n assert not np.sum(norm == 0)\n return modelparams / norm[:, None]", - "docstring": "Renormalizes one or more states represented as model\n parameter vectors, such that each state has trace 1.\n\n :param np.ndarray modelparams: Array of shape ``(n_states,\n dim ** 2)`` representing one or more states as \n model parameter vectors.\n :return: The same state, normalized to trace one." 
- }, - { - "code": "def parse_template(input_filename, output_filename=''):\n data = load_input()\n with open(input_filename, 'rb') as file:\n template = file.read().decode(\"utf-8\")\n if not 'input' in data:\n raise ValueError(\"Could not find 'input' in data\")\n for field in data['input']:\n subs = [\"filename\", \"value\"] if isinstance(data['input'][field], dict) and \"filename\" in data['input'][field] and \"value\" in data['input'][field] else [\"\"]\n for sub in subs:\n displayed_field = field + (\":\" if sub else \"\") + sub\n regex = re.compile(\"@([^@]*)@\" + displayed_field + '@([^@]*)@')\n for prefix, postfix in set(regex.findall(template)):\n if sub == \"value\":\n text = open(data['input'][field][sub], 'rb').read().decode('utf-8')\n elif sub:\n text = data['input'][field][sub]\n else:\n text = data['input'][field]\n rep = \"\\n\".join([prefix + v + postfix for v in text.splitlines()])\n template = template.replace(\"@{0}@{1}@{2}@\".format(prefix, displayed_field, postfix), rep)\n if output_filename == '':\n output_filename=input_filename\n try:\n os.makedirs(os.path.dirname(output_filename))\n except OSError as e:\n pass\n with open(output_filename, 'wb') as file:\n file.write(template.encode(\"utf-8\"))", - "docstring": "Parses a template file\n Replaces all occurences of @@problem_id@@ by the value\n of the 'problem_id' key in data dictionary\n \n input_filename: file to parse\n output_filename: if not specified, overwrite input file" - }, - { - "code": "def opt_pore_diameter(elements, coordinates, bounds=None, com=None, **kwargs):\n args = elements, coordinates\n if com is not None:\n pass\n else:\n com = center_of_mass(elements, coordinates)\n if bounds is None:\n pore_r = pore_diameter(elements, coordinates, com=com)[0] / 2\n bounds = (\n (com[0]-pore_r, com[0]+pore_r),\n (com[1]-pore_r, com[1]+pore_r),\n (com[2]-pore_r, com[2]+pore_r)\n )\n minimisation = minimize(\n correct_pore_diameter, x0=com, args=args, bounds=bounds)\n pored = pore_diameter(elements, coordinates, com=minimisation.x)\n return (pored[0], pored[1], minimisation.x)", - "docstring": "Return optimised pore diameter and it's COM." - }, - { - "code": "async def power(dev: Device, cmd, target, value):\n async def try_turn(cmd):\n state = True if cmd == \"on\" else False\n try:\n return await dev.set_power(state)\n except SongpalException as ex:\n if ex.code == 3:\n err(\"The device is already %s.\" % cmd)\n else:\n raise ex\n if cmd == \"on\" or cmd == \"off\":\n click.echo(await try_turn(cmd))\n elif cmd == \"settings\":\n settings = await dev.get_power_settings()\n print_settings(settings)\n elif cmd == \"set\" and target and value:\n click.echo(await dev.set_power_settings(target, value))\n else:\n power = await dev.get_power()\n click.echo(click.style(str(power), bold=power))", - "docstring": "Turn on and off, control power settings.\n\n Accepts commands 'on', 'off', and 'settings'." - }, - { - "code": "def bind(self, dependency, svc, svc_ref):\n with self._lock:\n self.__set_binding(dependency, svc, svc_ref)\n self.check_lifecycle()", - "docstring": "Called by a dependency manager to inject a new service and update the\n component life cycle." 
- }, - { - "code": "def save_twi(self, rootpath, raw=False, as_int=True):\n self.twi = np.ma.masked_array(self.twi, mask=self.twi <= 0,\n fill_value=-9999)\n self.twi[self.flats] = 0\n self.twi.mask[self.flats] = True\n self.save_array(self.twi, None, 'twi', rootpath, raw, as_int=as_int)", - "docstring": "Saves the topographic wetness index to a file" - }, - { - "code": "def sort(self, cmp=None, key=None, reverse=False):\n if not key and self._keys:\n key = self.KeyValue\n super(CliTable, self).sort(cmp=cmp, key=key, reverse=reverse)", - "docstring": "Overrides sort func to use the KeyValue for the key." - }, - { - "code": "def report(policies, start_date, options, output_fh, raw_output_fh=None):\n regions = set([p.options.region for p in policies])\n policy_names = set([p.name for p in policies])\n formatter = Formatter(\n policies[0].resource_manager.resource_type,\n extra_fields=options.field,\n include_default_fields=not options.no_default_fields,\n include_region=len(regions) > 1,\n include_policy=len(policy_names) > 1\n )\n records = []\n for policy in policies:\n policy.ctx.initialize()\n if policy.ctx.output.type == 's3':\n policy_records = record_set(\n policy.session_factory,\n policy.ctx.output.config['netloc'],\n policy.ctx.output.config['path'].strip('/'),\n start_date)\n else:\n policy_records = fs_record_set(policy.ctx.log_dir, policy.name)\n log.debug(\"Found %d records for region %s\", len(policy_records), policy.options.region)\n for record in policy_records:\n record['policy'] = policy.name\n record['region'] = policy.options.region\n records += policy_records\n rows = formatter.to_csv(records)\n if options.format == 'csv':\n writer = UnicodeWriter(output_fh, formatter.headers())\n writer.writerow(formatter.headers())\n writer.writerows(rows)\n elif options.format == 'json':\n print(dumps(records, indent=2))\n else:\n print(tabulate(rows, formatter.headers(), tablefmt=options.format))\n if raw_output_fh is not None:\n dumps(records, raw_output_fh, indent=2)", - "docstring": "Format a policy's extant records into a report." 
- }, - { - "code": "def get_shortname(self):\n disqus_shortname = self.state.document.settings.env.config.disqus_shortname\n if not disqus_shortname:\n raise ExtensionError('disqus_shortname config value must be set for the disqus extension to work.')\n if not RE_SHORTNAME.match(disqus_shortname):\n raise ExtensionError('disqus_shortname config value must be 3-50 letters, numbers, and hyphens only.')\n return disqus_shortname", - "docstring": "Validate and returns disqus_shortname config value.\n\n :returns: disqus_shortname config value.\n :rtype: str" - }, - { - "code": "def add_parser_arguments():\n argparser = parser.get()\n argparser.add_argument('-P', '--profile',\n action='store',\n default=None,\n dest='profile',\n help='Profile the consumer modules, specifying '\n 'the output directory.')\n argparser.add_argument('-o', '--only',\n action='store',\n default=None,\n dest='consumer',\n help='Only run the consumer specified')\n argparser.add_argument('-p', '--prepend-path',\n action='store',\n default=None,\n dest='prepend_path',\n help='Prepend the python path with the value.')\n argparser.add_argument('-q', '--qty',\n action='store',\n type=int,\n default=None,\n dest='quantity',\n help='Run the specified quantity of consumer '\n 'processes when used in conjunction with -o')\n argparser.add_argument('--version', action='version',\n version='%(prog)s {}'.format(__version__))", - "docstring": "Add options to the parser" - }, - { - "code": "def _run_nucmer(self, ref, qry, outfile):\n n = pymummer.nucmer.Runner(\n ref,\n qry,\n outfile,\n min_id=self.nucmer_min_id,\n min_length=self.nucmer_min_length,\n diagdiff=self.nucmer_diagdiff,\n maxmatch=True,\n breaklen=self.nucmer_breaklen,\n simplify=True,\n verbose=self.verbose\n )\n n.run()", - "docstring": "Run nucmer of new assembly vs original assembly" - }, - { - "code": "def dump_service(self, sc):\n def lprint(fmt, data, index):\n ispchr = lambda x: x >= 32 and x <= 126\n def print_bytes(octets):\n return ' '.join(['%02x' % x for x in octets])\n def print_chars(octets):\n return ''.join([chr(x) if ispchr(x) else '.' for x in octets])\n return fmt.format(index, print_bytes(data), print_chars(data))\n data_line_fmt = \"{0:04X}: {1} |{2}|\"\n same_line_fmt = \"{0:<4s} {1} |{2}|\"\n lines = list()\n last_data = None\n same_data = 0\n for i in itertools.count():\n assert i < 0x10000\n try:\n this_data = self.read_without_encryption([sc], [BlockCode(i)])\n except Type3TagCommandError:\n i = i - 1\n break\n if this_data == last_data:\n same_data += 1\n else:\n if same_data > 1:\n lines.append(lprint(same_line_fmt, last_data, \"*\"))\n lines.append(lprint(data_line_fmt, this_data, i))\n last_data = this_data\n same_data = 0\n if same_data > 1:\n lines.append(lprint(same_line_fmt, last_data, \"*\"))\n if same_data > 0:\n lines.append(lprint(data_line_fmt, this_data, i))\n return lines", - "docstring": "Read all data blocks of a given service.\n\n :meth:`dump_service` reads all data blocks from the service\n with service code *sc* and returns a list of strings suitable\n for printing. The number of strings returned does not\n necessarily reflect the number of data blocks because a range\n of data blocks with equal content is reduced to fewer lines of\n output." 
- }, - { - "code": "def get_distinfo_file(self, path):\n if path.find(os.sep) >= 0:\n distinfo_dirname, path = path.split(os.sep)[-2:]\n if distinfo_dirname != self.path.split(os.sep)[-1]:\n raise DistlibException(\n 'dist-info file %r does not belong to the %r %s '\n 'distribution' % (path, self.name, self.version))\n if path not in DIST_FILES:\n raise DistlibException('invalid path for a dist-info file: '\n '%r at %r' % (path, self.path))\n return os.path.join(self.path, path)", - "docstring": "Returns a path located under the ``.dist-info`` directory. Returns a\n string representing the path.\n\n :parameter path: a ``'/'``-separated path relative to the\n ``.dist-info`` directory or an absolute path;\n If *path* is an absolute path and doesn't start\n with the ``.dist-info`` directory path,\n a :class:`DistlibException` is raised\n :type path: str\n :rtype: str" - }, - { - "code": "def groups_get_integrations(self, room_id, **kwargs):\n return self.__call_api_get('groups.getIntegrations', roomId=room_id, kwargs=kwargs)", - "docstring": "Retrieves the integrations which the group has" - }, - { - "code": "def collect_tokens_until(self, token_type):\n self.next()\n if self.current_token.type == token_type:\n return\n while True:\n yield self.current_token\n self.next()\n if self.current_token.type == token_type:\n return\n if self.current_token.type != 'COMMA':\n raise self.error(f'Expected comma but got '\n f'{self.current_token.value!r}')\n self.next()", - "docstring": "Yield the item tokens in a comma-separated tag collection." - }, - { - "code": "def get_by_uid(post_id):\n return TabPost2Tag.select(\n TabPost2Tag,\n TabTag.name.alias('tag_name'),\n TabTag.uid.alias('tag_uid')\n ).join(\n TabTag, on=(TabPost2Tag.tag_id == TabTag.uid)\n ).where(\n (TabPost2Tag.post_id == post_id) & (TabTag.kind == 'z')\n )", - "docstring": "Get records by post id." - }, - { - "code": "def seek(self, pos):\n if (pos > self.file_size) or (pos < 0):\n raise Exception(\"Unable to seek - position out of file!\")\n self.file.seek(pos)", - "docstring": "Move to new input file position. If position is negative or out of file, raise Exception." - }, - { - "code": "def config_namespace(config_file=None, auto_find=False,\n verify=True, **cfg_options):\n return ConfigNamespace(**config_dict(config_file, auto_find,\n verify, **cfg_options))", - "docstring": "Return configuration options as a Namespace.\n\n .. 
code:: python\n\n reusables.config_namespace(os.path.join(\"test\", \"data\",\n \"test_config.ini\"))\n # \n\n\n :param config_file: path or paths to the files location\n :param auto_find: look for a config type file at this location or below\n :param verify: make sure the file exists before trying to read\n :param cfg_options: options to pass to the parser\n :return: Namespace of the config files" - }, - { - "code": "def write_roi(self, outfile=None,\n save_model_map=False, **kwargs):\n make_plots = kwargs.get('make_plots', False)\n save_weight_map = kwargs.get('save_weight_map', False)\n if outfile is None:\n pathprefix = os.path.join(self.config['fileio']['workdir'],\n 'results')\n elif not os.path.isabs(outfile):\n pathprefix = os.path.join(self.config['fileio']['workdir'],\n outfile)\n else:\n pathprefix = outfile\n pathprefix = utils.strip_suffix(pathprefix,\n ['fits', 'yaml', 'npy'])\n prefix = os.path.basename(pathprefix)\n xmlfile = pathprefix + '.xml'\n fitsfile = pathprefix + '.fits'\n npyfile = pathprefix + '.npy'\n self.write_xml(xmlfile)\n self.write_fits(fitsfile)\n if not self.config['gtlike']['use_external_srcmap']:\n for c in self.components:\n c.like.logLike.saveSourceMaps(str(c.files['srcmap']))\n if save_model_map:\n self.write_model_map(prefix)\n if save_weight_map:\n self.write_weight_map(prefix)\n o = {}\n o['roi'] = copy.deepcopy(self._roi_data)\n o['config'] = copy.deepcopy(self.config)\n o['version'] = fermipy.__version__\n o['stversion'] = fermipy.get_st_version()\n o['sources'] = {}\n for s in self.roi.sources:\n o['sources'][s.name] = copy.deepcopy(s.data)\n for i, c in enumerate(self.components):\n o['roi']['components'][i][\n 'src_expscale'] = copy.deepcopy(c.src_expscale)\n self.logger.info('Writing %s...', npyfile)\n np.save(npyfile, o)\n if make_plots:\n self.make_plots(prefix, None,\n **kwargs.get('plotting', {}))", - "docstring": "Write current state of the analysis to a file. This method\n writes an XML model definition, a ROI dictionary, and a FITS\n source catalog file. A previously saved analysis state can be\n reloaded from the ROI dictionary file with the\n `~fermipy.gtanalysis.GTAnalysis.load_roi` method.\n\n Parameters\n ----------\n\n outfile : str\n String prefix of the output files. The extension of this\n string will be stripped when generating the XML, YAML and\n npy filenames.\n\n make_plots : bool\n Generate diagnostic plots.\n\n save_model_map : bool\n Save the current counts model to a FITS file." 
- }, - { - "code": "def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,\n max_velocity=2.0, max_distance=50, is_3d=True):\n nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)\n d = 3 if is_3d else 2\n nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}\n for _ in repeat(None, iterations):\n for node1, node2 in combinations(nodes.values(), 2):\n _coulomb(node1, node2, force_strength, max_distance)\n for edge in edges:\n _hooke(nodes[edge['source']], nodes[edge['target']],\n force_strength * edge.get('size', 1), max_distance)\n for node in nodes.values():\n force = [_constrain(dampening * f, -max_velocity, max_velocity)\n for f in node['force']]\n node['velocity'] = [v + dv\n for v, dv in zip(node['velocity'], force)]\n node['force'] = [0] * d\n for node in nodes.values():\n del node['force']\n node['location'] = node['velocity']\n del node['velocity']\n if not is_3d:\n node['location'] += [0.0]\n return nodes", - "docstring": "Runs a force-directed-layout algorithm on the input graph.\n\n iterations - Number of FDL iterations to run in coordinate generation\n force_strength - Strength of Coulomb and Hooke forces\n (edit this to scale the distance between nodes)\n dampening - Multiplier to reduce force applied to nodes\n max_velocity - Maximum distance a node can move in one step\n max_distance - The maximum distance considered for interactions" - }, - { - "code": "def add_summary_stats_to_table(table_in, table_out, colnames):\n for col in colnames:\n col_in = table_in[col]\n stats = collect_summary_stats(col_in.data)\n for k, v in stats.items():\n out_name = \"%s_%s\" % (col, k)\n col_out = Column(data=np.vstack(\n [v]), name=out_name, dtype=col_in.dtype, shape=v.shape, unit=col_in.unit)\n table_out.add_column(col_out)", - "docstring": "Collect summary statisitics from an input table and add them to an output table\n\n Parameters\n ----------\n\n table_in : `astropy.table.Table`\n Table with the input data.\n\n table_out : `astropy.table.Table`\n Table with the output data.\n\n colnames : list\n List of the column names to get summary statistics for." - }, - { - "code": "def unmarshal(self, values, bind_client=None):\n if values is not None:\n return [super(EntityCollection, self).unmarshal(v, bind_client=bind_client) for v in values]", - "docstring": "Cast the list." - }, - { - "code": "def arg(self, i):\n if self.use_state_arguments:\n r = self.cc.arg(self.state, i)\n else:\n if i >= len(self.arguments):\n raise SimProcedureArgumentError(\"Argument %d does not exist.\" % i)\n r = self.arguments[i]\n l.debug(\"returning argument\")\n return r", - "docstring": "Returns the ith argument. Raise a SimProcedureArgumentError if we don't have such an argument available.\n\n :param int i: The index of the argument to get\n :return: The argument\n :rtype: object" - }, - { - "code": "def getServiceEndpoints(input_url, flt=None):\n result = discover(input_url)\n try:\n endpoints = applyFilter(result.normalized_uri,\n result.response_text, flt)\n except XRDSError, err:\n raise DiscoveryFailure(str(err), None)\n return (result.normalized_uri, endpoints)", - "docstring": "Perform the Yadis protocol on the input URL and return an\n iterable of resulting endpoint objects.\n\n @param flt: A filter object or something that is convertable to\n a filter object (using mkFilter) that will be used to generate\n endpoint objects. 
This defaults to generating BasicEndpoint\n objects.\n\n @param input_url: The URL on which to perform the Yadis protocol\n\n @return: The normalized identity URL and an iterable of endpoint\n objects generated by the filter function.\n\n @rtype: (str, [endpoint])\n\n @raises DiscoveryFailure: when Yadis fails to obtain an XRDS document." - }, - { - "code": "def getall(self):\n acl_re = re.compile(r'^ip access-list (?:(standard) )?(.+)$', re.M)\n response = {'standard': {}, 'extended': {}}\n for acl_type, name in acl_re.findall(self.config):\n acl = self.get(name)\n if acl_type and acl_type == 'standard':\n response['standard'][name] = acl\n else:\n response['extended'][name] = acl\n return response", - "docstring": "Returns all ACLs in a dict object.\n\n Returns:\n A Python dictionary object containing all ACL\n configuration indexed by ACL name::\n\n {\n \"\": {...},\n \"\": {...}\n }" - }, - { - "code": "def search_responsify(serializer, mimetype):\n def view(pid_fetcher, search_result, code=200, headers=None, links=None,\n item_links_factory=None):\n response = current_app.response_class(\n serializer.serialize_search(pid_fetcher, search_result,\n links=links,\n item_links_factory=item_links_factory),\n mimetype=mimetype)\n response.status_code = code\n if headers is not None:\n response.headers.extend(headers)\n if links is not None:\n add_link_header(response, links)\n return response\n return view", - "docstring": "Create a Records-REST search result response serializer.\n\n :param serializer: Serializer instance.\n :param mimetype: MIME type of response.\n :returns: Function that generates a record HTTP response." - }, - { - "code": "def _handle_oss_error():\n try:\n yield\n except _OssError as exception:\n if exception.status in _ERROR_CODES:\n raise _ERROR_CODES[exception.status](\n exception.details.get('Message', ''))\n raise", - "docstring": "Handle OSS exception and convert to class IO exceptions\n\n Raises:\n OSError subclasses: IO error." - }, - { - "code": "def to_json(self, indent=None, separators=None, sort_keys=False):\n def remove_callables(x):\n if isinstance(x, dict):\n return {k: remove_callables(v) for k, v in six.iteritems(x)\n if not callable(v)}\n elif isinstance(x, list):\n return [remove_callables(i) for i in x if not callable(i)]\n return x\n return json.dumps(\n remove_callables(self.values()),\n indent=indent,\n separators=separators,\n sort_keys=sort_keys)", - "docstring": "Serializes the hyperparameters into JSON.\n\n Args:\n indent: If a non-negative integer, JSON array elements and object members\n will be pretty-printed with that indent level. An indent level of 0, or\n negative, will only insert newlines. `None` (the default) selects the\n most compact representation.\n separators: Optional `(item_separator, key_separator)` tuple. Default is\n `(', ', ': ')`.\n sort_keys: If `True`, the output dictionaries will be sorted by key.\n\n Returns:\n A JSON string." 
- }, - { - "code": "def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False):\n import astropy.convolution\n dem = malib.checkma(dem)\n if size is not None:\n size = int(np.floor(size/2)*2 + 1)\n size = max(size, 3)\n truncate = 3.0\n if size is not None and sigma is None:\n sigma = (size - 1) / (2*truncate)\n elif size is None and sigma is not None:\n size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)\n elif size is None and sigma is None:\n sigma = 1\n size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)\n size = max(size, 3)\n kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample')\n print(\"Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)\" % \\\n (size, sigma, kernel.array.sum()))\n dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True)\n if origmask:\n print(\"Applying original mask\")\n if fill_interior:\n mask = malib.maskfill(dem)\n else:\n mask = dem.mask\n dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value)\n out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)\n out.set_fill_value(dem.fill_value.astype(dem.dtype))\n return out.astype(dem.dtype)", - "docstring": "Astropy gaussian filter properly handles convolution with NaN\n\n http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab\n\n width1 = 3; sigma1 = (width1-1) / 6;\n Specify width for smallest feature of interest and determine sigma appropriately\n\n sigma is width of 1 std in pixels (not multiplier)\n\n scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%\n\n 3*sigma on either side of kernel - 99.7%\n\n If sigma is specified, filter width will be a multiple of 8 times sigma \n\n Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.\n\n If size is < the required width for 6-8 sigma, need to use different mode to create kernel\n\n mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3\n\n The sigma/size calculations below should work for non-integer sigma" - }, - { - "code": "def shutdown(self, message=None):\n for name, server in self.servers.items():\n server.quit(message)", - "docstring": "Disconnect all servers with a message.\n\n Args:\n message (str): Quit message to use on each connection." 
- }, - { - "code": "def _get_aliased_pivot_columns(self):\n defaults = [self._foreign_key, self._other_key]\n columns = []\n for column in defaults + self._pivot_columns:\n value = '%s.%s AS pivot_%s' % (self._table, column, column)\n if value not in columns:\n columns.append('%s.%s AS pivot_%s' % (self._table, column, column))\n return columns", - "docstring": "Get the pivot columns for the relation.\n\n :rtype: list" - }, - { - "code": "def _ensure_datetimelike_to_i8(other, to_utc=False):\n from pandas import Index\n from pandas.core.arrays import PeriodArray\n if lib.is_scalar(other) and isna(other):\n return iNaT\n elif isinstance(other, (PeriodArray, ABCIndexClass,\n DatetimeLikeArrayMixin)):\n if getattr(other, 'tz', None) is not None:\n if to_utc:\n other = other.tz_convert('UTC')\n else:\n other = other.tz_localize(None)\n else:\n try:\n return np.array(other, copy=False).view('i8')\n except TypeError:\n other = Index(other)\n return other.asi8", - "docstring": "Helper for coercing an input scalar or array to i8.\n\n Parameters\n ----------\n other : 1d array\n to_utc : bool, default False\n If True, convert the values to UTC before extracting the i8 values\n If False, extract the i8 values directly.\n\n Returns\n -------\n i8 1d array" - }, - { - "code": "def ChunkedAttentionSelector(x, params, selector=None, **kwargs):\n del params, kwargs\n selector = selector or (lambda x: [] if x < 1 else [x-1])\n triples, masks = zip(*x)\n (queries, keys, values) = zip(*triples)\n result = []\n for i in range(len(x)):\n selected = selector(i)\n new_key_list = [keys[j] for j in selected]\n new_key = np.concatenate(new_key_list + [keys[i]], axis=1)\n new_value = np.concatenate(\n [values[j] for j in selected] + [values[i]], axis=1)\n new_mask_shapes = [(1, queries[i].shape[1], key.shape[1])\n for key in new_key_list]\n cur_mask = masks[i]\n new_mask_list = [np.ones(s, dtype=cur_mask.dtype) for s in new_mask_shapes]\n new_mask = np.concatenate(new_mask_list + [cur_mask], axis=2)\n result.append(((queries[i], new_key, new_value), new_mask))\n return tuple(result)", - "docstring": "Select which chunks to attend to in chunked attention.\n\n Args:\n x: inputs, a list of elements of the form (q, k, v), mask for each chunk.\n params: parameters (unused).\n selector: a function from chunk_number -> list of chunk numbers that says\n which other chunks should be appended to the given one (previous if None).\n **kwargs: unused other arguments.\n\n Returns:\n a list of elements of the form (q, k', v'), mask' where k', v' and mask' are\n concatenations of k, v and identity-extended masks from selected chunks." - }, - { - "code": "def get_editor(self, file):\n for editor in self.__model.list_editors():\n if editor.file == file:\n return editor", - "docstring": "Returns the Model editor associated with given file.\n\n :param file: File to search editors for.\n :type file: unicode\n :return: Editor.\n :rtype: Editor" - }, - { - "code": "def _load_start_paths(self):\n \" Start the Read-Eval-Print Loop. \"\n if self._startup_paths:\n for path in self._startup_paths:\n if os.path.exists(path):\n with open(path, 'rb') as f:\n code = compile(f.read(), path, 'exec')\n six.exec_(code, self.get_globals(), self.get_locals())\n else:\n output = self.app.output\n output.write('WARNING | File not found: {}\\n\\n'.format(path))", - "docstring": "Start the Read-Eval-Print Loop." 
- }, - { - "code": "def chainable(fn):\n @functools.wraps(fn)\n def wrapper(nxt=lambda x: x):\n if hasattr(nxt, '__call__'):\n return lambda x: nxt(fn(x))\n return fn(nxt)\n return wrapper", - "docstring": "Make function a chainable validator\n\n The returned function is a chainable validator factory which takes the next\n function in the chain and returns a chained version of the original\n validator: ``fn(next(value))``.\n\n The chainable validators are used with the ``make_chain()`` function." - }, - { - "code": "def help_completion_fields(self):\n for name, field in sorted(engine.FieldDefinition.FIELDS.items()):\n if issubclass(field._matcher, matching.BoolFilter):\n yield \"%s=no\" % (name,)\n yield \"%s=yes\" % (name,)\n continue\n elif issubclass(field._matcher, matching.PatternFilter):\n yield \"%s=\" % (name,)\n yield \"%s=/\" % (name,)\n yield \"%s=?\" % (name,)\n yield \"%s=\\\"'*'\\\"\" % (name,)\n continue\n elif issubclass(field._matcher, matching.NumericFilterBase):\n for i in range(10):\n yield \"%s=%d\" % (name, i)\n else:\n yield \"%s=\" % (name,)\n yield r\"%s=+\" % (name,)\n yield r\"%s=-\" % (name,)\n yield \"custom_\"\n yield \"kind_\"", - "docstring": "Return valid field names." - }, - { - "code": "def parseRangeString(s, convertToZeroBased=False):\n result = set()\n for _range in s.split(','):\n match = _rangeRegex.match(_range)\n if match:\n start, end = match.groups()\n start = int(start)\n if end is None:\n end = start\n else:\n end = int(end)\n if start > end:\n start, end = end, start\n if convertToZeroBased:\n result.update(range(start - 1, end))\n else:\n result.update(range(start, end + 1))\n else:\n raise ValueError(\n 'Illegal range %r. Ranges must single numbers or '\n 'number-number.' % _range)\n return result", - "docstring": "Parse a range string of the form 1-5,12,100-200.\n\n @param s: A C{str} specifiying a set of numbers, given in the form of\n comma separated numeric ranges or individual indices.\n @param convertToZeroBased: If C{True} all indices will have one\n subtracted from them.\n @return: A C{set} of all C{int}s in the specified set." - }, - { - "code": "def parse_na_line(txt: str, units: Units) -> typing.Dict[str, str]:\n retwx = {}\n wxdata = txt.split(' ')\n wxdata, _, retwx['wind_shear'] = core.sanitize_report_list(wxdata)\n wxdata, retwx['type'], retwx['start_time'], retwx['end_time'] = core.get_type_and_times(wxdata)\n wxdata, retwx['wind_direction'], retwx['wind_speed'], \\\n retwx['wind_gust'], _ = core.get_wind(wxdata, units)\n wxdata, retwx['visibility'] = core.get_visibility(wxdata, units)\n wxdata, retwx['clouds'] = core.get_clouds(wxdata)\n retwx['other'], retwx['altimeter'], retwx['icing'], retwx['turbulance'] \\\n = core.get_taf_alt_ice_turb(wxdata)\n return retwx", - "docstring": "Parser for the North American TAF forcast varient" - }, - { - "code": "def setWindowTitle(self, newTitle=''):\n title = 'Rampage - ' + newTitle\n super(MainWindow, self).setWindowTitle(title)", - "docstring": "Prepend Rampage to all window titles." 
- }, - { - "code": "def password_link_expired(self, now=None):\n if not now: now = datetime.datetime.utcnow()\n return self.password_link_expires < now", - "docstring": "Check if password link expired" - }, - { - "code": "def scramble_string(self, length):\n return fake.text(length) if length > 5 else ''.join([fake.random_letter() for n in range(0, length)])", - "docstring": "Return random string" - }, - { - "code": "def nice_number(number, thousands_separator=',', max_ndigits_after_dot=None):\n if isinstance(number, float):\n if max_ndigits_after_dot is not None:\n number = round(number, max_ndigits_after_dot)\n int_part, frac_part = str(number).split('.')\n return '%s.%s' % (nice_number(int(int_part), thousands_separator),\n frac_part)\n else:\n chars_in = list(str(number))\n number = len(chars_in)\n chars_out = []\n for i in range(0, number):\n if i % 3 == 0 and i != 0:\n chars_out.append(thousands_separator)\n chars_out.append(chars_in[number - i - 1])\n chars_out.reverse()\n return ''.join(chars_out)", - "docstring": "Return nicely printed number NUMBER in language LN.\n\n Return nicely printed number NUMBER in language LN using\n given THOUSANDS_SEPARATOR character.\n If max_ndigits_after_dot is specified and the number is float, the\n number is rounded by taking in consideration up to max_ndigits_after_dot\n digit after the dot.\n\n This version does not pay attention to locale. See\n tmpl_nice_number_via_locale()." - }, - { - "code": "def confusion_matrix_and_correct_series(self, y_info):\n a = deepcopy(y_info['true'])\n true_count = dict((i, a.count(i)) for i in set(a))\n a = deepcopy(y_info['pred'])\n pred_count = dict((i, a.count(i)) for i in set(a))\n sorted_cats = sorted(list(set(y_info['true'] + y_info['pred'])))\n conf_mat = confusion_matrix(y_info['true'], y_info['pred'], sorted_cats)\n df_conf = pd.DataFrame(conf_mat, index=sorted_cats, columns=sorted_cats)\n total_correct = np.trace(df_conf)\n total_pred = df_conf.sum().sum()\n fraction_correct = total_correct/float(total_pred)\n correct_list = []\n cat_counts = df_conf.sum(axis=1)\n all_cols = df_conf.columns.tolist()\n for inst_cat in all_cols:\n inst_correct = df_conf[inst_cat].loc[inst_cat] / cat_counts[inst_cat]\n correct_list.append(inst_correct)\n ser_correct = pd.Series(data=correct_list, index=all_cols)\n populations = {}\n populations['true'] = true_count\n populations['pred'] = pred_count\n return df_conf, populations, ser_correct, fraction_correct", - "docstring": "Generate confusion matrix from y_info" - }, - { - "code": "def list_users(self, envs=[], query=\"/users/\"):\n juicer.utils.Log.log_debug(\n \"List Users In: %s\", \", \".join(envs))\n for env in envs:\n juicer.utils.Log.log_info(\"%s:\" % (env))\n _r = self.connectors[env].get(query)\n if _r.status_code == Constants.PULP_GET_OK:\n for user in juicer.utils.load_json_str(_r.content):\n roles = user['roles']\n if roles:\n user_roles = ', '.join(roles)\n else:\n user_roles = \"None\"\n juicer.utils.Log.log_info(\"\\t%s - %s\" % (user['login'], user_roles))\n else:\n _r.raise_for_status()\n return True", - "docstring": "List users in specified environments" - }, - { - "code": "def _strip_dirty(xmltree):\n dirty = xmltree.attrib.pop('dirtyId', None)\n if dirty:\n xmltree.attrib.pop('admin', None)\n xmltree.attrib.pop('time', None)\n for child in xmltree:\n child = _strip_dirty(child)\n return xmltree", - "docstring": "Removes dirtyID tags from the candidate config result. 
Palo Alto devices will make the candidate configuration with\n a dirty ID after a change. This can cause unexpected results when parsing." - }, - { - "code": "def ask_backend(self):\n response = self._ask_boolean(\n \"Do you have a local docker daemon (on Linux), do you use docker-machine via a local machine, or do you use \"\n \"Docker for macOS?\", True)\n if (response):\n self._display_info(\"If you use docker-machine on macOS, please see \"\n \"http://inginious.readthedocs.io/en/latest/install_doc/troubleshooting.html\")\n return \"local\"\n else:\n self._display_info(\n \"You will have to run inginious-backend and inginious-agent yourself. Please run the commands without argument \"\n \"and/or read the documentation for more info\")\n return self._display_question(\"Please enter the address of your backend\")", - "docstring": "Ask the user to choose the backend" - }, - { - "code": "def join(self, glue=\" \"):\n j = glue.join([str(x) for x in self.obj])\n return self._wrap(j)", - "docstring": "Javascript's join implementation" - }, - { - "code": "def list_signals(self):\n print(\"Signal list\")\n print(\"***********\\n\")\n for key, signal in self.app.signals.signals.items():\n print(\"%s (%s)\\n %s\\n\" % (signal.name, signal.plugin.name, signal.description))", - "docstring": "Prints a list of all registered signals. Including description and plugin name." - }, - { - "code": "def _include_query_example(self, f, method, path, api_version, server_type):\n m = method[\"method\"].lower()\n query_path = \"{}_{}_{}.txt\".format(server_type, m, self._file_path(path))\n if os.path.isfile(os.path.join(self._directory, \"api\", \"examples\", query_path)):\n f.write(\"Sample session\\n***************\\n\")\n f.write(\"\\n\\n.. literalinclude:: ../../../examples/{}\\n\\n\".format(query_path))", - "docstring": "If a sample session is available we include it in documentation" - }, - { - "code": "async def create_websocket_client(sock: anyio.abc.SocketStream,\n addr,\n path: str,\n headers: Optional[List] = None,\n subprotocols: Optional[List[str]] = None):\n ws = Websocket()\n await ws.start_client(\n sock, addr=addr, path=path, headers=headers, subprotocols=subprotocols)\n return ws", - "docstring": "A more low-level form of create_websocket_client.\n You are responsible for closing this websocket." 
- }, - { - "code": "def make_uniq_for_step(ctx, ukeys, step, stage, full_data, clean_missing_after_seconds, to_uniq):\n if not ukeys:\n return to_uniq\n else:\n uniq_data = bubble_lod_load(ctx, step, stage)\n ctx.say('Creating uniq identifiers for [' + step + '] information', 0)\n ctx.gbc.say('uniq_data:', stuff=uniq_data, verbosity=1000)\n uniq_step_res = make_uniq(ctx=ctx,\n ldict=to_uniq,\n keyed=uniq_data,\n uniqstr=ukeys,\n tag=step,\n full_data=full_data,\n remove_missing_after_seconds=clean_missing_after_seconds)\n ctx.gbc.say('uniq_step_res:', stuff=uniq_step_res, verbosity=1000)\n to_uniq_newest = get_newest_uniq(ctx.gbc, uniq_step_res)\n to_uniq = to_uniq_newest\n uniq_res_list = get_uniq_list(ctx.gbc, uniq_step_res)\n reset = True\n pfr = bubble_lod_dump(ctx=ctx,\n step=step,\n stage=stage,\n full_data=full_data,\n reset=reset,\n data_gen=uniq_res_list)\n ctx.gbc.say('saved uniq ' + step + ' data res:',\n stuff=pfr, verbosity=700)\n return to_uniq", - "docstring": "initially just a copy from UNIQ_PULL" - }, - { - "code": "def check_package_set(package_set, should_ignore=None):\n if should_ignore is None:\n def should_ignore(name):\n return False\n missing = dict()\n conflicting = dict()\n for package_name in package_set:\n missing_deps = set()\n conflicting_deps = set()\n if should_ignore(package_name):\n continue\n for req in package_set[package_name].requires:\n name = canonicalize_name(req.project_name)\n if name not in package_set:\n missed = True\n if req.marker is not None:\n missed = req.marker.evaluate()\n if missed:\n missing_deps.add((name, req))\n continue\n version = package_set[name].version\n if not req.specifier.contains(version, prereleases=True):\n conflicting_deps.add((name, version, req))\n if missing_deps:\n missing[package_name] = sorted(missing_deps, key=str)\n if conflicting_deps:\n conflicting[package_name] = sorted(conflicting_deps, key=str)\n return missing, conflicting", - "docstring": "Check if a package set is consistent\n\n If should_ignore is passed, it should be a callable that takes a\n package name and returns a boolean." - }, - { - "code": "def match_keys(inp, p=False):\n _keys = []\n ssh_keys = DO.get_ssh_keys()\n for k in inp.split(\",\"):\n done = False\n if k.isdigit():\n for _ in [s for s in ssh_keys if s[\"id\"] == int(k)]:\n done = True\n _keys.append(_[\"fingerprint\"])\n else:\n for _ in [s for s in ssh_keys if s[\"fingerprint\"] == k]:\n done = True\n _keys.append(_[\"fingerprint\"])\n if p and not done:\n print(\"Could not find a match for '{}', skipping\".format(k), file=sys.stderr)\n return _keys", - "docstring": "Takes a comma-separated string of key ids or fingerprints and returns a list of key ids" - }, - { - "code": "def adjustMargins(self):\n y = 0\n height = 0\n if self._titleLabel.text():\n height += self._titleLabel.height() + 3\n y += height\n if self._subTitleLabel.text():\n self._subTitleLabel.move(0, y)\n height += self._subTitleLabel.height() + 3\n self.setContentsMargins(0, height, 0, 0)", - "docstring": "Adjusts the margins to incorporate enough room for the widget's title and sub-title." 
- }, - { - "code": "def _get_response_message(self, request, result, input_chat):\n if isinstance(result, types.UpdateShort):\n updates = [result.update]\n entities = {}\n elif isinstance(result, (types.Updates, types.UpdatesCombined)):\n updates = result.updates\n entities = {utils.get_peer_id(x): x\n for x in\n itertools.chain(result.users, result.chats)}\n else:\n return None\n random_to_id = {}\n id_to_message = {}\n for update in updates:\n if isinstance(update, types.UpdateMessageID):\n random_to_id[update.random_id] = update.id\n elif isinstance(update, (\n types.UpdateNewChannelMessage, types.UpdateNewMessage)):\n update.message._finish_init(self, entities, input_chat)\n id_to_message[update.message.id] = update.message\n elif (isinstance(update, types.UpdateEditMessage)\n and not isinstance(request.peer, types.InputPeerChannel)):\n if request.id == update.message.id:\n update.message._finish_init(self, entities, input_chat)\n return update.message\n elif (isinstance(update, types.UpdateEditChannelMessage)\n and utils.get_peer_id(request.peer) ==\n utils.get_peer_id(update.message.to_id)):\n if request.id == update.message.id:\n update.message._finish_init(self, entities, input_chat)\n return update.message\n if request is None:\n return id_to_message\n random_id = request if isinstance(request, int) else request.random_id\n if not utils.is_list_like(random_id):\n if random_id in random_to_id:\n return id_to_message[random_to_id[random_id]]\n else:\n return None\n else:\n return [id_to_message[random_to_id[rnd]]\n if rnd in random_to_id else None\n for rnd in random_id]", - "docstring": "Extracts the response message known a request and Update result.\n The request may also be the ID of the message to match.\n\n If ``request is None`` this method returns ``{id: message}``.\n\n If ``request.random_id`` is a list, this method returns a list too." - }, - { - "code": "def get_sources(arxiv_id):\n try:\n request = requests.get(ARXIV_EPRINT_URL.format(arxiv_id=arxiv_id))\n request.raise_for_status()\n file_object = io.BytesIO(request.content)\n return tarfile.open(fileobj=file_object)\n except (RequestException, AssertionError, tarfile.TarError):\n return None", - "docstring": "Download sources on arXiv for a given preprint.\n\n .. note::\n\n Bulk download of sources from arXiv is not permitted by their API. \\\n You should have a look at http://arxiv.org/help/bulk_data_s3.\n\n :param eprint: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in a \\\n canonical form.\n :returns: A ``TarFile`` object of the sources of the arXiv preprint or \\\n ``None``." - }, - { - "code": "def _ProcessImportBySuffix(name, fromlist, globals):\n _import_local.nest_level -= 1\n if _import_callbacks:\n _import_local.names |= _GenerateNames(name, fromlist, globals)\n if _import_local.nest_level == 0:\n _InvokeImportCallbackBySuffix(_import_local.names)\n if _import_local.nest_level == 0:\n _import_local.names.clear()", - "docstring": "Processes an import.\n\n Calculates the possible names generated from an import and invokes\n registered callbacks if needed.\n\n Args:\n name: Argument as passed to the importer.\n fromlist: Argument as passed to the importer.\n globals: Argument as passed to the importer." 
- }, - { - "code": "def usearch61_smallmem_cluster(intermediate_fasta,\n percent_id=0.97,\n minlen=64,\n rev=False,\n output_dir=\".\",\n remove_usearch_logs=False,\n wordlength=8,\n usearch61_maxrejects=32,\n usearch61_maxaccepts=1,\n sizeorder=False,\n HALT_EXEC=False,\n output_uc_filepath=None,\n log_name=\"smallmem_clustered.log\",\n sizeout=False,\n consout_filepath=None):\n log_filepath = join(output_dir, log_name)\n params = {'--minseqlength': minlen,\n '--cluster_smallmem': intermediate_fasta,\n '--id': percent_id,\n '--uc': output_uc_filepath,\n '--wordlength': wordlength,\n '--maxrejects': usearch61_maxrejects,\n '--maxaccepts': usearch61_maxaccepts,\n '--usersort': True\n }\n if sizeorder:\n params['--sizeorder'] = True\n if not remove_usearch_logs:\n params['--log'] = log_filepath\n if rev:\n params['--strand'] = 'both'\n else:\n params['--strand'] = 'plus'\n if sizeout:\n params['--sizeout'] = True\n if consout_filepath:\n params['--consout'] = consout_filepath\n clusters_fp = output_uc_filepath\n app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)\n app_result = app()\n return clusters_fp, app_result", - "docstring": "Performs usearch61 de novo clustering via cluster_smallmem option\n\n Only supposed to be used with length sorted data (and performs length\n sorting automatically) and does not support reverse strand matching\n\n intermediate_fasta: fasta filepath to be clustered with usearch61\n percent_id: percentage id to cluster at\n minlen: minimum sequence length\n rev: will enable reverse strand matching if True\n output_dir: directory to output log, OTU mapping, and intermediate files\n remove_usearch_logs: Saves usearch log files\n wordlength: word length to use for initial high probability sequence matches\n usearch61_maxrejects: Set to 'default' or an int value specifying max\n rejects\n usearch61_maxaccepts: Number of accepts allowed by usearch61\n HALT_EXEC: application controller option to halt execution\n output_uc_filepath: Path to write clusters (.uc) file.\n log_name: filepath to write usearch61 generated log file\n sizeout: If True, will save abundance data in output fasta labels.\n consout_filepath: Needs to be set to save clustered consensus fasta\n filepath used for chimera checking." - }, - { - "code": "def gecos(self):\n if not self._gecos:\n return None\n if self._gecos.startswith(text_type('\\'')) and self._gecos.endswith(text_type('\\'')):\n self._gecos = '\\\"{0}\\\"'.format(self._gecos[1:-1])\n return self._gecos\n elif self._gecos.startswith(text_type('\\\"')) and self._gecos.endswith(text_type('\\\"')):\n return self._gecos\n else:\n return '\\\"{0}\\\"'.format(self._gecos)", - "docstring": "Force double quoted gecos.\n\n returns:\n str: The double quoted gecos." 
- }, - { - "code": "def _validate_schema(schema, body):\n try:\n schema[1](rapidjson.dumps(body))\n except ValueError as exc:\n try:\n jsonschema.validate(body, schema[0])\n except jsonschema.ValidationError as exc2:\n raise SchemaValidationError(str(exc2)) from exc2\n logger.warning('code problem: jsonschema did not raise an exception, wheras rapidjson raised %s', exc)\n raise SchemaValidationError(str(exc)) from exc", - "docstring": "Validate data against a schema" - }, - { - "code": "def matches(self, msg_seq: int, msg: MessageInterface) -> bool:\n return all(crit.matches(msg_seq, msg) for crit in self.all_criteria)", - "docstring": "The message matches if all the defined search key criteria match.\n\n Args:\n msg_seq: The message sequence ID.\n msg: The message object." - }, - { - "code": "def getVersion(data):\n data = data.splitlines()\n return next((\n v\n for v, u in zip(data, data[1:])\n if len(v) == len(u) and allSame(u) and hasDigit(v) and \".\" in v\n ))", - "docstring": "Parse version from changelog written in RST format." - }, - { - "code": "def get_spatial_type(spatial_model):\n if spatial_model in ['SkyDirFunction', 'PointSource',\n 'Gaussian']:\n return 'SkyDirFunction'\n elif spatial_model in ['SpatialMap']:\n return 'SpatialMap'\n elif spatial_model in ['RadialGaussian', 'RadialDisk']:\n try:\n import pyLikelihood\n if hasattr(pyLikelihood, 'RadialGaussian'):\n return spatial_model\n else:\n return 'SpatialMap'\n except Exception:\n return spatial_model\n else:\n return spatial_model", - "docstring": "Translate a spatial model string to a spatial type." - }, - { - "code": "def set_default_mode(self, default_mode):\n if default_mode.lower() not in (CONST.MODE_AWAY, CONST.MODE_HOME):\n raise AbodeException(ERROR.INVALID_DEFAULT_ALARM_MODE)\n self._default_alarm_mode = default_mode.lower()", - "docstring": "Set the default mode when alarms are turned 'on'." - }, - { - "code": "def create_key(self):\n print(\"Creating key. Please input the following options:\")\n name = input(\"Key name (optional): \")\n print(\"To make this key more secure, you should restrict the IP addresses that can use it. \")\n print(\"To use with all IPs, leave blank or use 0.0.0.0/0.\")\n print(\"To use with a single IP, append '/32', such as 207.39.29.22/32. \")\n print(\"See this reference on CIDR blocks: http://software77.net/cidr-101.html\")\n cidr = input(\"CIDR (optional): \")\n key = self._curl_bitmex(\"/apiKey\",\n postdict={\"name\": name, \"cidr\": cidr, \"enabled\": True})\n print(\"Key created. Details:\\n\")\n print(\"API Key: \" + key[\"id\"])\n print(\"Secret: \" + key[\"secret\"])\n print(\"\\nSafeguard your secret key! If somebody gets a hold of your API key and secret,\")\n print(\"your account can be taken over completely.\")\n print(\"\\nKey generation complete.\")", - "docstring": "Create an API key." - }, - { - "code": "def compile_migrations(migrator, models, reverse=False):\n source = migrator.orm.values()\n if reverse:\n source, models = models, source\n migrations = diff_many(models, source, migrator, reverse=reverse)\n if not migrations:\n return False\n migrations = NEWLINE + NEWLINE.join('\\n\\n'.join(migrations).split('\\n'))\n return CLEAN_RE.sub('\\n', migrations)", - "docstring": "Compile migrations for given models." 
- }, - { - "code": "def link(self):\n if self.linked:\n return self\n self.linked = True\n included_modules = []\n for include in self.includes.values():\n included_modules.append(include.link().surface)\n self.scope.add_surface('__includes__', tuple(included_modules))\n self.scope.add_surface('__thrift_source__', self.thrift_source)\n for linker in LINKERS:\n linker(self.scope).link()\n self.scope.add_surface('loads', Deserializer(self.protocol))\n self.scope.add_surface('dumps', Serializer(self.protocol))\n return self", - "docstring": "Link all the types in this module and all included modules." - }, - { - "code": "def get_public_trades(self, time_frame='hour'):\n self._log('get public trades')\n return self._rest_client.get(\n endpoint='/transactions',\n params={'book': self.name, 'time': time_frame}\n )", - "docstring": "Return public trades that were completed recently.\n\n :param time_frame: Time frame. Allowed values are \"minute\" for trades\n in the last minute, or \"hour\" for trades in the last hour (default:\n \"hour\").\n :type time_frame: str | unicode\n :return: Public trades completed recently.\n :rtype: [dict]" - }, - { - "code": "def string_to_sign(self):\n return (AWS4_HMAC_SHA256 + \"\\n\" +\n self.request_timestamp + \"\\n\" +\n self.credential_scope + \"\\n\" +\n sha256(self.canonical_request.encode(\"utf-8\")).hexdigest())", - "docstring": "The AWS SigV4 string being signed." - }, - { - "code": "def disable_radio_button(self):\n checked = self.default_input_button_group.checkedButton()\n if checked:\n self.default_input_button_group.setExclusive(False)\n checked.setChecked(False)\n self.default_input_button_group.setExclusive(True)\n for button in self.default_input_button_group.buttons():\n button.setDisabled(True)\n self.custom_value.setDisabled(True)", - "docstring": "Disable radio button group and custom value input area." - }, - { - "code": "def get_int_id(self, str_id):\n if not str_id in self.mapping:\n if self.curr_id == IdRemapper.INT_MAX:\n return None\n self.mapping[str_id] = self.curr_id\n self.r_mapping[self.curr_id] = str_id\n self.curr_id += 1\n return self.mapping[str_id]", - "docstring": "Get a unique 32 bits signed integer for the given string Id." - }, - { - "code": "def create_comment(github_object, body):\n try:\n return github_object.create_issue_comment(body)\n except AttributeError:\n return github_object.create_comment(body)", - "docstring": "Create a comment, whatever the object is a PR, a commit or an issue." 
- }, - { - "code": "def _can_compute(self, _id, persistence):\n if self.store and self._stored(_id, persistence):\n return True\n if self.is_root:\n return False\n return all(\n [n._can_compute(_id, persistence) for n in self.dependencies])", - "docstring": "Return true if this feature stored, or is unstored, but can be computed\n from stored dependencies" - }, - { - "code": "def get_metadata(item):\n for metadata_key in ('METADATA', 'PKG-INFO'):\n try:\n metadata_lines = item.get_metadata_lines(metadata_key)\n break\n except (KeyError, IOError):\n metadata_lines = []\n return metadata_lines", - "docstring": "Get metadata information from the distribution.\n\n Depending on the package this may either be in METADATA or PKG-INFO\n\n :param item: pkg_resources WorkingSet item\n :returns: metadata resource as list of non-blank non-comment lines" - }, - { - "code": "def scansearch(auth, label, filt, project=None, aid=None):\n if not aid:\n aid = accession(auth, label, project)\n url = \"%s/data/experiments/%s/scans?format=csv\" % (auth.url.rstrip('/'), aid)\n logger.debug(\"issuing http request %s\", url)\n r = requests.get(url, auth=(auth.username, auth.password), verify=CHECK_CERTIFICATE)\n if r.status_code != requests.codes.ok:\n raise ScanSearchError(\"response not ok (%s) from %s\" % (r.status_code, r.url))\n if not r.content:\n raise ScanSearchError(\"response is empty from %s\" % r.url)\n reader = csv.reader(io.StringIO(r.content.decode()))\n columns = next(reader)\n conn = sqlite3.connect(\":memory:\")\n c = conn.cursor()\n c.execute(\"CREATE TABLE scans (%s)\" % ','.join(columns))\n query = \"INSERT INTO scans VALUES (%s)\" % ','.join('?' * len(columns))\n for row in reader:\n c.execute(query, [x for x in row])\n conn.commit()\n result = col.defaultdict(list)\n for token,filt in iter(filt.items()):\n try:\n result[token] = [x[0] for x in c.execute(\"SELECT ID FROM scans where %s\" % filt)]\n except sqlite3.OperationalError:\n logger.critical(\"something is wrong with the filter: %s\", filt)\n raise\n return result", - "docstring": "Search for scans by supplying a set of SQL-based conditionals.\n\n Example:\n >>> import yaxil\n >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')\n >>> query = {\n ... 'eor1': \"note LIKE %EOR1%\",\n ... 'eor2': \"note LIKE %EOR2%\",\n ... 'mpr': \"series_description='T1_MEMPRAGE RMS' OR note LIKE %ANAT%\"\n ... }\n >>> yaxil.scansearch(auth, 'AB1234C', query)\n {\"mpr\": [4], \"eor1\": [13], \"eor2\": [14]}\n\n :param auth: XNAT authentication object\n :type auth: :mod:`yaxil.XnatAuth`\n :param label: XNAT MR Session label\n :type label: str\n :param filt: Scan search filter/query\n :type filt: dict\n :param project: XNAT MR Session project\n :type project: str\n :param aid: XNAT Accession ID\n :type aid: str\n :returns: Same dictionary that was passed in, but values are now matching scans\n :rtype: dict" - }, - { - "code": "def center_end(r, window_size):\n res = copy.copy(r)\n res.start = res.end - window_size / 2\n res.end = res.start + window_size\n return res", - "docstring": "Center a region on its end and expand it to window_size bases.\n\n :return: the new region." 
- }, - { - "code": "def setup_aiohttp_apispec(\n app: web.Application,\n *,\n title: str = \"API documentation\",\n version: str = \"0.0.1\",\n url: str = \"/api/docs/swagger.json\",\n request_data_name: str = \"data\",\n swagger_path: str = None,\n static_path: str = '/static/swagger',\n **kwargs\n) -> None:\n AiohttpApiSpec(\n url,\n app,\n request_data_name,\n title=title,\n version=version,\n swagger_path=swagger_path,\n static_path=static_path,\n **kwargs\n )", - "docstring": "aiohttp-apispec extension.\n\n Usage:\n\n .. code-block:: python\n\n from aiohttp_apispec import docs, request_schema, setup_aiohttp_apispec\n from aiohttp import web\n from marshmallow import Schema, fields\n\n\n class RequestSchema(Schema):\n id = fields.Int()\n name = fields.Str(description='name')\n bool_field = fields.Bool()\n\n\n @docs(tags=['mytag'],\n summary='Test method summary',\n description='Test method description')\n @request_schema(RequestSchema)\n async def index(request):\n return web.json_response({'msg': 'done', 'data': {}})\n\n\n app = web.Application()\n app.router.add_post('/v1/test', index)\n\n # init docs with all parameters, usual for ApiSpec\n setup_aiohttp_apispec(app=app,\n title='My Documentation',\n version='v1',\n url='/api/docs/api-docs')\n\n # now we can find it on 'http://localhost:8080/api/docs/api-docs'\n web.run_app(app)\n\n :param Application app: aiohttp web app\n :param str title: API title\n :param str version: API version\n :param str url: url for swagger spec in JSON format\n :param str request_data_name: name of the key in Request object\n where validated data will be placed by\n validation_middleware (``'data'`` by default)\n :param str swagger_path: experimental SwaggerUI support (starting from v1.1.0).\n By default it is None (disabled)\n :param str static_path: path for static files used by SwaggerUI\n (if it is enabled with ``swagger_path``)\n :param kwargs: any apispec.APISpec kwargs" - }, - { - "code": "def merge(self, ds, inplace=False, axis=1):\n if not isinstance(ds, Dataset):\n raise ValueError('Expected `Dataset`, got %s.' % ds)\n X_train = concat(ds.X_train, self.X_train, axis=axis)\n y_train = concat(ds.y_train, self.y_train, axis=axis)\n if ds.X_test is not None:\n X_test = concat(ds.X_test, self.X_test, axis=axis)\n else:\n X_test = None\n if ds.y_test is not None:\n y_test = concat(ds.y_test, self.y_test, axis=axis)\n else:\n y_test = None\n if inplace:\n self._X_train = X_train\n self._y_train = y_train\n if X_test is not None:\n self._X_test = X_test\n if y_test is not None:\n self._y_test = y_test\n return None\n return Dataset(X_train, y_train, X_test, y_test)", - "docstring": "Merge two datasets.\n\n Parameters\n ----------\n\n axis : {0,1}\n ds : `Dataset`\n inplace : bool, default False\n\n Returns\n -------\n `Dataset`" - }, - { - "code": "def show_hids(target_vid = 0, target_pid = 0, output = None):\r\n if not output:\r\n output = sys.stdout\r\n from . 
import tools\r\n all_hids = None\r\n if target_vid:\r\n if target_pid:\r\n device_filter = HidDeviceFilter(vendor_id = target_vid,\r\n product_id = target_pid)\r\n else:\r\n device_filter = HidDeviceFilter(vendor_id = target_vid)\r\n all_hids = device_filter.get_devices()\r\n else:\r\n all_hids = find_all_hid_devices()\r\n if all_hids:\r\n print(\"Found HID class devices!, writting details...\")\r\n for dev in all_hids:\r\n device_name = str(dev)\r\n output.write(device_name)\r\n output.write('\\n\\n Path: %s\\n' % dev.device_path)\r\n output.write('\\n Instance: %s\\n' % dev.instance_id)\r\n output.write('\\n Port (ID): %s\\n' % dev.get_parent_instance_id())\r\n output.write('\\n Port (str):%s\\n' % str(dev.get_parent_device()))\r\n try:\r\n dev.open()\r\n tools.write_documentation(dev, output)\r\n finally:\r\n dev.close()\r\n print(\"done!\")\r\n else:\r\n print(\"There's not any non system HID class device available\")", - "docstring": "Check all HID devices conected to PC hosts." - }, - { - "code": "def _get_device_id(self, bus):\n _dbus = bus.get(SERVICE_BUS, PATH)\n devices = _dbus.devices()\n if self.device is None and self.device_id is None and len(devices) == 1:\n return devices[0]\n for id in devices:\n self._dev = bus.get(SERVICE_BUS, DEVICE_PATH + \"/%s\" % id)\n if self.device == self._dev.name:\n return id\n return None", - "docstring": "Find the device id" - }, - { - "code": "def add_fortran_to_env(env):\n try:\n FortranSuffixes = env['FORTRANFILESUFFIXES']\n except KeyError:\n FortranSuffixes = ['.f', '.for', '.ftn']\n try:\n FortranPPSuffixes = env['FORTRANPPFILESUFFIXES']\n except KeyError:\n FortranPPSuffixes = ['.fpp', '.FPP']\n DialectAddToEnv(env, \"FORTRAN\", FortranSuffixes,\n FortranPPSuffixes, support_module = 1)\n env['FORTRANMODPREFIX'] = ''\n env['FORTRANMODSUFFIX'] = '.mod'\n env['FORTRANMODDIR'] = ''\n env['FORTRANMODDIRPREFIX'] = ''\n env['FORTRANMODDIRSUFFIX'] = ''\n env['_FORTRANMODFLAG'] = '$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'", - "docstring": "Add Builders and construction variables for Fortran to an Environment." - }, - { - "code": "def autoreload(self, parameter_s=''):\n r\n if parameter_s == '':\n self._reloader.check(True)\n elif parameter_s == '0':\n self._reloader.enabled = False\n elif parameter_s == '1':\n self._reloader.check_all = False\n self._reloader.enabled = True\n elif parameter_s == '2':\n self._reloader.check_all = True\n self._reloader.enabled = True", - "docstring": "r\"\"\"%autoreload => Reload modules automatically\n\n %autoreload\n Reload all modules (except those excluded by %aimport) automatically\n now.\n\n %autoreload 0\n Disable automatic reloading.\n\n %autoreload 1\n Reload all modules imported with %aimport every time before executing\n the Python code typed.\n\n %autoreload 2\n Reload all modules (except those excluded by %aimport) every time\n before executing the Python code typed.\n\n Reloading Python modules in a reliable way is in general\n difficult, and unexpected things may occur. %autoreload tries to\n work around common pitfalls by replacing function code objects and\n parts of classes previously in the module with new versions. 
This\n makes the following things to work:\n\n - Functions and classes imported via 'from xxx import foo' are upgraded\n to new versions when 'xxx' is reloaded.\n\n - Methods and properties of classes are upgraded on reload, so that\n calling 'c.foo()' on an object 'c' created before the reload causes\n the new code for 'foo' to be executed.\n\n Some of the known remaining caveats are:\n\n - Replacing code objects does not always succeed: changing a @property\n in a class to an ordinary method or a method to a member variable\n can cause problems (but in old objects only).\n\n - Functions that are removed (eg. via monkey-patching) from a module\n before it is reloaded are not upgraded.\n\n - C extension modules cannot be reloaded, and so cannot be\n autoreloaded." - }, - { - "code": "def create_subnet(vpc_id=None, cidr_block=None, vpc_name=None,\n availability_zone=None, subnet_name=None, tags=None,\n region=None, key=None, keyid=None, profile=None, auto_assign_public_ipv4=False):\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {'created': False, 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}\n except BotoServerError as e:\n return {'created': False, 'error': __utils__['boto.get_error'](e)}\n subnet_object_dict = _create_resource('subnet', name=subnet_name, tags=tags, vpc_id=vpc_id,\n availability_zone=availability_zone,\n cidr_block=cidr_block, region=region, key=key,\n keyid=keyid, profile=profile)\n if auto_assign_public_ipv4:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n conn3.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet_object_dict['id'])\n return subnet_object_dict", - "docstring": "Given a valid VPC ID or Name and a CIDR block, create a subnet for the VPC.\n\n An optional availability zone argument can be provided.\n\n Returns True if the VPC subnet was created and returns False if the VPC subnet was not created.\n\n .. versionchanged:: 2015.8.0\n Added vpc_name argument\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt myminion boto_vpc.create_subnet vpc_id='vpc-6b1fe402' \\\\\n subnet_name='mysubnet' cidr_block='10.0.0.0/25'\n salt myminion boto_vpc.create_subnet vpc_name='myvpc' \\\\\n subnet_name='mysubnet', cidr_block='10.0.0.0/25'" - }, - { - "code": "def addLineAnnot(self, p1, p2):\n CheckParent(self)\n val = _fitz.Page_addLineAnnot(self, p1, p2)\n if not val: return\n val.thisown = True\n val.parent = weakref.proxy(self)\n self._annot_refs[id(val)] = val\n return val", - "docstring": "Add 'Line' annot for points p1 and p2." 
- }, - { - "code": "def parse_angle(input_dir):\n if isinstance(input_dir, str):\n abb_dirs = [_abbrieviate_direction(input_dir)]\n elif isinstance(input_dir, list):\n input_dir_str = ','.join(input_dir)\n abb_dir_str = _abbrieviate_direction(input_dir_str)\n abb_dirs = abb_dir_str.split(',')\n return itemgetter(*abb_dirs)(DIR_DICT)", - "docstring": "Calculate the meteorological angle from directional text.\n\n Works for abbrieviations or whole words (E -> 90 | South -> 180)\n and also is able to parse 22.5 degreee angles such as ESE/East South East\n\n Parameters\n ----------\n input_dir : string or array-like strings\n Directional text such as west, [south-west, ne], etc\n\n Returns\n -------\n angle\n The angle in degrees" - }, - { - "code": "def treynor_ratio(self, benchmark, rf=0.02):\n benchmark = _try_to_squeeze(benchmark)\n if benchmark.ndim > 1:\n raise ValueError(\"Treynor ratio requires a single benchmark\")\n rf = self._validate_rf(rf)\n beta = self.beta(benchmark)\n return (self.anlzd_ret() - rf) / beta", - "docstring": "Return over `rf` per unit of systematic risk.\n\n A measure of risk-adjusted performance that relates a\n portfolio's excess returns to the portfolio's beta.\n [Source: CFA Institute]\n\n Parameters\n ----------\n benchmark : {pd.Series, TSeries, 1d np.ndarray}\n The benchmark security to which `self` is compared.\n rf : {float, TSeries, pd.Series}, default 0.02\n If float, this represents an *compounded annualized*\n risk-free rate; 2.0% is the default.\n If a TSeries or pd.Series, this represents a time series\n of periodic returns to a risk-free security.\n\n To download a risk-free rate return series using\n 3-month US T-bill yields, see:`pyfinance.datasets.load_rf`.\n\n Returns\n -------\n float" - }, - { - "code": "def _set_annotation_to_str(annotation_data: Mapping[str, Mapping[str, bool]], key: str) -> str:\n value = annotation_data[key]\n if len(value) == 1:\n return 'SET {} = \"{}\"'.format(key, list(value)[0])\n x = ('\"{}\"'.format(v) for v in sorted(value))\n return 'SET {} = {{{}}}'.format(key, ', '.join(x))", - "docstring": "Return a set annotation string." - }, - { - "code": "def tcache(parser, token):\n nodelist = parser.parse(('endtcache',))\n parser.delete_first_token()\n tokens = token.split_contents()\n if len(tokens) < 3:\n raise template.TemplateSyntaxError(\"'%r' tag requires at least 2 arguments.\" % tokens[0])\n tags = None\n if len(tokens) > 3 and 'tags=' in tokens[-1]:\n tags = parser.compile_filter(tokens[-1][5:])\n del tokens[-1]\n return CacheNode(nodelist,\n parser.compile_filter(tokens[1]),\n tokens[2],\n [parser.compile_filter(token) for token in tokens[3:]],\n tags\n )", - "docstring": "This will cache the contents of a template fragment for a given amount\n of time with support tags.\n\n Usage::\n {% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}\n .. some expensive processing ..\n {% endtcache %}\n\n This tag also supports varying by a list of arguments:\n {% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}\n .. some expensive processing ..\n {% endtcache %}\n\n Each unique set of arguments will result in a unique cache entry." - }, - { - "code": "def scopes(self, scopes):\n validate_scopes(scopes)\n self._scopes = \" \".join(set(scopes)) if scopes else \"\"", - "docstring": "Set scopes.\n\n :param scopes: The list of scopes." 
- }, - { - "code": "def split(self, indice=None):\n self_size = self.size\n if self_size > 1:\n if not indice:\n mid = self_size // 2\n return Stack(cards=self[0:mid]), Stack(cards=self[mid::])\n else:\n return Stack(cards=self[0:indice]), Stack(cards=self[indice::])\n else:\n return Stack(cards=self.cards), Stack()", - "docstring": "Splits the Stack, either in half, or at the given indice, into two\n separate Stacks.\n\n :arg int indice:\n Optional. The indice to split the Stack at. Defaults to the middle\n of the ``Stack``.\n\n :returns:\n The two parts of the Stack, as separate Stack instances." - }, - { - "code": "def update_lux(self, extend=0):\r\n DEVICE_REG_OUT = 0x1d\r\n LUX_PWR_ON = 0x03\r\n if extend == 1:\r\n LUX_MODE = 0x1d\r\n delay = .08\r\n scale = 5\r\n else:\r\n LUX_MODE = 0x18\r\n delay = .4\r\n scale = 1\r\n LUX_READ_CH0 = 0x43\r\n LUX_READ_CH1 = 0x83\r\n TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)\r\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)\r\n lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)\r\n if (lux_on == LUX_PWR_ON):\r\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)\r\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)\r\n sleep(delay)\r\n adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)\r\n count0 = get_lux_count(adc_ch0) * scale\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)\r\n sleep(delay)\r\n adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)\r\n count1 = get_lux_count(adc_ch1) * scale\n ratio = count1 / (count0 - count1)\r\n lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))\r\n self.light_ratio = float(count1)/float(count0)\r\n print(\"Light ratio Ch1/Ch0: \", self.light_ratio)\r\n self.lux = round(lux, 3)\r\n return TCA_select(SensorCluster.bus, self.mux_addr, \"off\")\r\n else:\r\n raise SensorError(\"The lux sensor is powered down.\")", - "docstring": "Communicates with the TSL2550D light sensor and returns a \r\n lux value. \r\n\r\n Note that this method contains approximately 1 second of total delay.\r\n This delay is necessary in order to obtain full resolution\r\n compensated lux values.\r\n\r\n Alternatively, the device could be put in extended mode, \r\n which drops some resolution in favor of shorter delays." - }, - { - "code": "def stress_invariants(s):\n s = np.asarray(s)\n if s.shape == (6,):\n s = s.reshape(1,-1)\n elif s.shape == (3,3):\n s = s.reshape(1,-1,-1)\n if len(s.shape) == 3:\n s = np.transpose([s[:,0,0],s[:,1,1],s[:,2,2],\n (s[:,0,1]+s[:,1,0])/2,\n (s[:,1,2]+s[:,2,1])/2,\n (s[:,2,0]+s[:,0,2])/2])\n I1 = s[:,0]+s[:,1]+s[:,2]\n I2 = s[:,0]*s[:,1]+s[:,1]*s[:,2]+s[:,2]*s[:,0]-s[:,3]**2-s[:,4]**2-s[:,5]**2\n I3 = s[:,0]*s[:,1]*s[:,2]+2*s[:,3]*s[:,4]*s[:,5]-s[:,3]**2*s[:,2]-s[:,4]**2*s[:,0]-s[:,5]**2*s[:,1]\n J2 = I1**2/3-I2\n J3 = 2*I1**3/27-I1*I2/3+I3\n return -I1/3, np.sqrt(2*J2/3), J3", - "docstring": "Receives a list of stress tensors and returns the three invariants.\n Return hydrostatic pressure, octahedral shear stress and J3" - }, - { - "code": "def override_params(opening_char='{', closing_char='}', separator_char='|'):\n global char_separator, char_opening, char_closing\n char_separator = separator_char\n char_opening = opening_char\n char_closing = closing_char", - "docstring": "Override some character settings\n\n @type opening_char: str\n @param opening_char: Opening character. Default: '{'\n @type closing_char: str\n @param closing_char: Closing character. 
Default: '}'\n @type separator_char: str\n @param separator_char: Separator char. Default: '|'" - }, - { - "code": "async def close_all_connections(self) -> None:\n while self._connections:\n conn = next(iter(self._connections))\n await conn.close()", - "docstring": "Close all open connections and asynchronously wait for them to finish.\n\n This method is used in combination with `~.TCPServer.stop` to\n support clean shutdowns (especially for unittests). Typical\n usage would call ``stop()`` first to stop accepting new\n connections, then ``await close_all_connections()`` to wait for\n existing connections to finish.\n\n This method does not currently close open websocket connections.\n\n Note that this method is a coroutine and must be caled with ``await``." - }, - { - "code": "def angle2xyz(azi, zen):\n azi = xu.deg2rad(azi)\n zen = xu.deg2rad(zen)\n x = xu.sin(zen) * xu.sin(azi)\n y = xu.sin(zen) * xu.cos(azi)\n z = xu.cos(zen)\n return x, y, z", - "docstring": "Convert azimuth and zenith to cartesian." - }, - { - "code": "def ecuc_extract_signal(signal_node, ns):\n attributes = signal_node.findall(\".//\" + ns + \"DEFINITION-REF\")\n start_bit = None\n size = 0\n is_little = False\n for attribute in attributes:\n if attribute.text.endswith(\"ComBitPosition\"):\n start_bit = int(attribute.getparent().find(\".//\" + ns + \"VALUE\").text)\n if attribute.text.endswith(\"ComBitSize\"):\n size = int(attribute.getparent().find(\".//\" + ns + \"VALUE\").text)\n if attribute.text.endswith(\"ComSignalEndianness\"):\n endianness = attribute.getparent().find(\".//\" + ns + \"VALUE\").text\n is_little = \"LITTLE_ENDIAN\" in endianness\n if attribute.text.endswith(\"ComSignalInitValue\"):\n init_value = int(attribute.getparent().find(\".//\" + ns + \"VALUE\").text)\n if attribute.text.endswith(\"ComSignalType\"):\n signal_type = attribute.getparent().find(\".//\" + ns + \"VALUE\").text\n if attribute.text.endswith(\"ComTimeout\"):\n timeout = int(attribute.getparent().find(\".//\" + ns + \"VALUE\").text)\n return canmatrix.Signal(get_element_name(signal_node, ns), start_bit=start_bit, size=size, is_little_endian=is_little)", - "docstring": "Extract signal from ECUc file." - }, - { - "code": "def WaitForJobChange(r, job_id, fields, prev_job_info, prev_log_serial):\n body = {\n \"fields\": fields,\n \"previous_job_info\": prev_job_info,\n \"previous_log_serial\": prev_log_serial,\n }\n return r.request(\"get\", \"/2/jobs/%s/wait\" % job_id, content=body)", - "docstring": "Waits for job changes.\n\n @type job_id: int\n @param job_id: Job ID for which to wait" - }, - { - "code": "def _get_flag_group(h5f, path):\n if path:\n return h5f[path]\n if _is_flag_group(h5f):\n return h5f\n try:\n path, = _find_flag_groups(h5f)\n except ValueError:\n pass\n else:\n return h5f[path]\n raise ValueError(\n \"please pass a valid HDF5 Group, or specify the HDF5 Group \"\n \"path via the ``path=`` keyword argument\",\n )", - "docstring": "Determine the group to use in order to read a flag" - }, - { - "code": "def numberofsamples(self):\n idline = 0\n linenumber = 0\n with open(self.samplesheet, \"rb\") as ssheet:\n for linenumber, entry in enumerate(ssheet):\n if \"Sample_ID\" in entry:\n idline = linenumber\n self.samplecount = linenumber - idline\n printtime('There are {} samples in this run. 
'\n 'Running off-hours module with the following parameters:\\n'\n 'MiSeqPath: {},\\n'\n 'MiSeqFolder: {},\\n'\n 'SampleSheet: {}'.format(self.samplecount, self.miseqpath, self.miseqfolder, self.samplesheet),\n self.start)\n self.fastqlinker()", - "docstring": "Count the number of samples is the samplesheet" - }, - { - "code": "def find_distributions(path_item, only=False):\n importer = get_importer(path_item)\n finder = _find_adapter(_distribution_finders, importer)\n return finder(importer, path_item, only)", - "docstring": "Yield distributions accessible via `path_item`" - }, - { - "code": "def auto_generate_missing_tabs(self):\n for config in models_config.get_all_configs():\n model_alias = '{}.{}'.format(config.app_label, config.model_name)\n if model_alias not in self.tabs:\n @self.register(model_alias)\n def general_layout(obj):\n return Layout(\n Column12(\n Panel(\n 'info',\n DescriptionList(*[f.name for f in obj.get_fields()])\n )\n )\n )", - "docstring": "Auto generate tabs for models with no tabs" - }, - { - "code": "def aggregate_key(self, aggregate_key):\n aggregation = self.data_dict[aggregate_key]\n data_dict_keys = {y for x in aggregation for y in x.keys()}\n for key in data_dict_keys:\n stacked = np.stack([d[key] for d in aggregation], axis=0)\n self.data_dict[key] = np.mean(stacked, axis=0)", - "docstring": "Aggregate values from key and put them into the top-level dictionary" - }, - { - "code": "def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:\n pass", - "docstring": "After epoch profile event.\n\n This event provides opportunity to process time profile of the finished epoch.\n\n :param epoch_id: finished epoch id\n :param profile: dictionary of lists of event timings that were measured during the epoch\n :param extra_streams: enumeration of additional stream names" - }, - { - "code": "def check(self):\n if self.fq_name:\n self['uuid'] = self._check_fq_name(self.fq_name)\n elif self.uuid:\n self['fq_name'] = self._check_uuid(self.uuid)\n return True", - "docstring": "Check that the resource exists.\n\n :raises ResourceNotFound: if the resource doesn't exists" - }, - { - "code": "async def ctcp_reply(self, target, query, response):\n if self.is_channel(target) and not self.in_channel(target):\n raise client.NotInChannel(target)\n await self.notice(target, construct_ctcp(query, response))", - "docstring": "Send a CTCP reply to a target." - }, - { - "code": "def get_banks_by_assessment_part(self, assessment_part_id):\n mgr = self._get_provider_manager('ASSESSMENT', local=True)\n lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)\n return lookup_session.get_banks_by_ids(\n self.get_bank_ids_by_assessment_part(assessment_part_id))", - "docstring": "Gets the ``Banks`` mapped to an ``AssessmentPart``.\n\n arg: assessment_part_id (osid.id.Id): ``Id`` of an\n ``AssessmentPart``\n return: (osid.assessment.BankList) - list of banks\n raise: NotFound - ``assessment_part_id`` is not found\n raise: NullArgument - ``assessment_part_id`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def load(self, df, centerings):\n return super().load(\n df,\n {key: value\n for key, value in centerings.items()\n if key in self.filter_}\n )", - "docstring": "Call `load` method with `centerings` filtered to keys in `self.filter_`." 
- }, - { - "code": "def process_shells(self, shells):\n result = {'success': True, 'output': []}\n if self.parallel and len(shells) > 1:\n result = self.process_shells_parallel(shells)\n elif len(shells) > 0:\n result = self.process_shells_ordered(shells)\n return result", - "docstring": "Processing a list of shells." - }, - { - "code": "def save_function(elements, module_path):\n for elem, signature in elements.items():\n if isinstance(signature, dict):\n save_function(signature, module_path + (elem,))\n elif signature.isstaticfunction():\n functions.setdefault(elem, []).append((module_path, signature,))\n elif isinstance(signature, Class):\n save_function(signature.fields, module_path + (elem,))", - "docstring": "Recursively save functions with module name and signature." - }, - { - "code": "def team_profile_get(self, **kwargs) -> SlackResponse:\n self._validate_xoxp_token()\n return self.api_call(\"team.profile.get\", http_verb=\"GET\", params=kwargs)", - "docstring": "Retrieve a team's profile." - }, - { - "code": "def resume_training(self, train_data, model_path, valid_data=None):\n restore_state = self.checkpointer.restore(model_path)\n loss_fn = self._get_loss_fn()\n self.train()\n self._train_model(\n train_data=train_data,\n loss_fn=loss_fn,\n valid_data=valid_data,\n restore_state=restore_state,\n )", - "docstring": "This method resumes training of a classifier by reloading the appropriate state_dicts for each model\n\n Args:\n train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of\n X (data) and Y (labels) for the train split\n model_path: the path to the saved checkpoint for resuming training\n valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of\n X (data) and Y (labels) for the dev split" - }, - { - "code": "def get_data_dir():\n data_dir_paths = [\n os.path.join(os.path.dirname(ansiblecmdb.__file__), 'data'),\n os.path.join(os.path.dirname(sys.argv[0]), '..', 'lib', 'ansiblecmdb', 'data'),\n '/usr/local/lib/ansiblecmdb/data',\n '/usr/lib/ansiblecmdb/data',\n ]\n data_dir = util.find_path(data_dir_paths, 'tpl/html_fancy.tpl')\n if not data_dir:\n sys.stdout.write(\"Couldn't find the data dir for the templates. I tried: {0}\\n\".format(\", \".join(data_dir_paths)))\n sys.exit(1)\n return data_dir", - "docstring": "Find out our installation prefix and data directory. These can be in\n different places depending on how ansible-cmdb was installed." - }, - { - "code": "def host_names(urls):\n host_names = StringCounter()\n for url in urls:\n host_names[urlparse(url).netloc] += urls[url]\n return host_names", - "docstring": "Takes a StringCounter of normalized URL and parses their hostnames\n\n N.B. this assumes that absolute URLs will begin with\n\n http://\n\n in order to accurately resolve the host name.\n Relative URLs will not have host names." - }, - { - "code": "def activate(self):\n raise NotImplementedError(\"{0} | '{1}' must be implemented by '{2}' subclasses!\".format(\n self.__class__.__name__, self.activate.__name__, self.__class__.__name__))", - "docstring": "Sets Component activation state.\n\n :return: Method success.\n :rtype: bool" - }, - { - "code": "def store_fw_db(self, tenant_id, net, subnet_dict, direc):\n serv_obj = self.get_service_obj(tenant_id)\n sub = subnet_dict.get('allocation_pools')[0].get('start')\n serv_obj.update_fw_local_cache(net, direc, sub)\n serv_obj.commit_fw_db()", - "docstring": "Calls the service object routine to commit the FW entry to DB." 
- }, - { - "code": "def dt64_to_dt(dt64):\n ts = (dt64 - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')\n return dt.datetime.utcfromtimestamp(ts)", - "docstring": "Safely converts NumPy datetime64 to a datetime object." - }, - { - "code": "def _wrap_paginated_response(cls, request, response, controls, data,\n head=None):\n paging_response = response['paging']\n if head is None:\n head = response['head_id']\n link = cls._build_url(\n request,\n head=head,\n start=paging_response['start'],\n limit=paging_response['limit'])\n paging = {}\n limit = controls.get('limit')\n start = controls.get(\"start\")\n paging[\"limit\"] = limit\n paging[\"start\"] = start\n if paging_response.get(\"next\") == \"\":\n return cls._wrap_response(\n request,\n data=data,\n metadata={\n 'head': head,\n 'link': link,\n 'paging': paging\n })\n next_id = paging_response['next']\n paging['next_position'] = next_id\n def build_pg_url(start=None):\n return cls._build_url(request, head=head, limit=limit, start=start)\n paging['next'] = build_pg_url(paging_response['next'])\n return cls._wrap_response(\n request,\n data=data,\n metadata={\n 'head': head,\n 'link': link,\n 'paging': paging\n })", - "docstring": "Builds the metadata for a pagingated response and wraps everying in\n a JSON encoded web.Response" - }, - { - "code": "def unmarshaller(self, typed=True):\n if typed:\n return UmxEncoded(self.schema())\n else:\n return RPC.unmarshaller(self, typed)", - "docstring": "Get the appropriate XML decoder.\n @return: Either the (basic|typed) unmarshaller.\n @rtype: L{UmxTyped}" - }, - { - "code": "def _inject(self, value, settings):\n assert isinstance(value, string_types), 'Expected str; got {0.__class__}'.format(value)\n begin, end = '{{', '}}'\n if begin not in value:\n return value, False\n new_value = value\n begin_pos, end_pos = 0, None\n len_begin, len_end = len(begin), len(end)\n len_value = len(new_value)\n while begin_pos < len_value:\n begin_pos = new_value.find(begin, begin_pos)\n if begin_pos == -1:\n break\n before = new_value[:begin_pos]\n begin_pos += len_begin\n end_pos = new_value.find(end, begin_pos)\n if end_pos == -1:\n raise ValueError('Unmatched {begin}...{end} in {value}'.format(**locals()))\n name = new_value[begin_pos:end_pos]\n name = name.strip()\n if not name:\n raise ValueError('Empty name in {value}'.format(**locals()))\n after_pos = end_pos + len_end\n try:\n after = new_value[after_pos:]\n except IndexError:\n after = ''\n try:\n injection_value = settings.get_dotted(name)\n except KeyError:\n raise KeyError('{name} not found in {settings}'.format(**locals()))\n if not isinstance(injection_value, string_types):\n injection_value = self.strategy.encode_value(injection_value)\n new_value = ''.join((before, injection_value, after))\n begin_pos = len(before) + len(injection_value)\n len_value = len(new_value)\n return new_value, (new_value != value)", - "docstring": "Inject ``settings`` into ``value``.\n\n Go through ``value`` looking for ``{{NAME}}`` groups and replace\n each group with the value of the named item from ``settings``.\n\n Args:\n value (str): The value to inject settings into\n settings: An object that provides the dotted access interface\n\n Returns:\n (str, bool): The new value and whether the new value is\n different from the original value" - }, - { - "code": "def pod_absent(name, namespace='default', **kwargs):\n ret = {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''}\n pod = __salt__['kubernetes.show_pod'](name, namespace, **kwargs)\n if 
pod is None:\n ret['result'] = True if not __opts__['test'] else None\n ret['comment'] = 'The pod does not exist'\n return ret\n if __opts__['test']:\n ret['comment'] = 'The pod is going to be deleted'\n ret['result'] = None\n return ret\n res = __salt__['kubernetes.delete_pod'](name, namespace, **kwargs)\n if res['code'] == 200 or res['code'] is None:\n ret['result'] = True\n ret['changes'] = {\n 'kubernetes.pod': {\n 'new': 'absent', 'old': 'present'}}\n if res['code'] is None:\n ret['comment'] = 'In progress'\n else:\n ret['comment'] = res['message']\n else:\n ret['comment'] = 'Something went wrong, response: {0}'.format(res)\n return ret", - "docstring": "Ensures that the named pod is absent from the given namespace.\n\n name\n The name of the pod\n\n namespace\n The name of the namespace" - }, - { - "code": "def handle_mark_read_request(cls, request, message, dispatch, hash_is_valid, redirect_to):\n if hash_is_valid:\n dispatch.mark_read()\n dispatch.save()\n signal = sig_mark_read_success\n else:\n signal = sig_mark_read_failed\n signal.send(cls, request=request, message=message, dispatch=dispatch)\n return redirect(redirect_to)", - "docstring": "Handles a request to mark a message as read.\n\n :param Request request: Request instance\n :param Message message: Message model instance\n :param Dispatch dispatch: Dispatch model instance\n :param bool hash_is_valid: Flag indicating that user supplied request signature is correct\n :param str redirect_to: Redirection URL\n :rtype: list" - }, - { - "code": "def energy_upperbound(self, spins):\n subtheta = self.theta.copy()\n subtheta.fix_variables(spins)\n trees = self._trees\n if not trees:\n assert not subtheta.linear and not subtheta.quadratic\n return subtheta.offset\n energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)\n return energy", - "docstring": "A formula for an upper bound on the energy of Theta with spins fixed.\n\n Args:\n spins (dict): Spin values for a subset of the variables in Theta.\n\n Returns:\n Formula that upper bounds the energy with spins fixed." - }, - { - "code": "def ExecQuery(self, QueryLanguage, Query, namespace=None, **extra):\n exc = None\n instances = None\n method_name = 'ExecQuery'\n if self._operation_recorders:\n self.operation_recorder_reset()\n self.operation_recorder_stage_pywbem_args(\n method=method_name,\n QueryLanguage=QueryLanguage,\n Query=Query,\n namespace=namespace,\n **extra)\n try:\n stats = self.statistics.start_timer(method_name)\n namespace = self._iparam_namespace_from_namespace(namespace)\n result = self._imethodcall(\n method_name,\n namespace,\n QueryLanguage=QueryLanguage,\n Query=Query,\n **extra)\n if result is None:\n instances = []\n else:\n instances = [x[2] for x in result[0][2]]\n for instance in instances:\n instance.path.namespace = namespace\n return instances\n except (CIMXMLParseError, XMLParseError) as exce:\n exce.request_data = self.last_raw_request\n exce.response_data = self.last_raw_reply\n exc = exce\n raise\n except Exception as exce:\n exc = exce\n raise\n finally:\n self._last_operation_time = stats.stop_timer(\n self.last_request_len, self.last_reply_len,\n self.last_server_response_time, exc)\n if self._operation_recorders:\n self.operation_recorder_stage_result(instances, exc)", - "docstring": "Execute a query in a namespace.\n\n This method performs the ExecQuery operation\n (see :term:`DSP0200`). 
See :ref:`WBEM operations` for a list of all\n methods performing such operations.\n\n If the operation succeeds, this method returns.\n Otherwise, this method raises an exception.\n\n Parameters:\n\n QueryLanguage (:term:`string`):\n Name of the query language used in the `Query` parameter, e.g.\n \"DMTF:CQL\" for CIM Query Language, and \"WQL\" for WBEM Query\n Language.\n\n Query (:term:`string`):\n Query string in the query language specified in the `QueryLanguage`\n parameter.\n\n namespace (:term:`string`):\n Name of the CIM namespace to be used (case independent).\n\n Leading and trailing slash characters will be stripped. The lexical\n case will be preserved.\n\n If `None`, the default namespace of the connection object will be\n used.\n\n **extra :\n Additional keyword arguments are passed as additional operation\n parameters to the WBEM server.\n Note that :term:`DSP0200` does not define any additional parameters\n for this operation.\n\n Returns:\n\n A list of :class:`~pywbem.CIMInstance` objects that represents\n the query result.\n\n These instances have their `path` attribute set to identify\n their creation class and the target namespace of the query, but\n they are not addressable instances.\n\n Raises:\n\n Exceptions described in :class:`~pywbem.WBEMConnection`." - }, - { - "code": "def calc_trades(current_contracts, desired_holdings, trade_weights, prices,\n multipliers, **kwargs):\n if not isinstance(trade_weights, dict):\n trade_weights = {\"\": trade_weights}\n generics = []\n for key in trade_weights:\n generics.extend(trade_weights[key].columns)\n if not set(desired_holdings.index).issubset(set(generics)):\n raise ValueError(\"'desired_holdings.index' contains values which \"\n \"cannot be mapped to tradeables.\\n\"\n \"Received: 'desired_holdings.index'\\n {0}\\n\"\n \"Expected in 'trade_weights' set of columns:\\n {1}\\n\"\n .format(sorted(desired_holdings.index),\n sorted(generics)))\n desired_contracts = []\n for root_key in trade_weights:\n gnrc_weights = trade_weights[root_key]\n subset = gnrc_weights.columns.intersection(desired_holdings.index)\n gnrc_des_hlds = desired_holdings.loc[subset]\n gnrc_weights = gnrc_weights.loc[:, subset]\n gnrc_weights = gnrc_weights.loc[~(gnrc_weights == 0).all(axis=1)]\n instr_des_hlds = gnrc_des_hlds * gnrc_weights\n instr_des_hlds = instr_des_hlds.sum(axis=1)\n wprices = prices.loc[instr_des_hlds.index]\n desired_contracts.append(to_contracts(instr_des_hlds, wprices,\n multipliers, **kwargs))\n desired_contracts = pd.concat(desired_contracts, axis=0)\n trades = desired_contracts.subtract(current_contracts, fill_value=0)\n trades = trades.loc[trades != 0]\n trades = trades.sort_index()\n return trades", - "docstring": "Calculate the number of tradeable contracts for rebalancing from a set\n of current contract holdings to a set of desired generic notional holdings\n based on prevailing prices and mapping from generics to tradeable\n instruments. Differences between current holdings and desired holdings\n are treated as 0. Zero trades are dropped.\n\n Parameters\n ----------\n current_contracts: pandas.Series\n Series of current number of contracts held for tradeable instruments.\n Can pass 0 if all holdings are 0.\n desired_holdings: pandas.Series\n Series of desired holdings in base notional currency of generics. 
Index\n is generic contracts, these should be the same generics as in\n trade_weights.\n trade_weights: pandas.DataFrame or dict\n A pandas.DataFrame of loadings of generic contracts on tradeable\n instruments **for a given date**. The columns refer to generic\n contracts and the index is strings representing instrument names.\n If dict is given keys should be root generic names, e.g. 'CL', and\n values should be pandas.DataFrames of loadings. The union of all\n columns should be a superset of the desired_holdings.index\n prices: pandas.Series\n Series of instrument prices. Index is instrument name and values are\n number of contracts. Extra instrument prices will be ignored.\n multipliers: pandas.Series\n Series of instrument multipliers. Index is instrument name and\n values are the multiplier associated with the contract.\n multipliers.index should be a superset of mapped desired_holdings\n intruments.\n kwargs: key word arguments\n Key word arguments to be passed to to_contracts()\n\n Returns\n -------\n A pandas.Series of instrument contract trades, lexigraphically sorted.\n\n Example\n -------\n >>> import pandas as pd\n >>> import mapping.util as util\n >>> wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],\n ... index=[\"CLX16\", \"CLZ16\", \"CLF17\"],\n ... columns=[\"CL1\", \"CL2\"])\n >>> desired_holdings = pd.Series([200000, -50000], index=[\"CL1\", \"CL2\"])\n >>> current_contracts = pd.Series([0, 1, 0],\n ... index=['CLX16', 'CLZ16', 'CLF17'])\n >>> prices = pd.Series([50.32, 50.41, 50.48],\n ... index=['CLX16', 'CLZ16', 'CLF17'])\n >>> multipliers = pd.Series([100, 100, 100],\n ... index=['CLX16', 'CLZ16', 'CLF17'])\n >>> trades = util.calc_trades(current_contracts, desired_holdings, wts,\n ... prices, multipliers)" - }, - { - "code": "def send_close(self, status=STATUS_NORMAL, reason=six.b(\"\")):\n if status < 0 or status >= ABNF.LENGTH_16:\n raise ValueError(\"code is invalid range\")\n self.connected = False\n self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)", - "docstring": "send close data to the server.\n\n status: status code to send. see STATUS_XXX.\n\n reason: the reason to close. This must be string or bytes." - }, - { - "code": "def get_version(self, service_id, version_number):\n\t\tcontent = self._fetch(\"/service/%s/version/%d\" % (service_id, version_number))\n\t\treturn FastlyVersion(self, content)", - "docstring": "Get the version for a particular service." - }, - { - "code": "def _add(self, name, *args, **kw):\n argname = list(self.argdict)[self._argno]\n if argname != name:\n raise NameError(\n 'Setting argument %s, but it should be %s' % (name, argname))\n self._group.add_argument(*args, **kw)\n self.all_arguments.append((args, kw))\n self.names.append(name)\n self._argno += 1", - "docstring": "Add an argument to the underlying parser and grow the list\n .all_arguments and the set .names" - }, - { - "code": "def calculate_sleep_time(attempt, delay_factor=5.0, randomization_factor=.5, max_delay=120):\n if attempt <= 0:\n return 0\n delay = float(2 ** (attempt - 1)) * float(delay_factor)\n delay = delay * (randomization_factor * random.random() + 1)\n return min(delay, max_delay)", - "docstring": "Calculate the sleep time between retries, in seconds.\n\n Based off of `taskcluster.utils.calculateSleepTime`, but with kwargs instead\n of constant `delay_factor`/`randomization_factor`/`max_delay`. 
The taskcluster\n function generally slept for less than a second, which didn't always get\n past server issues.\n\n Args:\n attempt (int): the retry attempt number\n delay_factor (float, optional): a multiplier for the delay time. Defaults to 5.\n randomization_factor (float, optional): a randomization multiplier for the\n delay time. Defaults to .5.\n max_delay (float, optional): the max delay to sleep. Defaults to 120 (seconds).\n\n Returns:\n float: the time to sleep, in seconds." - }, - { - "code": "def json(self):\n return json.dumps(OrderedDict([('metadata', self.metadata), ('records', self.records.values())]))", - "docstring": "Return a list of records as a JSON string. Follows the BibJSON convention." - }, - { - "code": "def __get_value(self, field_name):\r\n value = request.values.get(field_name)\r\n if value is None:\r\n if self.json_form_data is None:\r\n value = None\r\n elif field_name in self.json_form_data:\r\n value = self.json_form_data[field_name]\r\n return value", - "docstring": "Get request Json value by field name" - }, - { - "code": "def command(name, nargs=0, complete=None, range=None, count=None, bang=False,\n register=False, sync=False, allow_nested=False, eval=None):\n def dec(f):\n f._nvim_rpc_method_name = 'command:{}'.format(name)\n f._nvim_rpc_sync = sync\n f._nvim_bind = True\n f._nvim_prefix_plugin_path = True\n opts = {}\n if range is not None:\n opts['range'] = '' if range is True else str(range)\n elif count is not None:\n opts['count'] = count\n if bang:\n opts['bang'] = ''\n if register:\n opts['register'] = ''\n if nargs:\n opts['nargs'] = nargs\n if complete:\n opts['complete'] = complete\n if eval:\n opts['eval'] = eval\n if not sync and allow_nested:\n rpc_sync = \"urgent\"\n else:\n rpc_sync = sync\n f._nvim_rpc_spec = {\n 'type': 'command',\n 'name': name,\n 'sync': rpc_sync,\n 'opts': opts\n }\n return f\n return dec", - "docstring": "Tag a function or plugin method as a Nvim command handler." - }, - { - "code": "def path_wo_ns(obj):\n if isinstance(obj, pywbem.CIMInstance):\n path = obj.path.copy()\n elif isinstance(obj, pywbem.CIMInstanceName):\n path = obj.copy()\n else:\n assert False\n path.host = None\n path.namespace = None\n return path", - "docstring": "Return path of an instance or instance path without host or namespace.\n Creates copy of the object so the original is not changed." - }, - { - "code": "def getTotalBulkPrice(self):\n price = self.getBulkPrice()\n vat = self.getVAT()\n price = price and price or 0\n vat = vat and vat or 0\n return float(price) + (float(price) * float(vat)) / 100", - "docstring": "Compute total bulk price" - }, - { - "code": "def dumps(self):\n io = six.StringIO()\n self.dump(io)\n io.seek(0)\n return io.read()", - "docstring": "Dump data to a string.\n\n :rtype: str" - }, - { - "code": "def init_checks_registry():\n mod = inspect.getmodule(register_check)\n for (name, function) in inspect.getmembers(mod, inspect.isfunction):\n register_check(function)", - "docstring": "Register all globally visible functions.\n\n The first argument name is either 'physical_line' or 'logical_line'." 
- }, - { - "code": "def _place_input_binding(inp_tool, inp_binding, parallel):\n if (parallel in [\"multi-combined\", \"multi-batch\", \"batch-split\", \"batch-parallel\",\n \"batch-merge\", \"batch-single\"] and\n tz.get_in([\"type\", \"type\"], inp_tool) == \"array\"):\n inp_tool[\"type\"][\"inputBinding\"] = inp_binding\n else:\n inp_tool[\"inputBinding\"] = inp_binding\n return inp_tool", - "docstring": "Check nesting of variables to determine where to place the input binding.\n\n We want to allow having multiple files together (like fasta_indices), combined\n with the itemSeparator, but also support having multiple samples where we pass\n things independently." - }, - { - "code": "def checkBim(fileName, minNumber, chromosome):\n nbMarkers = 0\n with open(fileName, 'r') as inputFile:\n for line in inputFile:\n row = line.rstrip(\"\\r\\n\").split(\"\\t\")\n if row[0] == chromosome:\n nbMarkers += 1\n if nbMarkers < minNumber:\n return False\n return True", - "docstring": "Checks the BIM file for chrN markers.\n\n :param fileName:\n :param minNumber:\n :param chromosome:\n\n :type fileName: str\n :type minNumber: int\n :type chromosome: str\n\n :returns: ``True`` if there are at least ``minNumber`` markers on\n chromosome ``chromosome``, ``False`` otherwise." - }, - { - "code": "def welcome(self):\n if not g.user or not g.user.get_id():\n return redirect(appbuilder.get_url_for_login)\n welcome_dashboard_id = (\n db.session\n .query(UserAttribute.welcome_dashboard_id)\n .filter_by(user_id=g.user.get_id())\n .scalar()\n )\n if welcome_dashboard_id:\n return self.dashboard(str(welcome_dashboard_id))\n payload = {\n 'user': bootstrap_user_data(),\n 'common': self.common_bootsrap_payload(),\n }\n return self.render_template(\n 'superset/basic.html',\n entry='welcome',\n title='Superset',\n bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),\n )", - "docstring": "Personalized welcome page" - }, - { - "code": "def get_primitive_type(stmt):\n type_obj = stmt.search_one('type')\n type_name = getattr(type_obj, 'arg', None)\n typedef_obj = getattr(type_obj, 'i_typedef', None)\n if typedef_obj:\n type_name = get_primitive_type(typedef_obj)\n elif type_obj and not check_primitive_type(type_obj):\n raise Exception('%s is not a primitive! Incomplete parse tree?' %\n type_name)\n return type_name", - "docstring": "Recurses through the typedefs and returns\n the most primitive YANG type defined." 
- }, - { - "code": "def commentdoc(text):\n if not text:\n raise ValueError(\n 'Expected non-empty comment str, got {}'.format(repr(text))\n )\n commentlines = []\n for line in text.splitlines():\n alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))\n starts_with_whitespace = bool(\n WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])\n )\n if starts_with_whitespace:\n prefix = alternating_words_ws[0]\n alternating_words_ws = alternating_words_ws[1:]\n else:\n prefix = NIL\n if len(alternating_words_ws) % 2 == 0:\n alternating_words_ws = alternating_words_ws[:-1]\n for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):\n part, is_ws = tup\n if is_ws:\n alternating_words_ws[idx] = flat_choice(\n when_flat=part,\n when_broken=always_break(\n concat([\n HARDLINE,\n '\n ])\n )\n )\n commentlines.append(\n concat([\n '\n prefix,\n fill(alternating_words_ws)\n ])\n )\n outer = identity\n if len(commentlines) > 1:\n outer = always_break\n return annotate(\n Token.COMMENT_SINGLE,\n outer(concat(intersperse(HARDLINE, commentlines)))\n )", - "docstring": "Returns a Doc representing a comment `text`. `text` is\n treated as words, and any whitespace may be used to break\n the comment to multiple lines." - }, - { - "code": "def get_error_probability(self):\n a = self._observable.get_error_probability()\n b = self._unobservable.get_error_probability()\n return a+(1-a)*b", - "docstring": "This means for the base we are talking about how many errors between 0 and 1 do we attribute to it?\n For the 'unobserved' errors, these can only count when one is adjacent to base\n\n :returns: error probability p(error_observed)+(1-p_error_observed)*error_unobserved\n :rtype: float" - }, - { - "code": "def extract(self, item, article_candidate_list):\n list_text = []\n min_number_words = 15\n for article_candidate in article_candidate_list:\n if article_candidate.text != None:\n list_text.append((article_candidate.text, article_candidate.extractor))\n for text_tuple in list_text:\n if len(text_tuple[0].split()) < min_number_words:\n list_text.remove(text_tuple)\n if len(list_text) == 0:\n return None\n if len(list_text) < 2:\n return list_text[0][0]\n else:\n list_score = []\n for a, b, in itertools.combinations(list_text, 2):\n set_a = set(a[0].split())\n set_b = set(b[0].split())\n symmetric_difference_a_b = set_a ^ set_b\n intersection_a_b = set_a & set_b\n if intersection_a_b == 0:\n intersection_a_b = -1\n score = 1 - ((len(symmetric_difference_a_b)) / (2 * len(intersection_a_b)))\n list_score.append((score, a[1], b[1]))\n best_score = max(list_score, key=lambda item: item[0])\n if \"newspaper\" in best_score:\n return (list(filter(lambda x: x[1] == \"newspaper\", list_text))[0][0])\n else:\n top_candidates = []\n for tuple in list_text:\n if tuple[1] == best_score[1] or tuple[1] == best_score[2]:\n top_candidates.append(tuple)\n if len(top_candidates[0][0]) > len(top_candidates[1][0]):\n return (top_candidates[0][0])\n else:\n return (top_candidates[1][0])", - "docstring": "Compares the extracted texts.\n\n :param item: The corresponding NewscrawlerItem\n :param article_candidate_list: A list, the list of ArticleCandidate-Objects which have been extracted\n :return: A string, the most likely text" - }, - { - "code": "def _oauth_tokengetter(token=None):\n token = session.get(\"oauth\")\n log.debug(\"Token Get: {0}\".format(token))\n return token", - "docstring": "Default function to return the current user oauth token\n from session cookie." 
- }, - { - "code": "def parse(cls, signed_request, application_secret_key):\n def decode(encoded):\n padding = '=' * (len(encoded) % 4)\n return base64.urlsafe_b64decode(encoded + padding)\n try:\n encoded_signature, encoded_payload = (str(string) for string in signed_request.split('.', 2))\n signature = decode(encoded_signature)\n signed_request_data = json.loads(decode(encoded_payload).decode('utf-8'))\n except (TypeError, ValueError):\n raise SignedRequestError(\"Signed request had a corrupt payload\")\n if signed_request_data.get('algorithm', '').upper() != 'HMAC-SHA256':\n raise SignedRequestError(\"Signed request is using an unknown algorithm\")\n expected_signature = hmac.new(application_secret_key.encode('utf-8'), msg=encoded_payload.encode('utf-8'),\n digestmod=hashlib.sha256).digest()\n if signature != expected_signature:\n raise SignedRequestError(\"Signed request signature mismatch\")\n return signed_request_data", - "docstring": "Parse a signed request, returning a dictionary describing its payload." - }, - { - "code": "def compile_theme(theme_id=None):\n from engineer.processors import convert_less\n from engineer.themes import ThemeManager\n if theme_id is None:\n themes = ThemeManager.themes().values()\n else:\n themes = [ThemeManager.theme(theme_id)]\n with(indent(2)):\n puts(colored.yellow(\"Compiling %s themes.\" % len(themes)))\n for theme in themes:\n theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()\n puts(colored.cyan(\"Compiling theme %s to %s\" % (theme.id, theme_output_path)))\n with indent(4):\n puts(\"Compiling...\")\n convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),\n theme_output_path,\n minify=True)\n puts(colored.green(\"Done.\", bold=True))", - "docstring": "Compiles a theme." - }, - { - "code": "def is_product_owner(self, team_id):\n if self.is_super_admin():\n return True\n team_id = uuid.UUID(str(team_id))\n return team_id in self.child_teams_ids", - "docstring": "Ensure the user is a PRODUCT_OWNER." 
- }, - { - "code": "def get_password_data(\n name=None,\n kwargs=None,\n instance_id=None,\n call=None,\n ):\n if call != 'action':\n raise SaltCloudSystemExit(\n 'The get_password_data action must be called with '\n '-a or --action.'\n )\n if not instance_id:\n instance_id = _get_node(name)['instanceId']\n if kwargs is None:\n kwargs = {}\n if instance_id is None:\n if 'instance_id' in kwargs:\n instance_id = kwargs['instance_id']\n del kwargs['instance_id']\n params = {'Action': 'GetPasswordData',\n 'InstanceId': instance_id}\n ret = {}\n data = aws.query(params,\n return_root=True,\n location=get_location(),\n provider=get_provider(),\n opts=__opts__,\n sigver='4')\n for item in data:\n ret[next(six.iterkeys(item))] = next(six.itervalues(item))\n if not HAS_M2 and not HAS_PYCRYPTO:\n return ret\n if 'key' not in kwargs:\n if 'key_file' in kwargs:\n with salt.utils.files.fopen(kwargs['key_file'], 'r') as kf_:\n kwargs['key'] = salt.utils.stringutils.to_unicode(kf_.read())\n if 'key' in kwargs:\n pwdata = ret.get('passwordData', None)\n if pwdata is not None:\n rsa_key = kwargs['key']\n pwdata = base64.b64decode(pwdata)\n if HAS_M2:\n key = RSA.load_key_string(rsa_key.encode('ascii'))\n password = key.private_decrypt(pwdata, RSA.pkcs1_padding)\n else:\n dsize = Crypto.Hash.SHA.digest_size\n sentinel = Crypto.Random.new().read(15 + dsize)\n key_obj = Crypto.PublicKey.RSA.importKey(rsa_key)\n key_obj = PKCS1_v1_5.new(key_obj)\n password = key_obj.decrypt(pwdata, sentinel)\n ret['password'] = salt.utils.stringutils.to_unicode(password)\n return ret", - "docstring": "Return password data for a Windows instance.\n\n By default only the encrypted password data will be returned. However, if a\n key_file is passed in, then a decrypted password will also be returned.\n\n Note that the key_file references the private key that was used to generate\n the keypair associated with this instance. This private key will _not_ be\n transmitted to Amazon; it is only used internally inside of Salt Cloud to\n decrypt data _after_ it has been received from Amazon.\n\n CLI Examples:\n\n .. 
code-block:: bash\n\n salt-cloud -a get_password_data mymachine\n salt-cloud -a get_password_data mymachine key_file=/root/ec2key.pem\n\n Note: PKCS1_v1_5 was added in PyCrypto 2.5" - }, - { - "code": "def update_app_profile(\n self,\n app_profile,\n update_mask,\n ignore_warnings=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if \"update_app_profile\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"update_app_profile\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.update_app_profile,\n default_retry=self._method_configs[\"UpdateAppProfile\"].retry,\n default_timeout=self._method_configs[\"UpdateAppProfile\"].timeout,\n client_info=self._client_info,\n )\n request = bigtable_instance_admin_pb2.UpdateAppProfileRequest(\n app_profile=app_profile,\n update_mask=update_mask,\n ignore_warnings=ignore_warnings,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"app_profile.name\", app_profile.name)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n operation = self._inner_api_calls[\"update_app_profile\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )\n return google.api_core.operation.from_gapic(\n operation,\n self.transport._operations_client,\n instance_pb2.AppProfile,\n metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata,\n )", - "docstring": "Updates an app profile within an instance.\n\n Example:\n >>> from google.cloud import bigtable_admin_v2\n >>>\n >>> client = bigtable_admin_v2.BigtableInstanceAdminClient()\n >>>\n >>> # TODO: Initialize `app_profile`:\n >>> app_profile = {}\n >>>\n >>> # TODO: Initialize `update_mask`:\n >>> update_mask = {}\n >>>\n >>> response = client.update_app_profile(app_profile, update_mask)\n >>>\n >>> def callback(operation_future):\n ... # Handle result.\n ... result = operation_future.result()\n >>>\n >>> response.add_done_callback(callback)\n >>>\n >>> # Handle metadata.\n >>> metadata = response.metadata()\n\n Args:\n app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile which will (partially) replace the current value.\n\n If a dict is provided, it must be of the same form as the protobuf\n message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile`\n update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of app profile fields which should be replaced.\n If unset, all fields will be replaced.\n\n If a dict is provided, it must be of the same form as the protobuf\n message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask`\n ignore_warnings (bool): If true, ignore safety checks when updating the app profile.\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will not\n be retried.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. 
Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid." - }, - { - "code": "def load_hdu(self, hdu):\n image = AstroImage.AstroImage(logger=self.logger)\n image.load_hdu(hdu)\n self.set_image(image)", - "docstring": "Load an HDU into the viewer." - }, - { - "code": "def npoints_towards(lon, lat, depth, azimuth, hdist, vdist, npoints):\n assert npoints > 1\n rlon, rlat = numpy.radians(lon), numpy.radians(lat)\n tc = numpy.radians(360 - azimuth)\n hdists = numpy.arange(npoints, dtype=float)\n hdists *= (hdist / EARTH_RADIUS) / (npoints - 1)\n vdists = numpy.arange(npoints, dtype=float)\n vdists *= vdist / (npoints - 1)\n sin_dists = numpy.sin(hdists)\n cos_dists = numpy.cos(hdists)\n sin_lat = numpy.sin(rlat)\n cos_lat = numpy.cos(rlat)\n sin_lats = sin_lat * cos_dists + cos_lat * sin_dists * numpy.cos(tc)\n lats = numpy.degrees(numpy.arcsin(sin_lats))\n dlon = numpy.arctan2(numpy.sin(tc) * sin_dists * cos_lat,\n cos_dists - sin_lat * sin_lats)\n lons = numpy.mod(rlon - dlon + numpy.pi, 2 * numpy.pi) - numpy.pi\n lons = numpy.degrees(lons)\n depths = vdists + depth\n lons[0] = lon\n lats[0] = lat\n depths[0] = depth\n return lons, lats, depths", - "docstring": "Find a list of specified number of points starting from a given one\n along a great circle arc with a given azimuth measured in a given point.\n\n :param float lon, lat, depth:\n Coordinates of a point to start from. The first point in a resulting\n list has these coordinates.\n :param azimuth:\n A direction representing a great circle arc together with a reference\n point.\n :param hdist:\n Horizontal (geodetic) distance from reference point to the last point\n of the resulting list, in km.\n :param vdist:\n Vertical (depth) distance between reference and the last point, in km.\n :param npoints:\n Integer number of points to return. 
First and last points count,\n so if there have to be two intervals, ``npoints`` should be 3.\n :returns:\n Tuple of three 1d numpy arrays: longitudes, latitudes and depths\n of resulting points respectively.\n\n Implements \"completely general but more complicated algorithm\" from\n http://williams.best.vwh.net/avform.htm#LL" - }, - { - "code": "def _attend_process(self, proc, sleeptime):\n try:\n proc.wait(timeout=sleeptime)\n except psutil.TimeoutExpired:\n return True\n return False", - "docstring": "Waits on a process for a given time to see if it finishes, returns True\n if it's still running after the given time or False as soon as it \n returns.\n\n :param psutil.Popen proc: Process object opened by psutil.Popen()\n :param float sleeptime: Time to wait\n :return bool: True if process is still running; otherwise false" - }, - { - "code": "async def get(self) -> InfoDict:\n if self._seen_kork:\n raise AnalysisComplete()\n info = await self._queue.get()\n if not info:\n self._seen_kork = True\n await self._finished\n raise AnalysisComplete()\n return info", - "docstring": "Waits for the next dictionary of information from the engine and\n returns it.\n\n It might be more convenient to use ``async for info in analysis: ...``.\n\n :raises: :exc:`chess.engine.AnalysisComplete` if the analysis is\n complete (or has been stopped) and all information has been\n consumed. Use :func:`~chess.engine.AnalysisResult.next()` if you\n prefer to get ``None`` instead of an exception." - }, - { - "code": "def _find_guids(guid_string):\n guids = []\n for found_guid in re.finditer(GUID_REGEX, guid_string):\n if found_guid.groups():\n guids.append(found_guid.group(0).strip('{}'))\n return sorted(list(set(guids)))", - "docstring": "Return the set of GUIDs found in guid_string\n\n :param str guid_string:\n String containing zero or more GUIDs. Each GUID may or may not be\n enclosed in {}\n\n Example data (this string contains two distinct GUIDs):\n\n PARENT_SNAPSHOT_ID SNAPSHOT_ID\n {a5b8999f-5d95-4aff-82de-e515b0101b66}\n {a5b8999f-5d95-4aff-82de-e515b0101b66} *{a7345be5-ab66-478c-946e-a6c2caf14909}" - }, - { - "code": "def safe_download(self):\n def _markStreamKeyOver30Seconds(stream):\n self._connection.request(\n 'markStreamKeyOver30Seconds',\n {'streamServerID': stream.ip,\n 'artistID': self.artist.id,\n 'songQueueID': self._connection.session.queue,\n 'songID': self.id,\n 'songQueueSongID': 1,\n 'streamKey': stream.key},\n self._connection.header('markStreamKeyOver30Seconds', 'jsqueue'))\n stream = self.stream\n timer = threading.Timer(30, _markStreamKeyOver30Seconds, [stream])\n timer.start()\n raw = stream.data.read()\n if len(raw) == stream.size:\n timer.cancel()\n self._connection.request(\n 'markSongDownloadedEx',\n {'streamServerID': stream.ip,\n 'songID': self.id,\n 'streamKey': stream.key},\n self._connection.header('markSongDownloadedEx', 'jsqueue'))\n self._connection.request(\n 'removeSongsFromQueue',\n {'userRemoved': True,\n 'songQueueID': self._connection.session.queue,\n 'songQueueSongIDs': [1]},\n self._connection.header('removeSongsFromQueue', 'jsqueue'))\n return raw\n else:\n raise ValueError(\"Content-Length {}, but read {}\"\n .format(stream.size, len(raw)))", - "docstring": "Download a song respecting Grooveshark's API.\n\n :return: The raw song data." 
- }, - { - "code": "def _openFile(self):\n file_types = \"Comma Separated Values (*.csv);;Text files (*.txt);;All Files (*)\"\n ret = QtGui.QFileDialog.getOpenFileName(self,\n self.tr('open file'),\n filter=file_types)\n if isinstance(ret, tuple):\n ret = ret[0]\n if ret:\n self._filenameLineEdit.setText(ret)\n self._updateFilename()", - "docstring": "Opens a file dialog and sets a value for the QLineEdit widget.\n\n This method is also a `SLOT`." - }, - { - "code": "def _parse_udevadm_info(udev_info):\n devices = []\n dev = {}\n for line in (line.strip() for line in udev_info.splitlines()):\n if line:\n line = line.split(':', 1)\n if len(line) != 2:\n continue\n query, data = line\n if query == 'E':\n if query not in dev:\n dev[query] = {}\n key, val = data.strip().split('=', 1)\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except ValueError:\n pass\n dev[query][key] = val\n else:\n if query not in dev:\n dev[query] = []\n dev[query].append(data.strip())\n else:\n if dev:\n devices.append(_normalize_info(dev))\n dev = {}\n if dev:\n _normalize_info(dev)\n devices.append(_normalize_info(dev))\n return devices", - "docstring": "Parse the info returned by udevadm command." - }, - { - "code": "def get(self, request, *args, **kwargs):\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n top_value = self.get_top_level_facet_value()\n subfacets = SEARCH_SUBFACETS.get(top_value, [])\n self.active_facets = [self.top_facet] + subfacets\n if form.is_valid():\n self.query = form.cleaned_data.get(self.search_field)\n else:\n self.query = \"\"\n sqs = self.pre_facet_sqs()\n for facet in self.active_facets:\n sqs = facet.set_on_sqs(sqs)\n facet_counts = sqs.facet_counts()\n for facet in self.active_facets:\n facet.set_values_from_sqs_facet_counts(facet_counts)\n facet.apply_request_and_page_to_values(self.request, self.fluent_page)\n for facet in self.active_facets:\n sqs = facet.narrow_sqs(sqs)\n context = self.get_context_data(**{\n self.form_name: form,\n 'facets': self.active_facets,\n 'top_facet': self.top_facet,\n 'query': self.query,\n 'object_list': sqs,\n 'page': self.fluent_page,\n 'show_placeholders': self.show_placeholders()\n })\n return self.render_to_response(context)", - "docstring": "User has conducted a search, or default state" - }, - { - "code": "def getExpInfo(expnum):\n col_names=['object',\n 'e.expnum',\n 'mjdate',\n 'uttime',\n 'filter',\n 'elongation',\n 'obs_iq_refccd',\n 'triple', 'qso_status']\n sql=\"SELECT \"\n sep=\" \"\n for col_name in col_names:\n sql=sql+sep+col_name\n sep=\",\"\n sql=sql+\" FROM bucket.exposure e \"\n sql=sql+\" JOIN bucket.circumstance c ON e.expnum=c.expnum \"\n sql=sql+\" LEFT JOIN triple_members t ON e.expnum=t.expnum \"\n sql=sql+\" WHERE e.expnum=%d \" % ( expnum ) \n cfeps.execute(sql)\n rows=cfeps.fetchall()\n result={}\n for idx in range(len(rows[0])):\n result[col_names[idx]]=rows[0][idx]\n return(result)", - "docstring": "Return a dictionary of information about a particular exposure" - }, - { - "code": "def is_sorted(self, ranks=None):\n ranks = ranks or self.ranks\n return check_sorted(self, ranks)", - "docstring": "Checks whether the stack is sorted.\n\n :arg dict ranks:\n The rank dict to reference for checking. If ``None``, it will\n default to ``DEFAULT_RANKS``.\n\n :returns:\n Whether or not the cards are sorted." 
- }, - { - "code": "def hscan_iter(self, name, match=None, count=10):\n cursor = '0'\n while cursor != 0:\n cursor, data = self.hscan(name, cursor=cursor,\n match=match, count=count)\n for item in data.items():\n yield item", - "docstring": "Emulate hscan_iter." - }, - { - "code": "def canonicalize(message):\n if message.is_multipart() \\\n or message.get('Content-Transfer-Encoding') != 'binary':\n return mime_to_bytes(message, 0).replace(\n b'\\r\\n', b'\\n').replace(b'\\r', b'\\n').replace(b'\\n', b'\\r\\n')\n else:\n message_header = ''\n message_body = message.get_payload(decode=True)\n for k, v in message.items():\n message_header += '{}: {}\\r\\n'.format(k, v)\n message_header += '\\r\\n'\n return message_header.encode('utf-8') + message_body", - "docstring": "Function to convert an email Message to standard format string\n\n :param message: email.Message to be converted to standard string\n :return: the standard representation of the email message in bytes" - }, - { - "code": "def worker(module_path,\n python_version,\n operator_name,\n occurrence,\n test_command,\n timeout):\n try:\n operator_class = cosmic_ray.plugins.get_operator(operator_name)\n operator = operator_class(python_version)\n with cosmic_ray.mutating.use_mutation(module_path, operator,\n occurrence) as (original_code,\n mutated_code):\n if mutated_code is None:\n return WorkResult(worker_outcome=WorkerOutcome.NO_TEST)\n test_outcome, output = run_tests(test_command, timeout)\n diff = _make_diff(original_code, mutated_code, module_path)\n return WorkResult(\n output=output,\n diff='\\n'.join(diff),\n test_outcome=test_outcome,\n worker_outcome=WorkerOutcome.NORMAL)\n except Exception:\n return WorkResult(\n output=traceback.format_exc(),\n test_outcome=TestOutcome.INCOMPETENT,\n worker_outcome=WorkerOutcome.EXCEPTION)", - "docstring": "Mutate the OCCURRENCE-th site for OPERATOR_NAME in MODULE_PATH, run the\n tests, and report the results.\n\n This is fundamentally the single-mutation-and-test-run process\n implementation.\n\n There are three high-level ways that a worker can finish. First, it could\n fail exceptionally, meaning that some uncaught exception made its way from\n some part of the operation to terminate the function. This function will\n intercept all exceptions and return it in a non-exceptional structure.\n\n Second, the mutation testing machinery may determine that there is no\n OCCURENCE-th instance for OPERATOR_NAME in the module under test. In this\n case there is no way to report a test result (i.e. killed, survived, or\n incompetent) so a special value is returned indicating that no mutation is\n possible.\n\n Finally, and hopefully normally, the worker will find that it can run a\n test. It will do so and report back the result - killed, survived, or\n incompetent - in a structured way.\n\n Args:\n module_name: The path to the module to mutate\n python_version: The version of Python to use when interpreting the code in `module_path`.\n A string of the form \"MAJOR.MINOR\", e.g. \"3.6\" for Python 3.6.x.\n operator_name: The name of the operator plugin to use\n occurrence: The occurrence of the operator to apply\n test_command: The command to execute to run the tests\n timeout: The maximum amount of time (seconds) to let the tests run\n\n Returns: A WorkResult\n\n Raises: This will generally not raise any exceptions. Rather, exceptions\n will be reported using the 'exception' result-type in the return value." 
- }, - { - "code": "def weld_iloc_indices_with_missing(array, weld_type, indices):\n weld_obj = create_empty_weld_object()\n weld_obj_id_array = get_weld_obj_id(weld_obj, array)\n weld_obj_id_indices = get_weld_obj_id(weld_obj, indices)\n missing_literal = default_missing_data_literal(weld_type)\n if weld_type == WeldVec(WeldChar()):\n missing_literal = get_weld_obj_id(weld_obj, missing_literal)\n weld_template =\n weld_obj.weld_code = weld_template.format(array=weld_obj_id_array,\n indices=weld_obj_id_indices,\n type=weld_type,\n missing=missing_literal)\n return weld_obj", - "docstring": "Retrieve the values at indices. Indices greater than array length get replaced with\n a corresponding-type missing value literal.\n\n Parameters\n ----------\n array : numpy.ndarray or WeldObject\n Input data. Assumed to be bool data.\n weld_type : WeldType\n The WeldType of the array data.\n indices : numpy.ndarray or WeldObject\n The indices to lookup.\n\n Returns\n -------\n WeldObject\n Representation of this computation." - }, - { - "code": "def close(self):\n\t\tif not self.connected:\n\t\t\treturn\n\t\tself.connected = False\n\t\tif self.handler.wfile.closed:\n\t\t\treturn\n\t\tif select.select([], [self.handler.wfile], [], 0)[1]:\n\t\t\twith self.lock:\n\t\t\t\tself.handler.wfile.write(b'\\x88\\x00')\n\t\tself.handler.wfile.flush()\n\t\tself.on_closed()", - "docstring": "Close the web socket connection and stop processing results. If the\n\t\tconnection is still open, a WebSocket close message will be sent to the\n\t\tpeer." - }, - { - "code": "def read_stack_qwords(self, count, offset = 0):\n stackData = self.read_stack_data(count * 8, offset)\n return struct.unpack('<'+('Q'*count), stackData)", - "docstring": "Reads QWORDs from the top of the stack.\n\n @type count: int\n @param count: Number of QWORDs to read.\n\n @type offset: int\n @param offset: Offset from the stack pointer to begin reading.\n\n @rtype: tuple( int... )\n @return: Tuple of integers read from the stack.\n\n @raise WindowsError: Could not read the requested data." - }, - { - "code": "def vector_unit_nullnull(v):\n if v.size == 0:\n return v\n mag = vector_mag(v)\n v_new = v.copy()\n v_new[mag > 0.0] /= mag[mag > 0.0][..., np.newaxis]\n return v_new", - "docstring": "Return unit vectors.\n Any null vectors remain null vectors.\n\n Parameters\n ----------\n v: array, shape (a1, a2, ..., d)\n Cartesian vectors, with last axis indexing the dimension.\n\n Returns\n -------\n v_new: array, shape of v" - }, - { - "code": "def generate_image(self, chars):\n background = random_color(238, 255)\n color = random_color(10, 200, random.randint(220, 255))\n im = self.create_captcha_image(chars, color, background)\n self.create_noise_dots(im, color)\n self.create_noise_curve(im, color)\n im = im.filter(ImageFilter.SMOOTH)\n return im", - "docstring": "Generate the image of the given characters.\n\n :param chars: text to be generated." 
- }, - { - "code": "def ds_cT(ds, x, y, xy_srs=wgs_srs):\n ds_srs = get_ds_srs(ds)\n mX = x\n mY = y\n if xy_srs is not None:\n if not ds_srs.IsSame(xy_srs):\n mX, mY, mZ = cT_helper(x, y, 0, xy_srs, ds_srs)\n return mX, mY", - "docstring": "Convert input point coordinates to map coordinates that match input dataset" - }, - { - "code": "def fix_bam_header(job, bamfile, sample_type, univ_options, samtools_options, retained_chroms=None):\n if retained_chroms is None:\n retained_chroms = []\n work_dir = os.getcwd()\n input_files = {\n sample_type + '.bam': bamfile}\n input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)\n parameters = ['view',\n '-H',\n input_files[sample_type + '.bam']]\n with open('/'.join([work_dir, sample_type + '_input_bam.header']), 'w') as headerfile:\n docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,\n dockerhub=univ_options['dockerhub'], outfile=headerfile,\n tool_version=samtools_options['version'])\n with open(headerfile.name, 'r') as headerfile, \\\n open('/'.join([work_dir, sample_type + '_output_bam.header']), 'w') as outheaderfile:\n for line in headerfile:\n if line.startswith('@PG'):\n line = '\\t'.join([x for x in line.strip().split('\\t') if not x.startswith('CL')])\n if retained_chroms and line.startswith('@SQ'):\n if line.strip().split()[1].lstrip('SN:') not in retained_chroms:\n continue\n print(line.strip(), file=outheaderfile)\n parameters = ['reheader',\n docker_path(outheaderfile.name),\n input_files[sample_type + '.bam']]\n with open('/'.join([work_dir, sample_type + '_fixPG.bam']), 'w') as fixpg_bamfile:\n docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,\n dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile,\n tool_version=samtools_options['version'])\n output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)\n job.fileStore.deleteGlobalFile(bamfile)\n job.fileStore.logToMaster('Ran reheader on %s:%s successfully'\n % (univ_options['patient'], sample_type))\n return output_file", - "docstring": "Fix the bam header to remove the command line call. Failing to do this causes Picard to reject\n the bam.\n\n :param dict bamfile: The input bam file\n :param str sample_type: Description of the sample to inject into the filename\n :param dict univ_options: Dict of universal options used by almost all tools\n :param dict samtools_options: Options specific to samtools\n :param list retained_chroms: A list of chromosomes to retain\n :return: fsID for the output bam\n :rtype: toil.fileStore.FileID" - }, - { - "code": "def set_state_vector(self, state: Union[int, np.ndarray]):\n self._stepper.reset_state(state)", - "docstring": "Updates the state of the simulator to the given new state.\n\n Args:\n state: If this is an int, then this is the state to reset\n the stepper to, expressed as an integer of the computational basis.\n Integer to bitwise indices is little endian. Otherwise if this is\n a np.ndarray this must be the correct size and have dtype of\n np.complex64.\n\n Raises:\n ValueError if the state is incorrectly sized or not of the correct\n dtype." 
- }, - { - "code": "def device_id(self):\n if self.is_block:\n for filename in self._P.Block.Symlinks:\n parts = decode_ay(filename).split('/')\n if parts[-2] == 'by-id':\n return parts[-1]\n elif self.is_drive:\n return self._assocdrive._P.Drive.Id\n return ''", - "docstring": "Return a unique and persistent identifier for the device.\n\n This is the basename (last path component) of the symlink in\n `/dev/disk/by-id/`." - }, - { - "code": "def _location(self, obj):\n field_name = self.clean_id_name\n return self.request.route_url(\n self._resource.uid,\n **{self._resource.id_name: getattr(obj, field_name)})", - "docstring": "Get location of the `obj`\n\n Arguments:\n :obj: self.Model instance." - }, - { - "code": "def calc_mass(nu_max, delta_nu, teff):\n NU_MAX = 3140.0\n DELTA_NU = 135.03\n TEFF = 5777.0\n return (nu_max/NU_MAX)**3 * (delta_nu/DELTA_NU)**(-4) * (teff/TEFF)**1.5", - "docstring": "asteroseismic scaling relations" - }, - { - "code": "def delete_where_user_id(cls, user_id):\r\n result = cls.where_user_id(user_id)\r\n if result is None:\r\n return None\r\n result.delete()\r\n return True", - "docstring": "delete by email" - }, - { - "code": "def make_error_redirect(self, authorization_error=None):\n if not self.redirect_uri:\n return HttpResponseRedirect(self.missing_redirect_uri)\n authorization_error = (authorization_error or\n AccessDenied('user denied the request'))\n response_params = get_error_details(authorization_error)\n if self.state is not None:\n response_params['state'] = self.state\n return HttpResponseRedirect(\n update_parameters(self.redirect_uri, response_params))", - "docstring": "Return a Django ``HttpResponseRedirect`` describing the request failure.\n\n If the :py:meth:`validate` method raises an error, the authorization\n endpoint should return the result of calling this method like so:\n\n >>> auth_code_generator = (\n >>> AuthorizationCodeGenerator('/oauth2/missing_redirect_uri/'))\n >>> try:\n >>> auth_code_generator.validate(request)\n >>> except AuthorizationError as authorization_error:\n >>> return auth_code_generator.make_error_redirect(authorization_error)\n\n If there is no known Client ``redirect_uri`` (because it is malformed, or\n the Client is invalid, or if the supplied ``redirect_uri`` does not match\n the regsitered value, or some other request failure) then the response will\n redirect to the ``missing_redirect_uri`` passed to the :py:meth:`__init__`\n method.\n\n Also used to signify user denial; call this method without passing in the\n optional ``authorization_error`` argument to return a generic\n :py:class:`AccessDenied` message.\n\n >>> if not user_accepted_request:\n >>> return auth_code_generator.make_error_redirect()" - }, - { - "code": "def tictactoe(w, i, player, opponent, grid=None):\n \"Put two strategies to a classic battle of wits.\"\n grid = grid or empty_grid\n while True:\n w.render_to_terminal(w.array_from_text(view(grid)))\n if is_won(grid):\n print(whose_move(grid), \"wins.\")\n break\n if not successors(grid):\n print(\"A draw.\")\n break\n grid = player(w, i, grid)\n player, opponent = opponent, player", - "docstring": "Put two strategies to a classic battle of wits." 
- }, - { - "code": "def wr_xlsx(self, fout_xlsx=\"gos_depth01.xlsx\", **kws):\n data_nts = self.get_d1nts()\n if 'fld2col_widths' not in kws:\n kws['fld2col_widths'] = {'D1': 6, 'NS':3, 'depth': 5, 'GO': 12, 'name': 40}\n if 'hdrs' not in kws:\n kws['hdrs'] = self.hdrs\n wr_xlsx_tbl(fout_xlsx, data_nts, **kws)", - "docstring": "Write xlsx table of depth-01 GO terms and their letter representation." - }, - { - "code": "def total_msg_recv(self):\n return (self.get_count(PeerCounterNames.RECV_UPDATES) +\n self.get_count(PeerCounterNames.RECV_REFRESH) +\n self.get_count(PeerCounterNames.RECV_NOTIFICATION))", - "docstring": "Returns total number of UPDATE, NOTIFICATION and ROUTE_REFRESH\n messages received from this peer." - }, - { - "code": "def read(self):\n\t\twith open(self.default_file) as json_file:\n\t\t\ttry:\n\t\t\t\treturn json.load(json_file)\n\t\t\texcept Exception as e:\n\t\t\t\traise 'empty file'", - "docstring": "read default csp settings from json file" - }, - { - "code": "def extend_request_args(self, args, item_cls, item_type, key,\n parameters, orig=False):\n try:\n item = self.get_item(item_cls, item_type, key)\n except KeyError:\n pass\n else:\n for parameter in parameters:\n if orig:\n try:\n args[parameter] = item[parameter]\n except KeyError:\n pass\n else:\n try:\n args[parameter] = item[verified_claim_name(parameter)]\n except KeyError:\n try:\n args[parameter] = item[parameter]\n except KeyError:\n pass\n return args", - "docstring": "Add a set of parameters and their value to a set of request arguments.\n\n :param args: A dictionary\n :param item_cls: The :py:class:`oidcmsg.message.Message` subclass\n that describes the item\n :param item_type: The type of item, this is one of the parameter\n names in the :py:class:`oidcservice.state_interface.State` class.\n :param key: The key to the information in the database\n :param parameters: A list of parameters who's values this method\n will return.\n :param orig: Where the value of a claim is a signed JWT return\n that.\n :return: A dictionary with keys from the list of parameters and\n values being the values of those parameters in the item.\n If the parameter does not a appear in the item it will not appear\n in the returned dictionary." - }, - { - "code": "def copy(self):\n other = PackageFilterList.__new__(PackageFilterList)\n other.filters = [x.copy() for x in self.filters]\n return other", - "docstring": "Return a copy of the filter list.\n\n Adding rules to the copy will not alter the source." 
- }, - { - "code": "def send_file(fd, filename=None, size=None, timestamp=None, ctype=None,\n charset=CHARSET, attachment=False, wrapper=DEFAULT_WRAPPER):\n if not hasattr(fd, 'read'):\n raise ValueError(\"Object '{}' has no read() method\".format(fd))\n headers = {}\n status = 200\n if not ctype and filename is not None:\n ctype, enc = mimetypes.guess_type(filename)\n if enc:\n headers['Content-Encoding'] = enc\n if ctype:\n if ctype.startswith('text/'):\n ctype += '; charset=%s' % charset\n headers['Content-Type'] = ctype\n if size:\n headers['Content-Length'] = size\n headers['Accept-Ranges'] = 'bytes'\n if timestamp:\n headers['Last-Modified'] = format_ts(timestamp)\n modsince = request.environ.get('HTTP_IF_MODIFIED_SINCE')\n print(modsince)\n modsince = modsince and parse_date(modsince.split(';')[0].strip())\n if modsince is not None and modsince >= timestamp:\n headers['Date'] = format_ts()\n return HTTPResponse(status=304, **headers)\n if attachment and filename:\n headers['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n if request.method == 'HEAD':\n fd = ''\n ranges = request.environ.get('HTTP_RANGE')\n if size and ranges:\n ranges = list(parse_range_header(ranges, size))\n if not ranges:\n return HTTPError(416, 'Request Range Not Satisfiable')\n start, end = ranges[0]\n headers['Content-Range'] = 'bytes %d-%d/%d' % (start, end - 1, size)\n length = end - start\n headers['Content-Length'] = str(length)\n fd = wrapper(fd, start, length)\n status = 206\n return HTTPResponse(fd, status=status, **headers)", - "docstring": "Send a file represented by file object\n\n This function constcuts a HTTPResponse object that uses a file descriptor\n as response body. The file descriptor is suppled as ``fd`` argument and it\n must have a ``read()`` method. ``ValueError`` is raised when this is not\n the case. It supports `byte serving`_ using Range header, and makes the\n best effort to set all appropriate headers. It also supports HEAD queries.\n\n Because we are dealing with file descriptors and not physical files, the\n user must also supply the file metadata such as filename, size, and\n timestamp.\n\n The ``filename`` argument is an arbitrary filename. It is used to guess the\n content type, and also to set the content disposition in case of\n attachments.\n\n The ``size`` argument is the payload size in bytes. If it is omitted, the\n content length header is not set, and byte serving does not work.\n\n The ``timestamp`` argument is the number of seconds since Unix epoch when\n the file was created or last modified. If this argument is omitted,\n If-Modified-Since request headers cannot be honored.\n\n To explicitly specify the content type, the ``ctype`` argument can be used.\n This should be a valid MIME type of the payload.\n\n Default encoding (used as charset parameter in Content-Type header) is\n 'UTF-8'. This can be overridden by using the ``charset`` argument.\n\n The ``attachment`` argumnet can be set to ``True`` to add the\n Content-Dispositon response header. Value of the header is then set to the\n filename.\n\n The ``wrapper`` argument is used to wrap the file descriptor when doing\n byte serving. The default is to use ``fdsend.rangewrapper.RangeWrapper``\n class, but there are alternatives as ``fdsend.rangewrapper.range_iter`` and\n ``bottle._file_iter_range``. The wrappers provided by this package are\n written to specifically handle file handles that do not have a ``seek()``\n method. 
If this is not your case, you may safely use the bottle's wrapper.\n\n The primary difference between ``fdsend.rangewrapper.RangeWrapper`` and\n ``fdsend.rangewrapper.range_iter`` is that the former returns a file-like\n object with ``read()`` method, which may or may not increase performance\n when used on a WSGI server that supports ``wsgi.file_wrapper`` feature. The\n latter returns an iterator and the response is returned as is without the\n use of a ``file_wrapper``. This may have some benefits when it comes to\n memory usage.\n\n Benchmarking and profiling is the best way to determine which wrapper you\n want to use, or you need to implement your own.\n\n To implement your own wrapper, you need to create a callable or a class\n that takes the following arguments:\n\n - file descriptor\n - offset (in bytes from start of the file)\n - length (total number of bytes in the range)\n\n The return value of the wrapper must be either an iterable or file-like\n object that implements ``read()`` and ``close()`` methods with the usual\n semantics.\n\n The code is partly based on ``bottle.static_file``.\n\n .. _byte serving: https://tools.ietf.org/html/rfc2616#page-138" - }, - { - "code": "def set_one_freq(self, f_ghz):\n if not (f_ghz >= 0):\n raise ValueError('must have f_lo_ghz >= 0; got %r' % (f_lo_ghz,))\n self.in_vals[IN_VAL_NFREQ] = 1\n self.in_vals[IN_VAL_FREQ0] = f_ghz * 1e9\n self.in_vals[IN_VAL_LOGDFREQ] = 1.0\n return self", - "docstring": "Set the code to calculate results at just one frequency.\n\n **Call signature**\n\n *f_ghz*\n The frequency to sample, in GHz.\n Returns\n *self* for convenience in chaining." - }, - { - "code": "def set_nodes_aggregation_flag(self, peak_current_branch_max):\n for lv_load_area in self.grid_district.lv_load_areas():\n peak_current_node = (lv_load_area.peak_load / (3**0.5) / self.v_level)\n if peak_current_node > peak_current_branch_max:\n lv_load_area.is_aggregated = True\n self.grid_district.add_aggregated_peak_demand()", - "docstring": "Set Load Areas with too high demand to aggregated type.\n\n Args\n ----\n peak_current_branch_max: float\n Max. allowed current for line/cable" - }, - { - "code": "def every(predicate, *iterables):\n r\n try:\n if len(iterables) == 1: ifilterfalse(predicate, iterables[0]).next()\n else: ifilterfalse(bool, starmap(predicate, izip(*iterables))).next()\n except StopIteration: return True\n else: return False", - "docstring": "r\"\"\"Like `some`, but only returns `True` if all the elements of `iterables`\n satisfy `predicate`.\n\n Examples:\n >>> every(bool, [])\n True\n >>> every(bool, [0])\n False\n >>> every(bool, [1,1])\n True\n >>> every(operator.eq, [1,2,3],[1,2])\n True\n >>> every(operator.eq, [1,2,3],[0,2])\n False" - }, - { - "code": "def release_filename(self, id_):\n entry = self.__entries.get(id_)\n if entry is None:\n raise ValueError(\"Invalid filename id (%d)\" % id_)\n if entry.dec_ref_count() == 0:\n del self.__entries[id_]\n del self.__id_lut[entry.filename]", - "docstring": "Release a file name." - }, - { - "code": "def python_data(self):\n try:\n value = self.clean_value\n except LookupError:\n value = self.get_initial()\n return self.from_python(value)", - "docstring": "Representation of aggregate value as dictionary." 
- }, - { - "code": "def setUpImports(self):\n i = self.imports\n print >>i, 'from pyremotevbox.ZSI.schema import GED, GTD'\n print >>i, 'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct'\n module = self.getTypesModuleName()\n package = self.getTypesModulePath()\n if package:\n module = '%s.%s' %(package, module)\n print >>i, 'from %s import *' %(module)\n print >>i, 'from %s import %s' %(self.base_module_name, self.base_class_name)", - "docstring": "set import statements" - }, - { - "code": "def stack_nested_keys(nested_dict, key=(), depth=-1):\n if depth != 0 and hasattr(nested_dict, 'items'):\n for k, v in nested_dict.items():\n yield from stack_nested_keys(v, key=key + (k,), depth=depth - 1)\n else:\n yield key, nested_dict", - "docstring": "Stacks the keys of nested-dictionaries into tuples and yields a list of\n k-v pairs.\n\n :param nested_dict:\n Nested dictionary.\n :type nested_dict: dict\n\n :param key:\n Initial keys.\n :type key: tuple, optional\n\n :param depth:\n Maximum keys depth.\n :type depth: int, optional\n\n :return:\n List of k-v pairs.\n :rtype: generator" - }, - { - "code": "def request_and_get_output(self, table, outtype, outfn):\n job_id = self.request_output(table, outtype)\n status = self.monitor(job_id)\n if status[0] != 5:\n raise Exception(\"Output request failed.\")\n self.get_output(job_id, outfn)", - "docstring": "Shorthand for requesting an output file and then downloading it when\n ready.\n\n ## Arguments\n\n * `table` (str): The name of the table to export.\n * `outtype` (str): The type of output. Must be one of:\n CSV - Comma Seperated Values\n DataSet - XML DataSet\n FITS - Flexible Image Transfer System (FITS Binary)\n VOTable - XML Virtual Observatory VOTABLE\n * `outfn` (str): The file where the output should be stored.\n May also be a file-like object with a 'write' method." - }, - { - "code": "def fixup_comments(self):\n style_base = self.rawdata.style_base\n comment_text_indexes = np.asarray(list(self.rawdata.extra.comments.keys()), dtype=np.uint32)\n comment_mask = self.get_style_mask(comment=True)\n has_comments = np.where(style_base & comment_bit_mask > 0)[0]\n both = np.intersect1d(comment_text_indexes, has_comments)\n log.info(\"fixup comments: %d correctly marked, %d without style, %d empty text\" % (np.alen(both), np.alen(comment_text_indexes) - np.alen(both), np.alen(has_comments) - np.alen(both)))\n style_base &= comment_mask\n comment_style = self.get_style_bits(comment=True)\n style_base[comment_text_indexes] |= comment_style", - "docstring": "Remove any style bytes that are marked as commented but have no\n comment, and add any style bytes where there's a comment but it isn't\n marked in the style data.\n\n This happens on the base data, so only need to do this on one segment\n that uses this base data." 
- }, - { - "code": "def meta_contains_article_keyword(self, response, site_dict):\n contains_meta = response.xpath('//meta') \\\n .re('(= ?[\"\\'][^\"\\']*article[^\"\\']*[\"\\'])')\n if not contains_meta:\n return False\n return True", - "docstring": "Determines wether the response's meta data contains the keyword\n 'article'\n\n :param obj response: The scrapy response\n :param dict site_dict: The site object from the JSON-File\n\n :return bool: Determines wether the reponse's meta data contains the\n keyword 'article'" - }, - { - "code": "def com_daltonmaag_check_ufolint(font):\n import subprocess\n ufolint_cmd = [\"ufolint\", font]\n try:\n subprocess.check_output(ufolint_cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n yield FAIL, (\"ufolint failed the UFO source. Output follows :\"\n \"\\n\\n{}\\n\").format(e.output.decode())\n except OSError:\n yield ERROR, \"ufolint is not available!\"\n else:\n yield PASS, \"ufolint passed the UFO source.\"", - "docstring": "Run ufolint on UFO source directory." - }, - { - "code": "def build_base_document(dom, return_fragment=True):\n body_element = dom.find(\".//body\")\n if body_element is None:\n fragment = fragment_fromstring('
<div id=\"readabilityBody\"/>')\n fragment.append(dom)\n else:\n body_element.tag = \"div\"\n body_element.set(\"id\", \"readabilityBody\")\n fragment = body_element\n return document_from_fragment(fragment, return_fragment)", - "docstring": "Builds a base document with the body as root.\n\n :param dom: Parsed lxml tree (Document Object Model).\n :param bool return_fragment: If True only <div>
fragment is returned.\n Otherwise full HTML document is returned." - }, - { - "code": "def async_get_ac_states(self, uid, limit=1, offset=0, fields='*'):\n return (yield from self._get('/pods/{}/acStates'.format(uid),\n limit=limit,\n fields=fields,\n offset=offset))", - "docstring": "Get log entries of a device." - }, - { - "code": "def get_zero_task_agent(generators, market, nOffer, maxSteps):\n env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)\n task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)\n agent = pyreto.util.ZeroAgent(env.outdim, env.indim)\n return task, agent", - "docstring": "Returns a task-agent tuple whose action is always zero." - }, - { - "code": "def to_json(self, path=None, html_out=False,\n html_path='vega_template.html', validate=False,\n pretty_print=True):\n if validate:\n self.validate()\n if pretty_print:\n dumps_args = {'indent': 2, 'separators': (',', ': ')}\n else:\n dumps_args = {}\n def encoder(obj):\n if hasattr(obj, 'grammar'):\n return obj.grammar\n if html_out:\n template = Template(\n str(resource_string('vincent', 'vega_template.html')))\n with open(html_path, 'w') as f:\n f.write(template.substitute(path=path))\n if path:\n with open(path, 'w') as f:\n json.dump(self.grammar, f, default=encoder, sort_keys=True,\n **dumps_args)\n else:\n return json.dumps(self.grammar, default=encoder, sort_keys=True,\n **dumps_args)", - "docstring": "Convert object to JSON\n\n Parameters\n ----------\n path: string, default None\n Path to write JSON out. If there is no path provided, JSON\n will be returned as a string to the console.\n html_out: boolean, default False\n If True, vincent will output an simple HTML scaffold to\n visualize the vega json output.\n html_path: string, default 'vega_template.html'\n Path for the html file (if html_out=True)\n validate : boolean\n If True, call the object's `validate` method before\n serializing. Default is False.\n pretty_print : boolean\n If True (default), JSON is printed in more-readable form with\n indentation and spaces.\n\n Returns\n -------\n string\n JSON serialization of the class's grammar properties." - }, - { - "code": "def get_objective_search_session_for_objective_bank(self, objective_bank_id=None):\n if not objective_bank_id:\n raise NullArgument\n if not self.supports_objective_search():\n raise Unimplemented()\n try:\n from . import sessions\n except ImportError:\n raise OperationFailed()\n try:\n session = sessions.ObjectiveSearchSession(objective_bank_id, runtime=self._runtime)\n except AttributeError:\n raise OperationFailed()\n return session", - "docstring": "Gets the OsidSession associated with the objective search\n service for the given objective bank.\n\n arg: objectiveBankId (osid.id.Id): the Id of the objective\n bank\n return: (osid.learning.ObjectiveSearchSession) - an\n ObjectiveSearchSession\n raise: NotFound - objectiveBankId not found\n raise: NullArgument - objectiveBankId is null\n raise: OperationFailed - unable to complete request\n raise: Unimplemented - supports_objective_search() or\n supports_visible_federation() is false\n compliance: optional - This method must be implemented if\n supports_objective_search() and\n supports_visible_federation() are true." 
- }, - { - "code": "def _get_date_tuple(self, mediafile):\n datestring = super(DateField, self).__get__(mediafile, None)\n if isinstance(datestring, six.string_types):\n datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring))\n items = re.split('[-/]', six.text_type(datestring))\n else:\n items = []\n items = items[:3]\n if len(items) < 3:\n items += [None] * (3 - len(items))\n if not items[0] and hasattr(self, '_year_field'):\n items[0] = self._year_field.__get__(mediafile)\n items_ = []\n for item in items:\n try:\n items_.append(int(item))\n except (TypeError, ValueError):\n items_.append(None)\n return items_", - "docstring": "Get a 3-item sequence representing the date consisting of a\n year, month, and day number. Each number is either an integer or\n None." - }, - { - "code": "def remove_empty_cols(records):\n records = list(records)\n seqstrs = [str(rec.seq) for rec in records]\n clean_cols = [col\n for col in zip(*seqstrs)\n if not all(c == '-' for c in col)]\n clean_seqs = [''.join(row)\n for row in zip(*clean_cols)]\n for rec, clean_seq in zip(records, clean_seqs):\n yield SeqRecord(Seq(clean_seq, rec.seq.alphabet), id=rec.id,\n name=rec.name, description=rec.description,\n dbxrefs=rec.dbxrefs, features=rec.features,\n annotations=rec.annotations,\n letter_annotations=rec.letter_annotations)", - "docstring": "Remove all-gap columns from aligned SeqRecords." - }, - { - "code": "def structparser(token):\n m = STRUCT_PACK_RE.match(token)\n if not m:\n return [token]\n else:\n endian = m.group('endian')\n if endian is None:\n return [token]\n formatlist = re.findall(STRUCT_SPLIT_RE, m.group('fmt'))\n fmt = ''.join([f[-1] * int(f[:-1]) if len(f) != 1 else\n f for f in formatlist])\n if endian == '@':\n if byteorder == 'little':\n endian = '<'\n else:\n assert byteorder == 'big'\n endian = '>'\n if endian == '<':\n tokens = [REPLACEMENTS_LE[c] for c in fmt]\n else:\n assert endian == '>'\n tokens = [REPLACEMENTS_BE[c] for c in fmt]\n return tokens", - "docstring": "Parse struct-like format string token into sub-token list." - }, - { - "code": "def decompress_step(source, hparams, first_relu, name):\n with tf.variable_scope(name):\n shape = common_layers.shape_list(source)\n multiplier = 2\n kernel = (1, 1)\n thicker = common_layers.conv_block(\n source,\n hparams.hidden_size * multiplier, [((1, 1), kernel)],\n first_relu=first_relu,\n name=\"decompress_conv\")\n return tf.reshape(thicker, [shape[0], shape[1] * 2, 1, hparams.hidden_size])", - "docstring": "Decompression function." - }, - { - "code": "def set_outflow_BC(self, pores, mode='merge'):\n r\n mode = self._parse_mode(mode, allowed=['merge', 'overwrite', 'remove'],\n single=True)\n pores = self._parse_indices(pores)\n network = self.project.network\n phase = self.project.phases()[self.settings['phase']]\n throats = network.find_neighbor_throats(pores=pores)\n C12 = network['throat.conns'][throats]\n P12 = phase[self.settings['pressure']][C12]\n gh = phase[self.settings['hydraulic_conductance']][throats]\n Q12 = -gh * np.diff(P12, axis=1).squeeze()\n Qp = np.zeros(self.Np)\n np.add.at(Qp, C12[:, 0], -Q12)\n np.add.at(Qp, C12[:, 1], Q12)\n if ('pore.bc_outflow' not in self.keys()) or (mode == 'overwrite'):\n self['pore.bc_outflow'] = np.nan\n self['pore.bc_outflow'][pores] = Qp[pores]", - "docstring": "r\"\"\"\n Adds outflow boundary condition to the selected pores.\n\n Outflow condition simply means that the gradient of the solved\n quantity does not change, i.e. is 0." 
- }, - { - "code": "def normalize_init_values(cls, release, species, server):\n release = check_release_number(release)\n species = check_species_object(species)\n return (release, species, server)", - "docstring": "Normalizes the arguments which uniquely specify an EnsemblRelease\n genome." - }, - { - "code": "async def process_updates(self, updates, fast: typing.Optional[bool] = True):\n if fast:\n tasks = []\n for update in updates:\n tasks.append(self.updates_handler.notify(update))\n return await asyncio.gather(*tasks)\n results = []\n for update in updates:\n results.append(await self.updates_handler.notify(update))\n return results", - "docstring": "Process list of updates\n\n :param updates:\n :param fast:\n :return:" - }, - { - "code": "def parse_detail(self, response):\n product_data = {\n 'url': response.url,\n 'name': response.css('div.page-title h1::text').extract_first(),\n }\n inventory_number = re.search(\n r'(?P\\d+)$',\n response.css('span.product-in::text').extract_first(),\n ).group('inv_num')\n product_data.update({'in': inventory_number})\n specs_table = response.css('\n for row in specs_table.css('div.spec-row'):\n keys = row.css('dt::text').extract()\n values = row.css('dd::text').extract()\n product_data.update({\n key: value\n for (key, value) in zip(keys, values)\n })\n self.logger.info(product_data['name'])\n yield product_data", - "docstring": "Parse individual product's detail" - }, - { - "code": "def date_time_this_year(\n self,\n before_now=True,\n after_now=False,\n tzinfo=None):\n now = datetime.now(tzinfo)\n this_year_start = now.replace(\n month=1, day=1, hour=0, minute=0, second=0, microsecond=0)\n next_year_start = datetime(now.year + 1, 1, 1, tzinfo=tzinfo)\n if before_now and after_now:\n return self.date_time_between_dates(\n this_year_start, next_year_start, tzinfo)\n elif not before_now and after_now:\n return self.date_time_between_dates(now, next_year_start, tzinfo)\n elif not after_now and before_now:\n return self.date_time_between_dates(this_year_start, now, tzinfo)\n else:\n return now", - "docstring": "Gets a DateTime object for the current year.\n\n :param before_now: include days in current year before today\n :param after_now: include days in current year after today\n :param tzinfo: timezone, instance of datetime.tzinfo subclass\n :example DateTime('2012-04-04 11:02:02')\n :return DateTime" - }, - { - "code": "def get_sdb_by_id(self, sdb_id):\n sdb_resp = get_with_retry(self.cerberus_url + '/v2/safe-deposit-box/' + sdb_id,\n headers=self.HEADERS)\n throw_if_bad_response(sdb_resp)\n return sdb_resp.json()", - "docstring": "Return the details for the given safe deposit box id\n\n Keyword arguments:\n sdb_id -- this is the id of the safe deposit box, not the path." - }, - { - "code": "def object_new(self, template=None, **kwargs):\n args = (template,) if template is not None else ()\n return self._client.request('/object/new', args,\n decoder='json', **kwargs)", - "docstring": "Creates a new object from an IPFS template.\n\n By default this creates and returns a new empty merkledag node, but you\n may pass an optional template argument to create a preformatted node.\n\n .. code-block:: python\n\n >>> c.object_new()\n {'Hash': 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n'}\n\n Parameters\n ----------\n template : str\n Blueprints from which to construct the new object. 
Possible values:\n\n * ``\"unixfs-dir\"``\n * ``None``\n\n Returns\n -------\n dict : Object hash" - }, - { - "code": "def lrun(command, *args, **kwargs):\n return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs)", - "docstring": "Run a local command from project root" - }, - { - "code": "def IdentifiersIndexer(instance):\n identifiers = instance.Schema()['Identifiers'].get(instance)\n return [safe_unicode(i['Identifier']) for i in identifiers]", - "docstring": "Return a list of unique Identifier strings\n This populates the Identifiers Keyword index, but with some\n replacements to prevent the word-splitter etc from taking effect." - }, - { - "code": "def read_array(self, key, start=None, stop=None):\n import tables\n node = getattr(self.group, key)\n attrs = node._v_attrs\n transposed = getattr(attrs, 'transposed', False)\n if isinstance(node, tables.VLArray):\n ret = node[0][start:stop]\n else:\n dtype = getattr(attrs, 'value_type', None)\n shape = getattr(attrs, 'shape', None)\n if shape is not None:\n ret = np.empty(shape, dtype=dtype)\n else:\n ret = node[start:stop]\n if dtype == 'datetime64':\n ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)\n elif dtype == 'timedelta64':\n ret = np.asarray(ret, dtype='m8[ns]')\n if transposed:\n return ret.T\n else:\n return ret", - "docstring": "read an array for the specified node (off of group" - }, - { - "code": "def get_edge(self, edge_id):\n try:\n edge_object = self.edges[edge_id]\n except KeyError:\n raise NonexistentEdgeError(edge_id)\n return edge_object", - "docstring": "Returns the edge object identified by \"edge_id\"." - }, - { - "code": "def setDefaultValues(self):\n print '%s call setDefaultValues' % self.port\n self.networkName = ModuleHelper.Default_NwkName\n self.networkKey = ModuleHelper.Default_NwkKey\n self.channel = ModuleHelper.Default_Channel\n self.channelMask = \"0x7fff800\"\n self.panId = ModuleHelper.Default_PanId\n self.xpanId = ModuleHelper.Default_XpanId\n self.meshLocalPrefix = ModuleHelper.Default_MLPrefix\n self.pskc = \"00000000000000000000000000000000\"\n self.securityPolicySecs = ModuleHelper.Default_SecurityPolicy\n self.securityPolicyFlags = \"onrcb\"\n self.activetimestamp = ModuleHelper.Default_ActiveTimestamp\n self.sedPollingRate = 3\n self.deviceRole = None\n self.provisioningUrl = ''\n self.hasActiveDatasetToCommit = False\n self.logThread = Queue()\n self.logThreadStatus = self.logStatus['stop']\n self.networkDataRequirement = ''\n self.isPowerDown = False\n self._addressfilterMode = 'disable'\n self._addressfilterSet = set()\n self.isActiveCommissioner = False\n self._lines = None\n try:\n self.setMAC(self.mac)\n self.__setChannelMask(self.channelMask)\n self.__setSecurityPolicy(self.securityPolicySecs, self.securityPolicyFlags)\n self.setChannel(self.channel)\n self.setPANID(self.panId)\n self.setXpanId(self.xpanId)\n self.setNetworkName(self.networkName)\n self.setNetworkKey(self.networkKey)\n self.setMLPrefix(self.meshLocalPrefix)\n self.setPSKc(self.pskc)\n self.setActiveTimestamp(self.activetimestamp)\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger('setDefaultValue() Error: ' + str(e))", - "docstring": "set default mandatory Thread Network parameter value" - }, - { - "code": "def compute(self, base, *args, **kwargs):\n return min(base, super(Discount, self).compute(base, *args, **kwargs))", - "docstring": "Returns the value of the discount.\n @param base:float Computation base.\n @return: Decimal" - }, - { - "code": "def docinfo2dict(doctree):\n nodes = 
doctree.traverse(docutils.nodes.docinfo)\n md = {}\n if not nodes:\n return md\n for node in nodes[0]:\n if isinstance(node, docutils.nodes.authors):\n md['authors'] = [author.astext() for author in node]\n elif isinstance(node, docutils.nodes.TextElement):\n md[node.__class__.__name__] = node.astext()\n else:\n name, body = node\n md[name.astext()] = body.astext()\n return md", - "docstring": "Return the docinfo field list from a doctree as a dictionary\n\n Note: there can be multiple instances of a single field in the docinfo.\n Since a dictionary is returned, the last instance's value will win.\n\n Example:\n\n pub = rst2pub(rst_string)\n print docinfo2dict(pub.document)" - }, - { - "code": "def _parse_hunk_line(self, line):\n components = line.split('@@')\n if len(components) >= 2:\n hunk_info = components[1]\n groups = self.HUNK_LINE_RE.findall(hunk_info)\n if len(groups) == 1:\n try:\n return int(groups[0])\n except ValueError:\n msg = \"Could not parse '{}' as a line number\".format(groups[0])\n raise GitDiffError(msg)\n else:\n msg = \"Could not find start of hunk in line '{}'\".format(line)\n raise GitDiffError(msg)\n else:\n msg = \"Could not parse hunk in line '{}'\".format(line)\n raise GitDiffError(msg)", - "docstring": "Given a hunk line in `git diff` output, return the line number\n at the start of the hunk. A hunk is a segment of code that\n contains changes.\n\n The format of the hunk line is:\n\n @@ -k,l +n,m @@ TEXT\n\n where `k,l` represent the start line and length before the changes\n and `n,m` represent the start line and length after the changes.\n\n `git diff` will sometimes put a code excerpt from within the hunk\n in the `TEXT` section of the line." - }, - { - "code": "def pool_delete(storage_pool, logger):\n path = etree.fromstring(storage_pool.XMLDesc(0)).find('.//path').text\n volumes_delete(storage_pool, logger)\n try:\n storage_pool.destroy()\n except libvirt.libvirtError:\n logger.exception(\"Unable to delete storage pool.\")\n try:\n if os.path.exists(path):\n shutil.rmtree(path)\n except EnvironmentError:\n logger.exception(\"Unable to delete storage pool folder.\")", - "docstring": "Storage Pool deletion, removes all the created disk images within the pool and the pool itself." - }, - { - "code": "def _can_hold_element(self, element):\n dtype = self.values.dtype.type\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, dtype)\n return isinstance(element, dtype)", - "docstring": "require the same dtype as ourselves" - }, - { - "code": "def load_fetchers_entry_point_group(self, entry_point_group):\n for ep in pkg_resources.iter_entry_points(group=entry_point_group):\n self.register_fetcher(ep.name, ep.load())", - "docstring": "Load fetchers from an entry point group.\n\n :param entry_point_group: The entrypoint group." - }, - { - "code": "def getUserByNumber(self, base, uidNumber):\n\t\tres = self.query(base, \"uidNumber=\"+str(uidNumber), ['uid'])\n\t\tif len(res) > 1:\n\t\t\traise InputError(uidNumber, \"Multiple users found. 
Expecting one.\")\n\t\treturn res[0][0], res[0][1]['uid'][0]", - "docstring": "search for a user in LDAP and return its DN and uid" - }, - { - "code": "def migrate_constituencies(apps, schema_editor):\n Constituency = apps.get_model(\"representatives\", \"Constituency\")\n for c in Constituency.objects.all():\n c.save()", - "docstring": "Re-save constituencies to recompute fingerprints" - }, - { - "code": "def awsRetry(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n for attempt in retry(delays=truncExpBackoff(),\n timeout=300,\n predicate=awsRetryPredicate):\n with attempt:\n return f(*args, **kwargs)\n return wrapper", - "docstring": "This decorator retries the wrapped function if aws throws unexpected errors\n errors.\n It should wrap any function that makes use of boto" - }, - { - "code": "def to_bigquery_ddl(self, name_case=DdlParseBase.NAME_CASE.original):\n if self.schema is None:\n dataset = \"dataset\"\n elif name_case == self.NAME_CASE.lower:\n dataset = self.schema.lower()\n elif name_case == self.NAME_CASE.upper:\n dataset = self.schema.upper()\n else:\n dataset = self.schema\n cols_defs = []\n for col in self.columns.values():\n col_name = col.get_name(name_case)\n if col.array_dimensional < 1:\n type = col.bigquery_standard_data_type\n not_null = \" NOT NULL\" if col.not_null else \"\"\n else:\n type_front = \"ARRAY<\"\n type_back = \">\"\n for i in range(1, col.array_dimensional):\n type_front += \"STRUCT>\"\n type = \"{}{}{}\".format(type_front, col.bigquery_standard_data_type, type_back)\n not_null = \"\"\n cols_defs.append(\"{name} {type}{not_null}\".format(\n name=col_name,\n type=type,\n not_null=not_null,\n ))\n return textwrap.dedent(\n).format(\n dataset=dataset,\n table=self.get_name(name_case),\n colmns_define=\",\\n \".join(cols_defs),\n )", - "docstring": "Generate BigQuery CREATE TABLE statements\n\n :param name_case: name case type\n * DdlParse.NAME_CASE.original : Return to no convert\n * DdlParse.NAME_CASE.lower : Return to lower\n * DdlParse.NAME_CASE.upper : Return to upper\n\n :return: BigQuery CREATE TABLE statements" - }, - { - "code": "def status(id):\n if id:\n data_source = get_data_object(id, use_data_config=False)\n print_data([data_source] if data_source else [])\n else:\n data_sources = DataClient().get_all()\n print_data(data_sources)", - "docstring": "View status of all versions in a dataset.\n\n The command also accepts a specific dataset version." - }, - { - "code": "def _gitignore_entry_to_regex(entry):\n ret = entry.strip()\n ret = ret.replace('.', '\\.')\n ret = ret.replace('*', '.*')\n return ret", - "docstring": "Take a path that you might find in a .gitignore file and turn it into a regex" - }, - { - "code": "def _parse_template(self, has_content):\n reset = self._head\n context = contexts.TEMPLATE_NAME\n if has_content:\n context |= contexts.HAS_TEMPLATE\n try:\n template = self._parse(context)\n except BadRoute:\n self._head = reset\n raise\n self._emit_first(tokens.TemplateOpen())\n self._emit_all(template)\n self._emit(tokens.TemplateClose())", - "docstring": "Parse a template at the head of the wikicode string." 
- }, - { - "code": "def convert_merge(builder, layer, input_names, output_names, keras_layer):\n output_name = output_names[0]\n mode = _get_elementwise_name_from_keras_layer(keras_layer)\n builder.add_elementwise(name = layer, input_names = input_names,\n output_name = output_name, mode = mode)", - "docstring": "Convert concat layer from keras to coreml.\n\n Parameters\n ----------\n keras_layer: layer\n A keras layer object.\n\n builder: NeuralNetworkBuilder\n A neural network builder object." - }, - { - "code": "def verify_link_in_task_graph(chain, decision_link, task_link):\n log.info(\"Verifying the {} {} task definition is part of the {} {} task graph...\".format(\n task_link.name, task_link.task_id, decision_link.name, decision_link.task_id\n ))\n if task_link.task_id in decision_link.task_graph:\n graph_defn = deepcopy(decision_link.task_graph[task_link.task_id])\n verify_task_in_task_graph(task_link, graph_defn)\n log.info(\"Found {} in the graph; it's a match\".format(task_link.task_id))\n return\n raise_on_errors([\"Can't find task {} {} in {} {} task-graph.json!\".format(\n task_link.name, task_link.task_id, decision_link.name, decision_link.task_id\n )])", - "docstring": "Compare the runtime task definition against the decision task graph.\n\n Args:\n chain (ChainOfTrust): the chain we're operating on.\n decision_link (LinkOfTrust): the decision task link\n task_link (LinkOfTrust): the task link we're testing\n\n Raises:\n CoTError: on failure." - }, - { - "code": "def pydeps(**args):\n _args = args if args else cli.parse_args(sys.argv[1:])\n inp = target.Target(_args['fname'])\n log.debug(\"Target: %r\", inp)\n if _args.get('output'):\n _args['output'] = os.path.abspath(_args['output'])\n else:\n _args['output'] = os.path.join(\n inp.calling_dir,\n inp.modpath.replace('.', '_') + '.' + _args.get('format', 'svg')\n )\n with inp.chdir_work():\n _args['fname'] = inp.fname\n _args['isdir'] = inp.is_dir\n if _args.get('externals'):\n del _args['fname']\n exts = externals(inp, **_args)\n print(json.dumps(exts, indent=4))\n return exts\n else:\n return _pydeps(inp, **_args)", - "docstring": "Entry point for the ``pydeps`` command.\n\n This function should do all the initial parameter and environment\n munging before calling ``_pydeps`` (so that function has a clean\n execution path)." - }, - { - "code": "def delete_suspect(self, suspect_id):\n suspect_obj = self.suspect(suspect_id)\n logger.debug(\"Deleting suspect {0}\".format(suspect_obj.name))\n self.session.delete(suspect_obj)\n self.save()", - "docstring": "De-link a suspect from a case." - }, - { - "code": "def _render(template, callable_, args, data, as_unicode=False):\n if as_unicode:\n buf = util.FastEncodingBuffer(as_unicode=True)\n elif template.bytestring_passthrough:\n buf = compat.StringIO()\n else:\n buf = util.FastEncodingBuffer(\n as_unicode=as_unicode,\n encoding=template.output_encoding,\n errors=template.encoding_errors)\n context = Context(buf, **data)\n context._outputting_as_unicode = as_unicode\n context._set_with_template(template)\n _render_context(template, callable_, context, *args,\n **_kwargs_for_callable(callable_, data))\n return context._pop_buffer().getvalue()", - "docstring": "create a Context and return the string\n output of the given template and template callable." 
- }, - { - "code": "def set_map_alpha(alpha):\n if alpha < 0 or alpha > 255:\n raise Exception('invalid alpha ' + str(alpha))\n _global_config.map_alpha = alpha", - "docstring": "Alpha color of the map tiles\n\n :param alpha: int between 0 and 255. 0 is completely dark, 255 is full brightness" - }, - { - "code": "def update(self, container, instances=None, map_name=None, **kwargs):\n return self.run_actions('update', container, instances=instances, map_name=map_name, **kwargs)", - "docstring": "Updates instances from a container configuration. Typically this means restarting or recreating containers based\n on detected changes in the configuration or environment. Note that not all policy classes necessarily implement\n this method.\n\n :param container: Container name.\n :type container: unicode | str\n :param instances: Instance names to remove. If not specified, will remove all instances as specified in the\n configuration (or just one default instance).\n :type instances: collections.Iterable[unicode | str | NoneType]\n :param map_name: Container map name. Optional - if not provided the default map is used.\n :type map_name: unicode | str\n :param kwargs: Additional kwargs. Only options controlling policy behavior are considered.\n :return: Return values of actions.\n :rtype: list[dockermap.map.runner.ActionOutput]" - }, - { - "code": "def S(self):\n if self.c is None:\n raise RuntimeError('Cross-spectral density requires noise '\n 'covariance matrix c.')\n H = self.H()\n S = np.empty(H.shape, dtype=H.dtype)\n for f in range(H.shape[2]):\n S[:, :, f] = H[:, :, f].dot(self.c).dot(H[:, :, f].conj().T)\n return S", - "docstring": "Cross-spectral density.\n\n .. math:: \\mathbf{S}(f) = \\mathbf{H}(f) \\mathbf{C} \\mathbf{H}'(f)" - }, - { - "code": "def _delete_reverses(self):\n for reverse in self.clone_related:\n self._delete_reverse(reverse)\n for field in self._meta.local_many_to_many:\n if field.rel.through and \\\n field.rel.through._meta.auto_created and not \\\n field.name in self.clone_related:\n man = getattr(self, field.name)\n man.clear()", - "docstring": "Delete all objects that would have been cloned\n on a clone command. This is done separately because\n there may be m2m and other relationships that\n would have not been deleted otherwise." - }, - { - "code": "def replace_rdataset(self, replacement):\n self.delete_rdataset(replacement.rdclass, replacement.rdtype,\n replacement.covers)\n self.rdatasets.append(replacement)", - "docstring": "Replace an rdataset.\n\n It is not an error if there is no rdataset matching I{replacement}.\n\n Ownership of the I{replacement} object is transferred to the node;\n in other words, this method does not store a copy of I{replacement}\n at the node, it stores I{replacement} itself." 
- }, - { - "code": "def load_baseline_from_dict(cls, data):\n result = SecretsCollection()\n if not all(key in data for key in (\n 'plugins_used',\n 'results',\n )):\n raise IOError\n if not any(key in data for key in (\n 'exclude',\n 'exclude_regex',\n )):\n raise IOError\n if 'exclude_regex' in data:\n result.exclude_files = data['exclude_regex']\n else:\n result.exclude_files = data['exclude']['files']\n result.exclude_lines = data['exclude']['lines']\n plugins = []\n for plugin in data['plugins_used']:\n plugin_classname = plugin.pop('name')\n plugins.append(initialize.from_plugin_classname(\n plugin_classname,\n exclude_lines_regex=result.exclude_lines,\n **plugin\n ))\n result.plugins = tuple(plugins)\n for filename in data['results']:\n result.data[filename] = {}\n for item in data['results'][filename]:\n secret = PotentialSecret(\n item['type'],\n filename,\n secret='will be replaced',\n lineno=item['line_number'],\n is_secret=item.get('is_secret'),\n )\n secret.secret_hash = item['hashed_secret']\n result.data[filename][secret] = secret\n result.version = (\n data['version']\n if 'version' in data\n else '0.0.0'\n )\n return result", - "docstring": "Initializes a SecretsCollection object from dictionary.\n\n :type data: dict\n :param data: properly formatted dictionary to load SecretsCollection from.\n\n :rtype: SecretsCollection\n :raises: IOError" - }, - { - "code": "def extract_lzh (archive, compression, cmd, verbosity, interactive, outdir):\n opts = 'x'\n if verbosity > 1:\n opts += 'v'\n opts += \"w=%s\" % outdir\n return [cmd, opts, archive]", - "docstring": "Extract a LZH archive." - }, - { - "code": "def match_namespace(self, el, tag):\n match = True\n namespace = self.get_tag_ns(el)\n default_namespace = self.namespaces.get('')\n tag_ns = '' if tag.prefix is None else self.namespaces.get(tag.prefix, None)\n if tag.prefix is None and (default_namespace is not None and namespace != default_namespace):\n match = False\n elif (tag.prefix is not None and tag.prefix == '' and namespace):\n match = False\n elif (\n tag.prefix and\n tag.prefix != '*' and (tag_ns is None or namespace != tag_ns)\n ):\n match = False\n return match", - "docstring": "Match the namespace of the element." 
- }, - { - "code": "async def abort(self, *, comment: str = None):\n params = {\n \"system_id\": self.system_id\n }\n if comment:\n params[\"comment\"] = comment\n self._data = await self._handler.abort(**params)\n return self", - "docstring": "Abort the current action.\n\n :param comment: Reason for aborting the action.\n :param type: `str`" - }, - { - "code": "async def destroy(self):\n self.logger.debug(\"destroy command\")\n self.state = 'destroyed'\n await self.set_topic(\"\")\n self.nowplayinglog.debug(\"---\")\n self.nowplayingauthorlog.debug(\"---\")\n self.nowplayingsourcelog.debug(\"---\")\n self.timelog.debug(_timebar.make_timebar())\n self.prev_time = \"---\"\n self.statuslog.debug(\"Destroying\")\n self.mready = False\n self.vready = False\n self.pause_time = None\n self.loop_type = 'off'\n if self.vclient:\n try:\n await self.vclient.disconnect()\n except Exception as e:\n logger.error(e)\n pass\n if self.streamer:\n try:\n self.streamer.stop()\n except:\n pass\n self.vclient = None\n self.vchannel = None\n self.streamer = None\n self.current_duration = 0\n self.current_download_elapsed = 0\n self.is_live = False\n self.queue = []\n self.prev_queue = []\n if self.embed:\n await self.embed.delete()\n self.embed = None\n self.clear_cache()", - "docstring": "Destroy the whole gui and music player" - }, - { - "code": "def collect(self):\n for root, dirname, files in walk(self.migration_home):\n for file_name in file_filter(files, \"*.py\"):\n file_name = file_name.replace('.py', '')\n file = None\n try:\n if file_name == '__init__':\n continue\n file, pathname, description = find_module(\n file_name, [root])\n load_module(file_name, file, pathname, description)\n finally:\n if file is not None:\n file.close()", - "docstring": "Walks self.migration_home and load all potential migration modules" - }, - { - "code": "def intersection(self, range):\n if self.worksheet != range.worksheet:\n return None\n start = (max(self._start[0], range._start[0]),\n max(self._start[1], range._start[1]))\n end = (min(self._end[0], range._end[0]),\n min(self._end[1], range._end[1]))\n if end[0] < start[0] or end[1] < start[1]:\n return None\n return Range(start, end, self.worksheet, validate=False)", - "docstring": "Calculates the intersection with another range object" - }, - { - "code": "def find_files_for_use(self, all_files):\n for path in all_files:\n relpath = self.relpath_for(path)\n if relpath.startswith(\"./\"):\n relpath = relpath[2:]\n if not self.is_filtered(relpath):\n yield Path(path, relpath)", - "docstring": "Given a list of all the files to consider, only yield Path objects\n for those we care about, given our filters" - }, - { - "code": "def post_tweet(self, auth_secret, tweet):\n result = {pytwis_constants.ERROR_KEY: None}\n loggedin, userid = self._is_loggedin(auth_secret)\n if not loggedin:\n result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN\n return (False, result)\n post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)\n post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)\n post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)\n post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)\n follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)\n followers = self._rc.zrange(follower_zset_key, 0, -1)\n unix_time = int(time.time())\n with self._rc.pipeline() as pipe:\n pipe.multi()\n pipe.hmset(post_id_key,\n {pytwis_constants.TWEET_USERID_KEY: userid,\n 
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,\n pytwis_constants.TWEET_BODY_KEY: tweet})\n pipe.lpush(post_id_timeline_key, post_id)\n pipe.lpush(post_id_user_key, post_id)\n for follower in followers:\n post_id_follower_key = \\\n pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)\n pipe.lpush(post_id_follower_key, post_id)\n pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)\n pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,\n 0,\n pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)\n pipe.execute()\n return (True, result)", - "docstring": "Post a tweet.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of the logged-in user.\n tweet: str\n The tweet that will be posted.\n\n Returns\n -------\n bool\n True if the tweet is successfully posted, False otherwise.\n result\n None if the tweet is successfully posted, a dict containing\n the error string with the key ERROR_KEY otherwise.\n\n Note\n ----\n Possible error strings are listed as below:\n\n - ERROR_NOT_LOGGED_IN" - }, - { - "code": "def _get_cursor(self):\n _options = self._get_options()\n conn = psycopg2.connect(host=_options['host'],\n user=_options['user'],\n password=_options['pass'],\n dbname=_options['db'],\n port=_options['port'])\n cursor = conn.cursor()\n try:\n yield cursor\n log.debug('Connected to POSTGRES DB')\n except psycopg2.DatabaseError as err:\n log.exception('Error in ext_pillar POSTGRES: %s', err.args)\n finally:\n conn.close()", - "docstring": "Yield a POSTGRES cursor" - }, - { - "code": "def get_playlists(self, offset=0, limit=50):\n response = self.client.get(\n self.client.USER_PLAYLISTS % (self.name, offset, limit))\n return self._parse_response(response, splaylist)\n return playlists", - "docstring": "Get user's playlists." - }, - { - "code": "def _find_all_groups(items, require_bam=True):\n all_groups = []\n for data in items:\n batches = _get_batches(data, require_bam)\n all_groups.append(batches)\n return all_groups", - "docstring": "Find all groups" - }, - { - "code": "def get_units(unit, binary=False):\n result = None\n if unit == 'b':\n result = 1, 'Byte'\n elif binary:\n if unit == 'k':\n result = 1024, 'Kibibyte'\n elif unit == 'm':\n result = 1048576, 'Mebibyte'\n elif unit == 'g':\n if opts.precision == -1:\n opts.precision = 3\n result = 1073741824, 'Gibibyte'\n elif unit == 't':\n if opts.precision == -1:\n opts.precision = 3\n result = 1099511627776, 'Tebibyte'\n else:\n if unit == 'k':\n result = 1000, 'Kilobyte'\n elif unit == 'm':\n result = 1000000, 'Megabyte'\n elif unit == 'g':\n if opts.precision == -1:\n opts.precision = 3\n result = 1000000000, 'Gigabyte'\n elif unit == 't':\n if opts.precision == -1:\n opts.precision = 3\n result = 1000000000000, 'Terabyte'\n if not result:\n print(f'Warning: incorrect parameter: {unit}.')\n result = _outunit\n if opts.precision == -1:\n opts.precision = 0\n return result", - "docstring": "Sets the output unit and precision for future calculations and returns\n an integer and the string representation of it." 
- }, - { - "code": "def create_permissions_from_tuples(model, codename_tpls):\n if codename_tpls:\n model_cls = django_apps.get_model(model)\n content_type = ContentType.objects.get_for_model(model_cls)\n for codename_tpl in codename_tpls:\n app_label, codename, name = get_from_codename_tuple(\n codename_tpl, model_cls._meta.app_label\n )\n try:\n Permission.objects.get(codename=codename, content_type=content_type)\n except ObjectDoesNotExist:\n Permission.objects.create(\n name=name, codename=codename, content_type=content_type\n )\n verify_codename_exists(f\"{app_label}.{codename}\")", - "docstring": "Creates custom permissions on model \"model\"." - }, - { - "code": "def get_size(self, chrom=None):\n if len(self.size) == 0:\n raise LookupError(\"no chromosomes in index, is the index correct?\")\n if chrom:\n if chrom in self.size:\n return self.size[chrom]\n else: \n raise KeyError(\"chromosome {} not in index\".format(chrom))\n total = 0\n for size in self.size.values():\n total += size\n return total", - "docstring": "Return the sizes of all sequences in the index, or the size of chrom if specified\n as an optional argument" - }, - { - "code": "def true_num_genes(model, custom_spont_id=None):\n true_num = 0\n for gene in model.genes:\n if not is_spontaneous(gene, custom_id=custom_spont_id):\n true_num += 1\n return true_num", - "docstring": "Return the number of genes in a model ignoring spontaneously labeled genes.\n\n Args:\n model (Model):\n custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\n Returns:\n int: Number of genes excluding spontaneous genes" - }, - { - "code": "def cut(list_, index=0):\n if isinstance(index, int):\n cut_ = lambda x: x[index]\n else:\n cut_ = lambda x: getattr(x, index)\n return list(map(cut_, list_))", - "docstring": "Cut a list by index or arg" - }, - { - "code": "def cooccurrences(self, domain):\n uri = self._uris[\"cooccurrences\"].format(domain)\n return self.get_parse(uri)", - "docstring": "Get the cooccurrences of the given domain.\n\n For details, see https://investigate.umbrella.com/docs/api#co-occurrences" - }, - { - "code": "def get_health_check(name, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n retries = 30\n while True:\n try:\n lb = conn.get_all_load_balancers(load_balancer_names=[name])\n lb = lb[0]\n ret = odict.OrderedDict()\n hc = lb.health_check\n ret['interval'] = hc.interval\n ret['target'] = hc.target\n ret['healthy_threshold'] = hc.healthy_threshold\n ret['timeout'] = hc.timeout\n ret['unhealthy_threshold'] = hc.unhealthy_threshold\n return ret\n except boto.exception.BotoServerError as e:\n if retries and e.code == 'Throttling':\n log.debug('Throttled by AWS API, will retry in 5 seconds.')\n time.sleep(5)\n retries -= 1\n continue\n log.error('ELB %s not found.', name,\n exc_info_on_logleve=logging.DEBUG)\n return {}", - "docstring": "Get the health check configured for this ELB.\n\n CLI example:\n\n .. 
code-block:: bash\n\n salt myminion boto_elb.get_health_check myelb" - }, - { - "code": "def Column(self, column_name):\n column_idx = None\n for idx, column in enumerate(self.header.columns):\n if column.name == column_name:\n column_idx = idx\n break\n if column_idx is None:\n raise KeyError(\"Column '{}' not found\".format(column_name))\n for row in self.rows:\n yield row.values[column_idx]", - "docstring": "Iterates over values of a given column.\n\n Args:\n column_name: A nome of the column to retrieve the values for.\n\n Yields:\n Values of the specified column.\n\n Raises:\n KeyError: If given column is not present in the table." - }, - { - "code": "def find_model(sender, model_name):\n MC = get_mc()\n model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!=''))\n if model:\n model_inst = model.get_instance()\n orm.set_model(model_name, model_inst.table_name, appname=__name__, model_path='')\n return orm.__models__.get(model_name)", - "docstring": "Register new model to ORM" - }, - { - "code": "def java_version():\n result = subprocess.check_output(\n [c.JAVA, '-version'], stderr=subprocess.STDOUT\n )\n first_line = result.splitlines()[0]\n return first_line.decode()", - "docstring": "Call java and return version information.\n\n :return unicode: Java version string" - }, - { - "code": "def drawdown_start(self, return_date=False):\n dd = self.drawdown_idx()\n mask = nancumsum(dd == nanmin(dd.min)).astype(bool)\n start = dd.mask(mask)[::-1].idxmax()\n if return_date:\n return start.date()\n return start", - "docstring": "The date of the peak at which most severe drawdown began.\n\n Parameters\n ----------\n return_date : bool, default False\n If True, return a `datetime.date` object.\n If False, return a Pandas Timestamp object.\n\n Returns\n -------\n datetime.date or pandas._libs.tslib.Timestamp" - }, - { - "code": "def set_stream_class_lists(self, session_id, payload):\n items_payload = {'items': payload}\n endpoint = self.endpoints.set_stream_class_lists_url(session_id)\n response = requests.put(\n endpoint,\n data=json.dumps(items_payload),\n headers=self.json_headers(),\n proxies=self.proxies,\n timeout=self.timeout\n )\n if response.status_code == 200:\n pass\n elif response.status_code == 400:\n raise SetStreamClassError(\n 'Invalid request. This response may indicate that data in your request data '\n 'is invalid JSON. It may also indicate that you passed in invalid layout options.'\n )\n elif response.status_code == 403:\n raise AuthError('Authentication error.')\n else:\n raise RequestError('OpenTok server error.', response.status_code)", - "docstring": "Use this method to change layout classes for OpenTok streams. The layout classes\n define how the streams are displayed in the layout of a composed OpenTok archive\n\n :param String session_id: The ID of the session of the streams that will be updated\n\n :param List payload: A list defining the class lists to apply to the streams.\n Each element in the list is a dictionary with two properties: 'id' and 'layoutClassList'.\n The 'id' property is the stream ID (a String), and the 'layoutClassList' is an array of\n class names (Strings) to apply to the stream. 
For example:\n\n payload = [\n {'id': '7b09ec3c-26f9-43d7-8197-f608f13d4fb6', 'layoutClassList': ['focus']},\n {'id': '567bc941-6ea0-4c69-97fc-70a740b68976', 'layoutClassList': ['top']},\n {'id': '307dc941-0450-4c09-975c-705740d08970', 'layoutClassList': ['bottom']}\n ]" - }, - { - "code": "def build_command(self, action, args=None):\n arguments = self.wrap_arguments(args)\n body = self.soap_body_template.format(\n arguments=arguments, action=action, service_type=self.service_type,\n version=self.version)\n soap_action_template = \\\n \"urn:schemas-upnp-org:service:{service_type}:{version}\n soap_action = soap_action_template.format(\n service_type=self.service_type, version=self.version,\n action=action)\n headers = {'Content-Type': 'text/xml; charset=\"utf-8\"',\n 'SOAPACTION': soap_action}\n return (headers, body)", - "docstring": "Build a SOAP request.\n\n Args:\n action (str): the name of an action (a string as specified in the\n service description XML file) to be sent.\n args (list, optional): Relevant arguments as a list of (name,\n value) tuples.\n\n Returns:\n tuple: a tuple containing the POST headers (as a dict) and a\n string containing the relevant SOAP body. Does not set\n content-length, or host headers, which are completed upon\n sending." - }, - { - "code": "def create_from_row(cls, table_row):\n kwargs = {}\n for key in table_row.colnames:\n kwargs[key] = table_row[key]\n try:\n return cls(**kwargs)\n except KeyError:\n print(kwargs)", - "docstring": "Build and return a `FileHandle` from an `astropy.table.row.Row`" - }, - { - "code": "def get_link(self, peer):\n for access in peer.accesses:\n if access.type == 'mqtt':\n break\n else:\n return None\n server = (access.server.host, access.server.port)\n with self.__lock:\n try:\n return self._links[server]\n except KeyError:\n link = self._links[server] = MQTTLink(access)\n return link", - "docstring": "Retrieves the link to the given peer" - }, - { - "code": "def close( self ):\n if self.tag in self.parent.twotags:\n self.parent.content.append( \"\" % self.tag )\n elif self.tag in self.parent.onetags:\n raise ClosingError( self.tag )\n elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:\n raise DeprecationError( self.tag )", - "docstring": "Append a closing tag unless element has only opening tag." - }, - { - "code": "def select(self, axis: AxisIdentifier, index, force_copy: bool = False) -> HistogramBase:\n if index == slice(None) and not force_copy:\n return self\n axis_id = self._get_axis(axis)\n array_index = [slice(None, None, None) for i in range(self.ndim)]\n array_index[axis_id] = index\n frequencies = self._frequencies[tuple(array_index)].copy()\n errors2 = self._errors2[tuple(array_index)].copy()\n if isinstance(index, int):\n return self._reduce_dimension([ax for ax in range(self.ndim) if ax != axis_id], frequencies, errors2)\n elif isinstance(index, slice):\n if index.step is not None and index.step < 0:\n raise IndexError(\"Cannot change the order of bins\")\n copy = self.copy()\n copy._frequencies = frequencies\n copy._errors2 = errors2\n copy._binnings[axis_id] = self._binnings[axis_id][index]\n return copy\n else:\n raise ValueError(\"Invalid index.\")", - "docstring": "Select in an axis.\n\n Parameters\n ----------\n axis: int or str\n Axis, in which we select.\n index: int or slice\n Index of bin (as in numpy).\n force_copy: bool\n If True, identity slice force a copy to be made." 
- }, - { - "code": "def get_input(input_func, input_str):\n val = input_func(\"Please enter your {0}: \".format(input_str))\n while not val or not len(val.strip()):\n val = input_func(\"You didn't enter a valid {0}, please try again: \".format(input_str))\n return val", - "docstring": "Get input from the user given an input function and an input string" - }, - { - "code": "def splitter_support(py2enc):\n if sys.version < '3':\n def _fn_sentence(pattern, sentence):\n if REGEXTYPE == type(pattern):\n if pattern.flags & re.UNICODE:\n return sentence.decode(py2enc)\n else:\n return sentence\n else:\n return sentence\n def _fn_token2str(pattern):\n if REGEXTYPE == type(pattern):\n if pattern.flags & re.UNICODE:\n def _fn(token):\n return token.encode(py2enc)\n else:\n def _fn(token):\n return token\n else:\n def _fn(token):\n return token\n return _fn\n else:\n def _fn_sentence(pattern, sentence):\n return sentence\n def _fn_token2str(pattern):\n def _fn(token):\n return token\n return _fn\n def _fn_tokenize_pattern(text, pattern):\n pos = 0\n sentence = _fn_sentence(pattern, text)\n postprocess = _fn_token2str(pattern)\n for m in re.finditer(pattern, sentence):\n if pos < m.start():\n token = postprocess(sentence[pos:m.start()])\n yield (token.strip(), False)\n pos = m.start()\n token = postprocess(sentence[pos:m.end()])\n yield (token.strip(), True)\n pos = m.end()\n if pos < len(sentence):\n token = postprocess(sentence[pos:])\n yield (token.strip(), False)\n def _fn_tokenize_features(text, features):\n acc = []\n acc.append((text.strip(), False))\n for feat in features:\n for i,e in enumerate(acc):\n if e[1]==False:\n tmp = list(_fn_tokenize_pattern(e[0], feat))\n if len(tmp) > 0:\n acc.pop(i)\n acc[i:i] = tmp\n return acc\n return _fn_tokenize_pattern, _fn_tokenize_features", - "docstring": "Create tokenizer for use in boundary constraint parsing.\n\n :param py2enc: Encoding used by Python 2 environment.\n :type py2enc: str" - }, - { - "code": "def getAllReceivers( sender = Any, signal = Any ):\n receivers = {}\n for set in (\n getReceivers( sender, signal ),\n getReceivers( sender, Any ),\n getReceivers( Any, signal ),\n getReceivers( Any, Any ),\n ):\n for receiver in set:\n if receiver:\n try:\n if receiver not in receivers:\n receivers[receiver] = 1\n yield receiver\n except TypeError:\n pass", - "docstring": "Get list of all receivers from global tables\n\n This gets all receivers which should receive\n the given signal from sender, each receiver should\n be produced only once by the resulting generator" - }, - { - "code": "def get_storage_controller_hotplug_capable(self, controller_type):\n if not isinstance(controller_type, StorageControllerType):\n raise TypeError(\"controller_type can only be an instance of type StorageControllerType\")\n hotplug_capable = self._call(\"getStorageControllerHotplugCapable\",\n in_p=[controller_type])\n return hotplug_capable", - "docstring": "Returns whether the given storage controller supports\n hot-plugging devices.\n\n in controller_type of type :class:`StorageControllerType`\n The storage controller to check the setting for.\n\n return hotplug_capable of type bool\n Returned flag indicating whether the controller is hotplug capable" - }, - { - "code": "def on_comment_entered(self):\n text_edit = self.findChild(QtWidgets.QWidget, \"CommentBox\")\n comment = text_edit.text()\n context = self.controller.context\n context.data[\"comment\"] = comment\n placeholder = self.findChild(QtWidgets.QLabel, \"CommentPlaceholder\")\n 
placeholder.setVisible(not comment)", - "docstring": "The user has typed a comment" - }, - { - "code": "def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,\n frange=DEFAULT_FRANGE, duration=None, sampling=None,\n **kwargs):\n from gwpy.timeseries import TimeSeries\n if isinstance(data, TimeSeries):\n duration = abs(data.span)\n sampling = data.sample_rate.to('Hz').value\n kwargs.update({'epoch': data.t0.value})\n data = data.fft().value\n qgram, N = QTiling(duration, sampling, mismatch=mismatch, qrange=qrange,\n frange=frange).transform(data, **kwargs)\n far = 1.5 * N * numpy.exp(-qgram.peak['energy']) / duration\n return (qgram, far)", - "docstring": "Transform data by scanning over a `QTiling`\n\n This utility is provided mainly to allow direct manipulation of the\n `QTiling.transform` output. Most users probably just want to use\n :meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.\n\n Parameters\n ----------\n data : `~gwpy.timeseries.TimeSeries` or `ndarray`\n the time- or frequency-domain input data\n\n mismatch : `float`, optional\n maximum allowed fractional mismatch between neighbouring tiles\n\n qrange : `tuple` of `float`, optional\n `(low, high)` range of Qs to scan\n\n frange : `tuple` of `float`, optional\n `(low, high)` range of frequencies to scan\n\n duration : `float`, optional\n duration (seconds) of input, required if `data` is not a `TimeSeries`\n\n sampling : `float`, optional\n sample rate (Hertz) of input, required if `data` is not a `TimeSeries`\n\n **kwargs\n other keyword arguments to be passed to :meth:`QTiling.transform`,\n including ``'epoch'`` and ``'search'``\n\n Returns\n -------\n qgram : `QGram`\n the raw output of :meth:`QTiling.transform`\n\n far : `float`\n expected false alarm rate (Hertz) of white Gaussian noise with the\n same peak energy and total duration as `qgram`" - }, - { - "code": "def _detect(self):\n results = []\n self.results = []\n self.visited_all_paths = {}\n for contract in self.slither.contracts:\n for function in contract.functions:\n if function.is_implemented and function.contract == contract:\n if function.contains_assembly:\n continue\n uninitialized_local_variables = [v for v in function.local_variables if not v.is_storage and v.uninitialized]\n function.entry_point.context[self.key] = uninitialized_local_variables\n self._detect_uninitialized(function, function.entry_point, [])\n all_results = list(set(self.results))\n for(function, uninitialized_local_variable) in all_results:\n var_name = uninitialized_local_variable.name\n info = \"{} in {}.{} ({}) is a local variable never initialiazed\\n\"\n info = info.format(var_name,\n function.contract.name,\n function.name,\n uninitialized_local_variable.source_mapping_str)\n json = self.generate_json_result(info)\n self.add_variable_to_json(uninitialized_local_variable, json)\n self.add_function_to_json(function, json)\n results.append(json)\n return results", - "docstring": "Detect uninitialized local variables\n\n Recursively visit the calls\n Returns:\n dict: [contract name] = set(local variable uninitialized)" - }, - { - "code": "def end_state(self):\n if self.str_begin != len(self.format):\n if len(self.state) > 1 or self.state[-1] != 'string':\n self.fmt.append_text(\n \"(Bad format string; ended in state %r)\" % self.state[-1])\n else:\n self.fmt.append_text(self.format[self.str_begin:])\n return self.fmt", - "docstring": "Wrap things up and add any final string content." 
- }, - { - "code": "def overwrites_for(self, obj):\n if isinstance(obj, User):\n predicate = lambda p: p.type == 'member'\n elif isinstance(obj, Role):\n predicate = lambda p: p.type == 'role'\n else:\n predicate = lambda p: True\n for overwrite in filter(predicate, self._overwrites):\n if overwrite.id == obj.id:\n allow = Permissions(overwrite.allow)\n deny = Permissions(overwrite.deny)\n return PermissionOverwrite.from_pair(allow, deny)\n return PermissionOverwrite()", - "docstring": "Returns the channel-specific overwrites for a member or a role.\n\n Parameters\n -----------\n obj\n The :class:`Role` or :class:`abc.User` denoting\n whose overwrite to get.\n\n Returns\n ---------\n :class:`PermissionOverwrite`\n The permission overwrites for this object." - }, - { - "code": "def kick(self, channel, nick, message=None):\n self.send(\"KICK\", channel, nick, \":%s\" % (message or self.user.nick))", - "docstring": "Attempt to kick a user from a channel.\n\n If a message is not provided, defaults to own nick." - }, - { - "code": "def default(self, interface, vrid):\n vrrp_str = \"default vrrp %d\" % vrid\n return self.configure_interface(interface, vrrp_str)", - "docstring": "Defaults a vrrp instance from an interface\n\n Note:\n This method will attempt to default the vrrp on the node's\n operational config. Default results in the deletion of the\n specified vrrp . If the vrrp does not exist on the\n interface then this method will not perform any changes\n but still return True\n\n Args:\n interface (string): The interface to configure.\n vrid (integer): The vrid number for the vrrp to be defaulted.\n\n Returns:\n True if the vrrp could be defaulted otherwise False (see Node)" - }, - { - "code": "def val_to_fp(self, sort, signed=True, rm=None):\n if rm is None:\n rm = fp.fp.RM.default()\n if sort is None:\n sort = fp.fp.FSort.from_size(self.length)\n op = fp.fpToFP if signed else fp.fpToFPUnsigned\n return op(rm, self, sort)", - "docstring": "Interpret this bitvector as an integer, and return the floating-point representation of that integer.\n\n :param sort: The sort of floating point value to return\n :param signed: Optional: whether this value is a signed integer\n :param rm: Optional: the rounding mode to use\n :return: An FP AST whose value is the same as this BV" - }, - { - "code": "def vector_magnitude(vector_in):\n sq_sum = 0.0\n for vin in vector_in:\n sq_sum += vin**2\n return math.sqrt(sq_sum)", - "docstring": "Computes the magnitude of the input vector.\n\n :param vector_in: input vector\n :type vector_in: list, tuple\n :return: magnitude of the vector\n :rtype: float" - }, - { - "code": "def _reduce_dynamic_table(self, new_entry_size=0):\n assert(new_entry_size >= 0)\n cur_sz = len(self)\n dyn_tbl_sz = len(self._dynamic_table)\n while dyn_tbl_sz > 0 and cur_sz + new_entry_size > self._dynamic_table_max_size:\n last_elmt_sz = len(self._dynamic_table[-1])\n self._dynamic_table.pop()\n dyn_tbl_sz -= 1\n cur_sz -= last_elmt_sz", - "docstring": "_reduce_dynamic_table evicts entries from the dynamic table until it\n fits in less than the current size limit. 
The optional parameter,\n new_entry_size, allows the resize to happen so that a new entry of this\n size fits in.\n @param int new_entry_size: if called before adding a new entry, the size of the new entry in bytes (following # noqa: E501\n the RFC7541 definition of the size of an entry)\n @raise AssertionError" - }, - { - "code": "def _did_save(self, connection):\n self._new_password = None\n controller = NURESTSession.get_current_session().login_controller\n controller.password = None\n controller.api_key = self.api_key\n if connection.async:\n callback = connection.callbacks['remote']\n if connection.user_info:\n callback(connection.user_info, connection)\n else:\n callback(self, connection)\n else:\n return (self, connection)", - "docstring": "Launched when save has been successfully executed" - }, - { - "code": "def memoize(func):\n cache_name = '__CACHED_{}'.format(func.__name__)\n def wrapper(self, *args):\n cache = getattr(self, cache_name, None)\n if cache is None:\n cache = {}\n setattr(self, cache_name, cache)\n if args not in cache:\n cache[args] = func(self, *args)\n return cache[args]\n return wrapper", - "docstring": "Provides memoization for methods on a specific instance.\n Results are cached for given parameter list.\n\n See also: http://en.wikipedia.org/wiki/Memoization\n\n N.B. The cache object gets added to the instance instead of the global scope.\n Therefore cached results are restricted to that instance.\n The cache dictionary gets a name containing the name of the decorated function to\n avoid clashes.\n\n Example:\n\n class MyClass(object):\n @memoize\n def foo(self, a, b):\n return self._do_calculation(a, b)\n\n HINT: - The decorator does not work with keyword arguments." - }, - { - "code": "def reservations(self):\n command = [SINFO, '--reservation']\n output = subprocess.check_output(command, env=SINFO_ENV)\n output = output.decode()\n it = iter(output.splitlines())\n next(it)\n for line in it:\n rsv = Reservation.from_sinfo(line)\n yield rsv.name, rsv", - "docstring": "get nodes of every reservations" - }, - { - "code": "def respond(self, data, format='json'):\n dispatchers = {\n \"dict\": self.__respond_with_dict,\n \"list\": self.__respond_with_list\n }\n if not dispatchers.get(format, False):\n return json.dumps(data)\n return dispatchers[format](data)", - "docstring": "Converts a json object to a python datastructure based on\n specified format\n\n :param data: the json object\n :param format: python datastructure type. Defaults to: \"json\"\n :returns: a python specified object" - }, - { - "code": "def on_parallel_port_change(self, parallel_port):\n if not isinstance(parallel_port, IParallelPort):\n raise TypeError(\"parallel_port can only be an instance of type IParallelPort\")\n self._call(\"onParallelPortChange\",\n in_p=[parallel_port])", - "docstring": "Triggered when settings of a parallel port of the\n associated virtual machine have changed.\n\n in parallel_port of type :class:`IParallelPort`\n\n raises :class:`VBoxErrorInvalidVmState`\n Session state prevents operation.\n \n raises :class:`VBoxErrorInvalidObjectState`\n Session type prevents operation." 
- }, - { - "code": "def serialize_m2m_on_save(sender, action, instance, using, **kwargs):\n if action == \"post_add\":\n try:\n wrapped_instance = site_offline_models.get_wrapped_instance(instance)\n except ModelNotRegistered:\n pass\n else:\n wrapped_instance.to_outgoing_transaction(using, created=True)", - "docstring": "Part of the serialize transaction process that ensures m2m are\n serialized correctly.\n\n Skip those not registered." - }, - { - "code": "def validateTagAttributes(self, attribs, element):\n\t\tout = {}\n\t\tif element not in _whitelist:\n\t\t\treturn out\n\t\twhitelist = _whitelist[element]\n\t\tfor attribute in attribs:\n\t\t\tvalue = attribs[attribute]\n\t\t\tif attribute not in whitelist:\n\t\t\t\tcontinue\n\t\t\tif attribute == u'style':\n\t\t\t\tvalue = self.checkCss(value)\n\t\t\t\tif value == False:\n\t\t\t\t\tcontinue\n\t\t\telif attribute == u'id':\n\t\t\t\tvalue = self.escapeId(value)\n\t\t\tout[attribute] = value\n\t\treturn out", - "docstring": "docstring for validateTagAttributes" - }, - { - "code": "def pywt_pad_mode(pad_mode, pad_const=0):\n pad_mode = str(pad_mode).lower()\n if pad_mode == 'constant' and pad_const != 0.0:\n raise ValueError('constant padding with constant != 0 not supported '\n 'for `pywt` back-end')\n try:\n return PAD_MODES_ODL2PYWT[pad_mode]\n except KeyError:\n raise ValueError(\"`pad_mode` '{}' not understood\".format(pad_mode))", - "docstring": "Convert ODL-style padding mode to pywt-style padding mode.\n\n Parameters\n ----------\n pad_mode : str\n The ODL padding mode to use at the boundaries.\n pad_const : float, optional\n Value to use outside the signal boundaries when ``pad_mode`` is\n 'constant'. Only a value of 0. is supported by PyWavelets\n\n Returns\n -------\n pad_mode_pywt : str\n The corresponding name of the requested padding mode in PyWavelets.\n See `signal extension modes`_.\n\n References\n ----------\n .. _signal extension modes:\n https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html" - }, - { - "code": "def declare(self, context=None):\n try:\n sql, uses_external = declare(self.full_table_name, self.definition, context)\n if uses_external:\n sql = sql.format(external_table=self.external_table.full_table_name)\n self.connection.query(sql)\n except pymysql.OperationalError as error:\n if error.args[0] == server_error_codes['command denied']:\n logger.warning(error.args[1])\n else:\n raise\n else:\n self._log('Declared ' + self.full_table_name)", - "docstring": "Use self.definition to declare the table in the schema." - }, - { - "code": "def _get_mapping_for_table(self, table):\n for mapping in self.mappings.values():\n if mapping[\"table\"] == table:\n return mapping", - "docstring": "Returns the first mapping for a table name" - }, - { - "code": "def status(name=None):\n name = 'freezer' if not name else name\n return all(os.path.isfile(i) for i in _paths(name))", - "docstring": "Return True if there is already a frozen state.\n\n A frozen state is merely a list of packages (including the\n version) in a specific time. This information can be used to\n compare with the current list of packages, and revert the\n installation of some extra packages that are in the system.\n\n name\n Name of the frozen state. Optional.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' freezer.status\n salt '*' freezer.status pre_install" - }, - { - "code": "def setup_jobs(outpath, options, input_files):\n job_inputs = None\n reverse_mapping = None\n fasta_file_contents = {}\n for input_file in input_files:\n assert(not(fasta_file_contents.get(input_file)))\n if any(fnmatch(input_file, x) for x in pdb_file_wildcards):\n pdb = PDB.from_filepath(input_file, strict=True)\n pdb.pdb_id = os.path.basename(input_file).split('.')[0] \n if pdb.pdb_id.startswith('pdb') and len(pdb.pdb_id) >= 7:\n pdb.pdb_id = pdb.pdb_id.replace('pdb', '') \n fasta_file_contents[input_file] = (pdb.create_fasta(prefer_seqres_order = False), 'PDB')\n else:\n fasta_file_contents[input_file] = (read_file(input_file), 'FASTA')\n found_sequences, reverse_mapping, errors = get_sequences(options, fasta_file_contents)\n if found_sequences:\n reformat(found_sequences)\n if errors:\n return None, False, errors\n desired_sequences = {}\n for key, sequence in found_sequences.iteritems():\n pdb_id, chain, file_name = key\n if options.chain is None or chain == options.chain:\n desired_sequences[key] = sequence\n job_inputs, errors = create_inputs(options, outpath, desired_sequences)\n if reverse_mapping:\n segment_mapping_file = os.path.join(outpath, \"segment_map.json\")\n colorprinter.message(\"Creating a reverse mapping file %s.\" % segment_mapping_file)\n write_file(segment_mapping_file, json.dumps(reverse_mapping))\n post_processing_script = read_file(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'post_processing.py'))\n write_file(os.path.join(outpath, 'post_processing.py'), post_processing_script, 'w')\n if options.secondary_structure_file:\n write_file(os.path.join(outpath, 'ss_filter.json'), json.dumps({'secondary_structure_filter' : SecondaryStructureDefinition.from_filepath(options.secondary_structure_file).data}), 'w')\n return job_inputs, reverse_mapping != None, errors", - "docstring": "This function sets up the jobs by creating the necessary input files as expected.\n - outpath is where the output is to be stored.\n - options is the optparse options object.\n - input_files is a list of paths to input files." - }, - { - "code": "def _HasAccessToClient(self, subject, token):\n client_id, _ = rdfvalue.RDFURN(subject).Split(2)\n client_urn = rdf_client.ClientURN(client_id)\n return self.CheckClientAccess(token, client_urn)", - "docstring": "Checks if user has access to a client under given URN." - }, - { - "code": "def permitted_actions(self, obj=None):\n return [a for a in Action.registered\n if self.allow(a, obj(str(a)) if obj is not None else None)]", - "docstring": "Determine permitted actions for a given object pattern." - }, - { - "code": "def peek(init, exposes, debug=False):\n def _peek(store, container, _stack=None):\n args = [ store.peek(objname, container, _stack=_stack) \\\n for objname in exposes ]\n if debug:\n print(args)\n return init(*args)\n return _peek", - "docstring": "Default deserializer factory.\n\n Arguments:\n\n init (callable): type constructor.\n\n exposes (iterable): attributes to be peeked and passed to `init`.\n\n Returns:\n\n callable: deserializer (`peek` routine)." 
- }, - { - "code": "def median_bias(n):\n if type(n) is not int or n <= 0:\n raise ValueError('n must be a positive integer')\n if n >= 1000:\n return numpy.log(2)\n ans = 1\n for i in range(1, int((n - 1) / 2 + 1)):\n ans += 1.0 / (2*i + 1) - 1.0 / (2*i)\n return ans", - "docstring": "Calculate the bias of the median average PSD computed from `n` segments.\n\n Parameters\n ----------\n n : int\n Number of segments used in PSD estimation.\n\n Returns\n -------\n ans : float\n Calculated bias.\n\n Raises\n ------\n ValueError\n For non-integer or non-positive `n`.\n\n Notes\n -----\n See arXiv:gr-qc/0509116 appendix B for details." - }, - { - "code": "def validate_number_attribute(\n fully_qualified_name: str,\n spec: Dict[str, Any],\n attribute: str,\n value_type: Union[Type[int], Type[float]] = int,\n minimum: Optional[Union[int, float]] = None,\n maximum: Optional[Union[int, float]] = None) -> Optional[InvalidNumberError]:\n if attribute not in spec:\n return\n try:\n value = value_type(spec[attribute])\n if (minimum is not None and value < minimum) or (maximum is not None and value > maximum):\n raise None\n except:\n return InvalidNumberError(fully_qualified_name, spec, attribute, value_type, minimum,\n maximum)", - "docstring": "Validates to ensure that the value is a number of the specified type, and lies with the specified range" - }, - { - "code": "def _load_patterns(self, folders, pattern_dict=None):\n if pattern_dict is None:\n pattern_dict = {}\n for folder in folders:\n for file in os.listdir(folder):\n if regex.match('^[\\w-]+$', file):\n self._load_pattern_file(os.path.join(folder, file), pattern_dict)\n return pattern_dict", - "docstring": "Load all pattern from all the files in folders" - }, - { - "code": "def new_action(self, method='GET', **kwargs):\n if method not in self.methods:\n raise TypeError('{} not in valid method(s): {}.'.format(method, self.methods))\n return Action(self, method, **kwargs)", - "docstring": "Create a new Action linked to this endpoint with the given args." 
- }, - { - "code": "def get_urls_and_locations(self, urls):\n location_generator = self.get_new_locations(urls)\n initial_cache = list(set(urls))\n return CachedIterable(location_generator, initial_cache)", - "docstring": "Get URLs and their redirection addresses.\n\n :param urls: a list of URL addresses\n :returns: an instance of CachedIterable containing given URLs\n and valid location header values of their responses" - }, - { - "code": "def bandpass(self, flow, fhigh, gpass=2, gstop=30, fstop=None, type='iir',\n filtfilt=True, **kwargs):\n filt = filter_design.bandpass(flow, fhigh, self.sample_rate,\n fstop=fstop, gpass=gpass, gstop=gstop,\n analog=False, type=type, **kwargs)\n return self.filter(*filt, filtfilt=filtfilt)", - "docstring": "Filter this `TimeSeries` with a band-pass filter.\n\n Parameters\n ----------\n flow : `float`\n lower corner frequency of pass band\n\n fhigh : `float`\n upper corner frequency of pass band\n\n gpass : `float`\n the maximum loss in the passband (dB).\n\n gstop : `float`\n the minimum attenuation in the stopband (dB).\n\n fstop : `tuple` of `float`, optional\n `(low, high)` edge-frequencies of stop band\n\n type : `str`\n the filter type, either ``'iir'`` or ``'fir'``\n\n **kwargs\n other keyword arguments are passed to\n :func:`gwpy.signal.filter_design.bandpass`\n\n Returns\n -------\n bpseries : `TimeSeries`\n a band-passed version of the input `TimeSeries`\n\n See Also\n --------\n gwpy.signal.filter_design.bandpass\n for details on the filter design\n TimeSeries.filter\n for details on how the filter is applied\n\n .. note::\n\n When using `scipy < 0.16.0` some higher-order filters may be\n unstable. With `scipy >= 0.16.0` higher-order filters are\n decomposed into second-order-sections, and so are much more stable." - }, - { - "code": "def get_compound_pd(self):\n entry1 = PDEntry(self.entry1.composition, 0)\n entry2 = PDEntry(self.entry2.composition, 0)\n cpd = CompoundPhaseDiagram(\n self.rxn_entries + [entry1, entry2],\n [Composition(entry1.composition.reduced_formula),\n Composition(entry2.composition.reduced_formula)],\n normalize_terminal_compositions=False) \n return cpd", - "docstring": "Get the CompoundPhaseDiagram object, which can then be used for\n plotting.\n\n Returns:\n (CompoundPhaseDiagram)" - }, - { - "code": "def publish_message(self, message, content_type=None, headers=None, mandatory=False, immediate=False):\n logger.debug(\"Publishing message\")\n try:\n self._connect()\n return self._do_publish(mandatory=mandatory,\n immediate=immediate,\n content_type=content_type,\n headers=headers,\n message=message)\n except pika.exceptions.AMQPConnectionError:\n logger.error(\"AMQPConnectionError occurred. Message not published.\")\n raise PublishMessageError\n except NackError:\n logger.error(\"NackError occurred. Message not published.\")\n raise PublishMessageError\n except UnroutableError:\n logger.error(\"UnroutableError occurred. Message not published.\")\n raise PublishMessageError\n except Exception:\n logger.exception(\"Unknown exception occurred. 
Message not published.\")\n raise PublishMessageError", - "docstring": "Publish a response message to a RabbitMQ instance.\n\n :param message: Response message\n :param content_type: Pika BasicProperties content_type value\n :param headers: Message header properties\n :param mandatory: The mandatory flag\n :param immediate: The immediate flag\n\n :returns: Boolean corresponding to the success of publishing\n :rtype: bool" - }, - { - "code": "def name2marc(self, key, value):\n result = self.get('100', {})\n result['a'] = value.get('value')\n result['b'] = value.get('numeration')\n result['c'] = value.get('title')\n result['q'] = value.get('preferred_name')\n if 'name_variants' in value:\n self['400'] = [{'a': el} for el in value['name_variants']]\n if 'native_names' in value:\n self['880'] = [{'a': el} for el in value['native_names']]\n if 'previous_names' in value:\n prev_names = [\n {'a': u'Formerly {}'.format(prev_name)}\n for prev_name in value['previous_names']\n ]\n self['667'] = prev_names\n return result", - "docstring": "Populates the ``100`` field.\n\n Also populates the ``400``, ``880``, and ``667`` fields through side\n effects." - }, - { - "code": "def run_forever(dyndnsclients):\n while True:\n try:\n time.sleep(15)\n for dyndnsclient in dyndnsclients:\n dyndnsclient.check()\n except (KeyboardInterrupt,):\n break\n except (Exception,) as exc:\n LOG.critical(\"An exception occurred in the dyndns loop\", exc_info=exc)\n return 0", - "docstring": "Run an endless loop accross the give dynamic dns clients.\n\n :param dyndnsclients: list of DynDnsClients" - }, - { - "code": "def _get_down_up_string(self):\n down_up = \"\"\n if not (self.down and self.up):\n if self.down:\n down_up = \"down\"\n elif self.up:\n down_up = \"up\"\n return down_up", - "docstring": "Return a string that will show whether the string is up or down\n\n return 'down' if the key is a press only\n return 'up' if the key is up only\n return '' if the key is up & down (as default)" - }, - { - "code": "def assert_tz_offset(tz):\n tz_offset = get_tz_offset(tz)\n system_offset = get_system_offset()\n if tz_offset != system_offset:\n msg = ('Timezone offset does not match system offset: {0} != {1}. '\n 'Please, check your config files.').format(\n tz_offset, system_offset\n )\n raise ValueError(msg)", - "docstring": "Assert that system's timezone offset equals to the timezone offset found.\n\n If they don't match, we probably have a misconfiguration, for example, an\n incorrect timezone set in /etc/timezone file in systemd distributions." - }, - { - "code": "def args_repr(*args, **kwargs):\n items = [repr(a) for a in args]\n items += [\"%s = %r\" % (k, v) for k, v in iter(kwargs.items())]\n return \", \".join(items)", - "docstring": "Returns human-readable string representation of both positional and\n keyword arguments passed to the function.\n\n This function uses the built-in :func:`repr()` function to convert\n individual arguments to string.\n\n >>> args_repr(\"a\", (1, 2), some_keyword = list(\"abc\"))\n \"'a', (1, 2), some_keyword = ['a', 'b', 'c']\"" - }, - { - "code": "def __step6(self):\n minval = self.__find_smallest()\n for i in range(self.n):\n for j in range(self.n):\n if self.row_covered[i]:\n self.C[i][j] += minval\n if not self.col_covered[j]:\n self.C[i][j] -= minval\n return 4", - "docstring": "Add the value found in Step 4 to every element of each covered\n row, and subtract it from every element of each uncovered column.\n Return to Step 4 without altering any stars, primes, or covered\n lines." 
- }, - { - "code": "def connect(self):\n device = None\n for device in self.devices:\n if not device.connected:\n self.connection.emit_message(\"Connecting {}\".format(str(device)), log_level=logging.INFO)\n protocol_name = device.get_protocol_name()\n device.protocol = make_protocol(protocol_name, device)\n command = device.protocol.get_command()\n self.ctrl.spawn_session(command=command)\n try:\n result = device.connect(self.ctrl)\n except CommandSyntaxError as exc:\n if exc.command:\n cmd = exc.command\n host = device.hostname\n else:\n cmd = command\n host = \"Jumphost/CSM\"\n self.connection.log(\n \"Command not supported or not authorized on {}: '{}'\".format(host, cmd))\n raise CommandError(message=\"Command not supported or not authorized\",\n command=cmd,\n host=host)\n if result:\n self.connection.emit_message(\"Connected {}\".format(device), log_level=logging.INFO)\n else:\n if device.last_error_msg:\n message = device.last_error_msg\n device.last_error_msg = None\n else:\n message = \"Connection error\"\n self.connection.log(message)\n raise ConnectionError(message)\n if device is None:\n raise ConnectionError(\"No devices\")\n return True", - "docstring": "Connect to the target device using the intermediate jumphosts." - }, - { - "code": "def _validate_and_get_value(options, options_name, key, _type):\n if isinstance(options, dict):\n has = lambda k: k in options\n get = lambda k: options[k]\n elif isinstance(options, object):\n has = lambda k: hasattr(options, k)\n get = lambda k: getattr(options, k)\n else:\n raise ImproperlyConfigured(\n '`{}` must be a dictionary-like object.'.format(options_name))\n if not has(key):\n raise ImproperlyConfigured(\n '`{}` must be specified in `{}`'.format(key, options_name))\n value = get(key)\n if not isinstance(value, _type):\n raise ImproperlyConfigured(\n '`{}` in `{}` must be a {}'.format(key, options_name, repr(_type)))\n return value", - "docstring": "Check that `options` has a value for `key` with type\n `_type`. Return that value. `options_name` is a string representing a\n human-readable name for `options` to be used when printing errors." - }, - { - "code": "def event(self, name, payload=None, coalesce=True):\n return self.connection.call(\n 'event',\n {'Name': name, 'Payload': payload, 'Coalesce': coalesce},\n expect_body=False)", - "docstring": "Send an event to the cluster. Can take an optional payload as well,\n which will be sent in the form that it's provided." - }, - { - "code": "def get_feature_set_all():\n features = get_feature_set()\n features.append('cusum')\n features.append('eta')\n features.append('n_points')\n features.append('period_SNR')\n features.append('period_log10FAP')\n features.append('period_uncertainty')\n features.append('weighted_mean')\n features.append('weighted_std')\n features.sort()\n return features", - "docstring": "Return a list of entire features.\n\n A set of entire features regardless of being used to train a model or\n predict a class.\n\n Returns\n -------\n feature_names : list\n A list of features' names." 
- }, - { - "code": "def fill_n(self, values, weights=None, dropna: bool = True):\n values = np.asarray(values)\n if dropna:\n values = values[~np.isnan(values)]\n if self._binning.is_adaptive():\n map = self._binning.force_bin_existence(values)\n self._reshape_data(self._binning.bin_count, map)\n if weights:\n weights = np.asarray(weights)\n self._coerce_dtype(weights.dtype)\n (frequencies, errors2, underflow, overflow, stats) = \\\n calculate_frequencies(values, self._binning, dtype=self.dtype,\n weights=weights, validate_bins=False)\n self._frequencies += frequencies\n self._errors2 += errors2\n if self.keep_missed:\n self.underflow += underflow\n self.overflow += overflow\n if self._stats:\n for key in self._stats:\n self._stats[key] += stats.get(key, 0.0)", - "docstring": "Update histograms with a set of values.\n\n Parameters\n ----------\n values: array_like\n weights: Optional[array_like]\n drop_na: Optional[bool]\n If true (default), all nan's are skipped." - }, - { - "code": "def my_init(self):\n self._start_time = time.time()\n self._stats = {}\n self._stats_lock = threading.Lock()", - "docstring": "Method automatically called from base class constructor." - }, - { - "code": "def is_connected(self):\n return all([r.is_connected() for r in dict.values(self.children)])", - "docstring": "Indication of the connection state of all children" - }, - { - "code": "def is_time(self):\n dt = DATA_TYPES['time']\n if type(self.data) is dt['type'] and ':' in str(self.data) and str(self.data).count(':') == 2:\n date_split = str(self.data).split(':')\n h, m, s = date_split[0], date_split[1], date_split[2]\n valid_hour, valid_min, valid_sec = int(h) in HOURS, int(m) in MINUTES, int(float(s)) in SECONDS\n if all(i is True for i in (valid_hour, valid_min, valid_sec)):\n self.type = 'time'.upper()\n self.len = None\n return True", - "docstring": "Determine if a data record is of type TIME." - }, - { - "code": "def is_reversible(P):\n import msmtools.analysis as msmana\n sets = connected_sets(P, strong=False)\n for s in sets:\n Ps = P[s, :][:, s]\n if not msmana.is_transition_matrix(Ps):\n return False\n pi = msmana.stationary_distribution(Ps)\n X = pi[:, None] * Ps\n if not np.allclose(X, X.T):\n return False\n return True", - "docstring": "Returns if P is reversible on its weakly connected sets" - }, - { - "code": "def structure_repr(self):\n ret = '{%s}' % ', '.join([str(x) for x in self.elements])\n return self._wrap_packed(ret)", - "docstring": "Return the LLVM IR for the structure representation" - }, - { - "code": "def name_check(self, original, loc, tokens):\n internal_assert(len(tokens) == 1, \"invalid name tokens\", tokens)\n if self.strict:\n self.unused_imports.discard(tokens[0])\n if tokens[0] == \"exec\":\n return self.check_py(\"3\", \"exec function\", original, loc, tokens)\n elif tokens[0].startswith(reserved_prefix):\n raise self.make_err(CoconutSyntaxError, \"variable names cannot start with reserved prefix \" + reserved_prefix, original, loc)\n else:\n return tokens[0]", - "docstring": "Check the given base name." - }, - { - "code": "def getMd5Checksum(self):\n references = sorted(\n self.getReferences(),\n key=lambda ref: ref.getMd5Checksum())\n checksums = ''.join([ref.getMd5Checksum() for ref in references])\n md5checksum = hashlib.md5(checksums).hexdigest()\n return md5checksum", - "docstring": "Returns the MD5 checksum for this reference set. This checksum is\n calculated by making a list of `Reference.md5checksum` for all\n `Reference`s in this set. 
We then sort this list, and take the\n MD5 hash of all the strings concatenated together." - }, - { - "code": "def find(self, location):\n try:\n content = self.store[location]\n return StringIO(content)\n except:\n reason = 'location \"%s\" not in document store' % location\n raise Exception, reason", - "docstring": "Find the specified location in the store.\n @param location: The I{location} part of a URL.\n @type location: str\n @return: An input stream to the document.\n @rtype: StringIO" - }, - { - "code": "def dateparser(self, dformat='%d/%m/%Y'):\n def dateparse(dates):\n return [pd.datetime.strptime(d, dformat) for d in dates]\n return dateparse", - "docstring": "Returns a date parser for pandas" - }, - { - "code": "def get_all_keywords(self, term_so_far='', current_dict=None):\n terms_present = {}\n if not term_so_far:\n term_so_far = ''\n if current_dict is None:\n current_dict = self.keyword_trie_dict\n for key in current_dict:\n if key == '_keyword_':\n terms_present[term_so_far] = current_dict[key]\n else:\n sub_values = self.get_all_keywords(term_so_far + key, current_dict[key])\n for key in sub_values:\n terms_present[key] = sub_values[key]\n return terms_present", - "docstring": "Recursively builds a dictionary of keywords present in the dictionary\n And the clean name mapped to those keywords.\n\n Args:\n term_so_far : string\n term built so far by adding all previous characters\n current_dict : dict\n current recursive position in dictionary\n\n Returns:\n terms_present : dict\n A map of key and value where each key is a term in the keyword_trie_dict.\n And value mapped to it is the clean name mapped to it.\n\n Examples:\n >>> keyword_processor = KeywordProcessor()\n >>> keyword_processor.add_keyword('j2ee', 'Java')\n >>> keyword_processor.add_keyword('Python', 'Python')\n >>> keyword_processor.get_all_keywords()\n >>> {'j2ee': 'Java', 'python': 'Python'}\n >>> # NOTE: for case_insensitive all keys will be lowercased." - }, - { - "code": "def list_to_array(value):\n if value == None:\n return []\n elif type(value) in [list, tuple, set]:\n return list(value)\n elif type(value) in [str]:\n return value.split(',')\n else:\n return [value]", - "docstring": "Converts value into array object with empty array as default.\n Strings with comma-delimited values are split into array of strings.\n\n :param value: the list to convert.\n\n :return: array object or empty array when value is None" - }, - { - "code": "def gf_lehmann(eig_e, eig_states, d_dag, beta, omega, d=None):\n ew = np.exp(-beta*eig_e)\n zet = ew.sum()\n G = np.zeros_like(omega)\n basis_create = np.dot(eig_states.T, d_dag.dot(eig_states))\n if d is None:\n tmat = np.square(basis_create)\n else:\n tmat = np.dot(eig_states.T, d.T.dot(eig_states))*basis_create\n tmat *= np.add.outer(ew, ew)\n gap = np.add.outer(-eig_e, eig_e)\n N = eig_e.size\n for i, j in product(range(N), range(N)):\n G += tmat[i, j] / (omega + gap[i, j])\n return G / zet", - "docstring": "Outputs the lehmann representation of the greens function\n omega has to be given, as matsubara or real frequencies" - }, - { - "code": "def setup_logging(config=None):\n print(__name__)\n if config and config.get('logging'):\n logging.config.dictConfig(config.get('logging'))\n else:\n logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',\n level=logging.DEBUG)", - "docstring": "Setup logging configuration." 
- }, - { - "code": "def _from_dict(cls, _dict):\n args = {}\n if 'generic' in _dict:\n args['generic'] = [\n DialogRuntimeResponseGeneric._from_dict(x)\n for x in (_dict.get('generic'))\n ]\n if 'intents' in _dict:\n args['intents'] = [\n RuntimeIntent._from_dict(x) for x in (_dict.get('intents'))\n ]\n if 'entities' in _dict:\n args['entities'] = [\n RuntimeEntity._from_dict(x) for x in (_dict.get('entities'))\n ]\n if 'actions' in _dict:\n args['actions'] = [\n DialogNodeAction._from_dict(x) for x in (_dict.get('actions'))\n ]\n if 'debug' in _dict:\n args['debug'] = MessageOutputDebug._from_dict(_dict.get('debug'))\n if 'user_defined' in _dict:\n args['user_defined'] = _dict.get('user_defined')\n return cls(**args)", - "docstring": "Initialize a MessageOutput object from a json dictionary." - }, - { - "code": "def write_values(self):\n return dict(((k, v.value) for k, v in self._inputs.items() if not v.is_secret and not v.is_empty(False)))", - "docstring": "Return the dictionary with which to write values" - }, - { - "code": "def get_catalog_query_session(self):\n if not self.supports_catalog_query():\n raise errors.Unimplemented()\n return sessions.CatalogQuerySession(runtime=self._runtime)", - "docstring": "Gets the catalog query session.\n\n return: (osid.cataloging.CatalogQuerySession) - a\n ``CatalogQuerySession``\n raise: OperationFailed - unable to complete request\n raise: Unimplemented - ``supports_catalog_query()`` is\n ``false``\n *compliance: optional -- This method must be implemented if\n ``supports_catalog_query()`` is ``true``.*" - }, - { - "code": "def run_apidoc(_):\n import better_apidoc\n better_apidoc.main([\n 'better-apidoc',\n '-t',\n os.path.join('.', '_templates'),\n '--force',\n '--no-toc',\n '--separate',\n '-o',\n os.path.join('.', 'API'),\n os.path.join('..', 'src', 'qnet'),\n ])", - "docstring": "Generage API documentation" - }, - { - "code": "def prev_period(self):\n return self.from_date(self.prev(self.lower), period=self.period)", - "docstring": "The period before this range.\n\n >>> span = PeriodRange.from_date(date(2000, 1, 1), period=\"month\")\n >>> span.prev_period()\n PeriodRange([datetime.date(1999, 12, 1),datetime.date(2000, 1, 1)))\n\n :return: A new :class:`~spans.types.PeriodRange` for the period\n before this period" - }, - { - "code": "def getDynMeth(name):\n cname, fname = name.rsplit('.', 1)\n clas = getDynLocal(cname)\n if clas is None:\n return None\n return getattr(clas, fname, None)", - "docstring": "Retrieve and return an unbound method by python path." - }, - { - "code": "def urlQueryParser(url, querydict):\n address_parse = urlparse(url)\n return urlunparse(address_parse._replace(query=urlencode(querydict)))", - "docstring": "parse a url query" - }, - { - "code": "def on_ok(self, sender):\n logger.debug(\"in on_ok with sender %s\" % sender)\n if sender == self.ion_task and not self.transfer_done:\n ion_structure = self.ion_task.get_final_structure()\n self.ioncell_task._change_structure(ion_structure)\n self.transfer_done = True\n self.ioncell_task.unlock(source_node=self)\n elif sender == self.ioncell_task and self.target_dilatmx:\n actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)\n if self.target_dilatmx < actual_dilatmx:\n self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)\n self.history.info('Converging dilatmx. 
Value reduce from {} to {}.'\n .format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))\n self.ioncell_task.reset_from_scratch()\n return super().on_ok(sender)", - "docstring": "This callback is called when one task reaches status S_OK.\n If sender == self.ion_task, we update the initial structure\n used by self.ioncell_task and we unlock it so that the job can be submitted." - }, - { - "code": "def get(self, uri):\n if uri.startswith('cid:'):\n head, part = self.id_dict[uri[4:]]\n return StringIO.StringIO(part.getvalue())\n if self.loc_dict.has_key(uri):\n head, part = self.loc_dict[uri]\n return StringIO.StringIO(part.getvalue())\n return None", - "docstring": "Get the content for the bodypart identified by the uri." - }, - { - "code": "def parse_name(name):\n name = name.strip()\n if not name:\n return []\n try:\n items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)]\n except KeyError as e:\n raise ValueError('Unknown x509 name field: %s' % e.args[0])\n for key, oid in NAME_OID_MAPPINGS.items():\n if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS:\n raise ValueError('Subject contains multiple \"%s\" fields' % key)\n return sort_name(items)", - "docstring": "Parses a subject string as used in OpenSSLs command line utilities.\n\n The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example\n ``/C=AT/L=Vienna/CN=example.com/emailAddress=user@example.com``. The function does its best to be lenient\n on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``,\n whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``).\n\n >>> parse_name('/CN=example.com')\n [('CN', 'example.com')]\n >>> parse_name('c=AT/l= Vienna/o=\"ex org\"/CN=example.com')\n [('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')]\n\n Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted\n based on x509 name specifications regardless of the given order:\n\n >>> parse_name('L=\"Vienna / District\"/EMAILaddress=user@example.com')\n [('L', 'Vienna / District'), ('emailAddress', 'user@example.com')]\n >>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT')\n True\n\n Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes,\n so strings like ``/OU=\"Org / Org Unit\"/CN=example.com`` will work as expected.\n\n >>> parse_name('L=\"Vienna / District\"/CN=example.com')\n [('L', 'Vienna / District'), ('CN', 'example.com')]\n\n But note that it's still easy to trick this function, if you really want to. The following example is\n *not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's\n certainly different:\n\n >>> parse_name('L=\"Vienna \" District\"/CN=example.com')\n [('L', 'Vienna'), ('CN', 'example.com')]\n\n Examples of where this string is used are:\n\n .. 
code-block:: console\n\n # openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com'\n # openssl x509 -in cert.pem -noout -subject -nameopt compat\n /C=AT/L=Vienna/CN=example.com" - }, - { - "code": "def _get_earliest_valid_timestamp(self, symbol, interval):\n kline = self.get_klines(\n symbol=symbol,\n interval=interval,\n limit=1,\n startTime=0,\n endTime=None\n )\n return kline[0][0]", - "docstring": "Get earliest valid open timestamp from Binance\n\n :param symbol: Name of symbol pair e.g BNBBTC\n :type symbol: str\n :param interval: Binance Kline interval\n :type interval: str\n\n :return: first valid timestamp" - }, - { - "code": "def sequence(self):\n self.open()\n seq = lvm_vg_get_seqno(self.handle)\n self.close()\n return seq", - "docstring": "Returns the volume group sequence number. This number increases\n everytime the volume group is modified." - }, - { - "code": "def ok_hash(token: str) -> bool:\n LOGGER.debug('Tails.ok_hash >>> token: %s', token)\n rv = re.match('[{}]{{42,44}}$'.format(B58), token) is not None\n LOGGER.debug('Tails.ok_hash <<< %s', rv)\n return rv", - "docstring": "Whether input token looks like a valid tails hash.\n\n :param token: candidate string\n :return: whether input token looks like a valid tails hash" - }, - { - "code": "def meta(self):\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"meta.json\", raises=False\n )\n if path is None:\n return None\n with open(path, 'r') as f:\n return json.load(f)", - "docstring": "Returns the contents of the meta.json file that describes important\n attributes about the dataset and modifies the behavior of the loader." - }, - { - "code": "def import_path(path):\n sys.path.insert(0, \".\")\n parts = path.split(\".\")\n module = None\n for i in range(1, len(parts)+1):\n if module is not None and hasattr(module, parts[i-1]):\n try:\n return _import_attributes(module, parts[i-1:])\n except AttributeError:\n pass\n module_path = \".\".join(parts[0:i])\n module = importlib.import_module(module_path)\n return module", - "docstring": "Imports any valid python module or attribute path as though it were a\n module\n\n :Example:\n >>> from yamlconf import import_path\n >>> from my_package.my_module.my_submodule import attribute\n >>> attribute.sub_attribute == \\\n ... import_path(\"y_package.my_module.my_submodule.attribute.sub_attribute\")\n True\n >>>\n\n :Parameters:\n path : `str`\n A valid python path that crosses modules and/or attributes" - }, - { - "code": "def askopenfile(mode=\"r\", **options):\n \"Ask for a filename to open, and returned the opened file\"\n filename = askopenfilename(**options)\n if filename:\n return open(filename, mode)\n return None", - "docstring": "Ask for a filename to open, and returned the opened file" - }, - { - "code": "def update(self, new_data, *args, **kwargs):\n self.train_set += new_data\n self.train_features = [(self.extract_features(d), c)\n for d, c in self.train_set]\n try:\n self.classifier = self.nltk_class.train(self.train_features,\n *args, **kwargs)\n except AttributeError:\n raise ValueError(\"NLTKClassifier must have a nltk_class\"\n \" variable that is not None.\")\n return True", - "docstring": "Update the classifier with new training data and re-trains the\n classifier.\n\n :param new_data: New data as a list of tuples of the form\n ``(text, label)``." 
- }, - { - "code": "def decode_tile_data(codec, tidx, data, data_size, stream):\n OPENJP2.opj_decode_tile_data.argtypes = [CODEC_TYPE,\n ctypes.c_uint32,\n ctypes.POINTER(ctypes.c_uint8),\n ctypes.c_uint32,\n STREAM_TYPE_P]\n OPENJP2.opj_decode_tile_data.restype = check_error\n datap = data.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8))\n OPENJP2.opj_decode_tile_data(codec,\n ctypes.c_uint32(tidx),\n datap,\n ctypes.c_uint32(data_size),\n stream)", - "docstring": "Reads tile data.\n\n Wraps the openjp2 library function opj_decode_tile_data.\n\n Parameters\n ----------\n codec : CODEC_TYPE\n The JPEG2000 codec\n tile_index : int\n The index of the tile being decoded\n data : array\n Holds a memory block into which data will be decoded.\n data_size : int\n The size of data in bytes\n stream : STREAM_TYPE_P\n The stream to decode.\n\n Raises\n ------\n RuntimeError\n If the OpenJPEG library routine opj_decode fails." - }, - { - "code": "def AutomaticShelf(table):\n if hasattr(table, '__table__'):\n table = table.__table__\n config = introspect_table(table)\n return Shelf.from_config(config, table)", - "docstring": "Given a SQLAlchemy Table, automatically generate a Shelf with metrics\n and dimensions based on its schema." - }, - { - "code": "def fix_e271(self, result):\n line_index = result['line'] - 1\n target = self.source[line_index]\n offset = result['column'] - 1\n fixed = fix_whitespace(target,\n offset=offset,\n replacement=' ')\n if fixed == target:\n return []\n else:\n self.source[line_index] = fixed", - "docstring": "Fix extraneous whitespace around keywords." - }, - { - "code": "def _get_fgbio_options(data, estimated_defaults, umi_method):\n group_opts = [\"--edits\", \"--min-map-q\"]\n cons_opts = [\"--min-input-base-quality\"]\n if umi_method != \"paired\":\n cons_opts += [\"--min-reads\", \"--max-reads\"]\n filter_opts = [\"--min-reads\", \"--min-base-quality\", \"--max-base-error-rate\"]\n defaults = {\"--min-reads\": \"1\",\n \"--max-reads\": \"100000\",\n \"--min-map-q\": \"1\",\n \"--min-base-quality\": \"13\",\n \"--max-base-error-rate\": \"0.1\",\n \"--min-input-base-quality\": \"2\",\n \"--edits\": \"1\"}\n defaults.update(estimated_defaults)\n ropts = config_utils.get_resources(\"fgbio\", data[\"config\"]).get(\"options\", [])\n assert len(ropts) % 2 == 0, \"Expect even number of options for fgbio\" % ropts\n ropts = dict(tz.partition(2, ropts))\n if \"--min-consensus-base-quality\" in ropts:\n ropts[\"--min-base-quality\"] = ropts.pop(\"--min-consensus-base-quality\")\n defaults.update(ropts)\n group_out = \" \".join([\"%s=%s\" % (x, defaults[x]) for x in group_opts])\n cons_out = \" \".join([\"%s=%s\" % (x, defaults[x]) for x in cons_opts])\n filter_out = \" \".join([\"%s=%s\" % (x, defaults[x]) for x in filter_opts])\n if umi_method != \"paired\":\n cons_out += \" --output-per-base-tags=false\"\n return group_out, cons_out, filter_out", - "docstring": "Get adjustable, through resources, or default options for fgbio." 
- }, - { - "code": "def next(self, task):\n uuid = str(task.uuid)\n for idx, otask in enumerate(self.tasks[:-1]):\n if otask.uuid == uuid:\n if self.tasks[idx + 1].status != 'SUCCESS':\n return self.tasks[idx + 1]\n else:\n uuid = self.tasks[idx + 1].uuid", - "docstring": "Find the next task\n\n :param kser.sequencing.task.Task task: previous task\n :return: The next task\n :rtype: kser.sequencing.task.Task or None" - }, - { - "code": "def category(self, category=None):\n if category is None:\n return int(self.url.category)\n self.url.category = str(category)", - "docstring": "If category is given, modify the URL correspondingly, return the\n current category otherwise." - }, - { - "code": "def handle(self, *args, **options):\n if 'username' in options:\n self.username = options['username']\n else:\n self.username = None\n if 'password' in options:\n self.password = options['password']\n else:\n self.password = None\n self.xml_path = options.get('xml')\n self.url = options.get('url')\n try:\n blog_index = BlogIndexPage.objects.get(\n title__icontains=options['blog_index'])\n except BlogIndexPage.DoesNotExist:\n raise CommandError(\"Incorrect blog index title - have you created it?\")\n if self.url == \"just_testing\":\n with open('test-data.json') as test_json:\n posts = json.load(test_json)\n elif self.xml_path:\n try:\n import lxml\n from blog.wp_xml_parser import XML_parser\n except ImportError as e:\n print(\"You must have lxml installed to run xml imports.\"\n \" Run `pip install lxml`.\")\n raise e\n self.xml_parser = XML_parser(self.xml_path)\n posts = self.xml_parser.get_posts_data()\n else:\n posts = self.get_posts_data(self.url)\n self.should_import_comments = options.get('import_comments')\n self.create_blog_pages(posts, blog_index)", - "docstring": "gets data from WordPress site" - }, - { - "code": "def authorize(self, request, *args, **kwargs):\n user = request.user\n if not user.is_authenticated or not user.socialaccount_set.exists():\n raise PermissionDenied()", - "docstring": "authorization logic\n raises PermissionDenied if user is not authorized" - }, - { - "code": "def ListMigrationsToProcess(migrations_root,\n current_migration_number\n ):\n migrations = []\n for m in os.listdir(migrations_root):\n if (current_migration_number is None or\n _MigrationFilenameToInt(m) > current_migration_number):\n migrations.append(m)\n return sorted(migrations, key=_MigrationFilenameToInt)", - "docstring": "Lists filenames of migrations with numbers bigger than a given one." 
- }, - { - "code": "def calc_q1_lz_v1(self):\n con = self.parameters.control.fastaccess\n flu = self.sequences.fluxes.fastaccess\n sta = self.sequences.states.fastaccess\n if sta.lz > 0.:\n flu.q1 = con.k4*sta.lz**(1.+con.gamma)\n else:\n flu.q1 = 0.\n sta.lz -= flu.q1", - "docstring": "Calculate the slow response of the lower zone layer.\n\n Required control parameters:\n |K4|\n |Gamma|\n\n Calculated fluxes sequence:\n |Q1|\n\n Updated state sequence:\n |LZ|\n\n Basic equations:\n :math:`\\\\frac{dLZ}{dt} = -Q1` \\n\n :math:`Q1 = \\\\Bigl \\\\lbrace\n {\n {K4 \\\\cdot LZ^{1+Gamma} \\\\ | \\\\ LZ > 0}\n \\\\atop\n {0 \\\\ | \\\\ LZ\\\\leq 0}\n }`\n\n Examples:\n\n As long as the lower zone storage is negative...\n\n >>> from hydpy.models.hland import *\n >>> parameterstep('1d')\n >>> simulationstep('12h')\n >>> k4(0.2)\n >>> gamma(0.0)\n >>> states.lz = -2.0\n >>> model.calc_q1_lz_v1()\n >>> fluxes.q1\n q1(0.0)\n >>> states.lz\n lz(-2.0)\n\n ...or zero, no slow discharge response occurs:\n\n >>> states.lz = 0.0\n >>> model.calc_q1_lz_v1()\n >>> fluxes.q1\n q1(0.0)\n >>> states.lz\n lz(0.0)\n\n For storage values above zero the linear...\n\n >>> states.lz = 2.0\n >>> model.calc_q1_lz_v1()\n >>> fluxes.q1\n q1(0.2)\n >>> states.lz\n lz(1.8)\n\n ...or nonlinear storage routing equation applies:\n\n >>> gamma(1.)\n >>> states.lz = 2.0\n >>> model.calc_q1_lz_v1()\n >>> fluxes.q1\n q1(0.4)\n >>> states.lz\n lz(1.6)\n\n Note that the assumed length of the simulation step is only a\n half day. Hence the effective value of the storage coefficient\n is not 0.2 but 0.1:\n\n >>> k4\n k4(0.2)\n >>> k4.value\n 0.1" - }, - { - "code": "def badge_svg_text(self):\n if len(self.template.split('\\n')) == 1:\n with open(self.template, mode='r') as file_handle:\n badge_text = file_handle.read()\n else:\n badge_text = self.template\n return badge_text.replace('{{ badge width }}', str(self.badge_width)) \\\n .replace('{{ font name }}', self.font_name) \\\n .replace('{{ font size }}', str(self.font_size)) \\\n .replace('{{ label }}', self.label) \\\n .replace('{{ value }}', self.value_text) \\\n .replace('{{ label anchor }}', str(self.label_anchor)) \\\n .replace('{{ label anchor shadow }}', str(self.label_anchor_shadow)) \\\n .replace('{{ value anchor }}', str(self.value_anchor)) \\\n .replace('{{ value anchor shadow }}', str(self.value_anchor_shadow)) \\\n .replace('{{ color }}', self.badge_color_code) \\\n .replace('{{ label text color }}', self.label_text_color) \\\n .replace('{{ value text color }}', self.value_text_color) \\\n .replace('{{ color split x }}', str(self.color_split_position)) \\\n .replace('{{ value width }}', str(self.badge_width - self.color_split_position))", - "docstring": "The badge SVG text." - }, - { - "code": "def search_vip_request(self, search):\n uri = 'api/v3/vip-request/?%s' % urllib.urlencode({'search': search})\n return super(ApiVipRequest, self).get(uri)", - "docstring": "Method to list vip request\n\n param search: search" - }, - { - "code": "def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(_LogicleLocator(self._transform))\n axis.set_minor_locator(_LogicleLocator(self._transform,\n subs=np.arange(2.0, 10.)))\n axis.set_major_formatter(matplotlib.ticker.LogFormatterSciNotation(\n labelOnlyBase=True))", - "docstring": "Set up the locators and formatters for the scale.\n\n Parameters\n ----------\n axis: matplotlib.axis\n Axis for which to set locators and formatters." 
- }, - { - "code": "def from_csv(cls, path):\n with open(path) as f:\n fields = map(float, next(f).split(','))\n if len(fields) == 3:\n return u.Quantity([[fields[0], 0, 0],\n [0, fields[1], 0],\n [0, 0, fields[2]]], unit=u.nanometers)\n elif len(fields) == 9:\n return u.Quantity([fields[0:3],\n fields[3:6],\n fields[6:9]], unit=u.nanometers)\n else:\n raise ValueError('This type of CSV is not supported. Please '\n 'provide a comma-separated list of three or nine '\n 'floats in a single-line file.')", - "docstring": "Get box vectors from comma-separated values in file `path`.\n\n The csv file must containt only one line, which in turn can contain\n three values (orthogonal vectors) or nine values (triclinic box).\n\n The values should be in nanometers.\n\n Parameters\n ----------\n path : str\n Path to CSV file\n\n Returns\n -------\n vectors : simtk.unit.Quantity([3, 3], unit=nanometers" - }, - { - "code": "def _sign_input(cls, input_, message, key_pairs):\n if isinstance(input_.fulfillment, Ed25519Sha256):\n return cls._sign_simple_signature_fulfillment(input_, message,\n key_pairs)\n elif isinstance(input_.fulfillment, ThresholdSha256):\n return cls._sign_threshold_signature_fulfillment(input_, message,\n key_pairs)\n else:\n raise ValueError(\"Fulfillment couldn't be matched to \"\n 'Cryptocondition fulfillment type.')", - "docstring": "Signs a single Input.\n\n Note:\n This method works only for the following Cryptoconditions\n currently:\n - Ed25519Fulfillment\n - ThresholdSha256.\n\n Args:\n input_ (:class:`~bigchaindb.common.transaction.\n Input`) The Input to be signed.\n message (str): The message to be signed\n key_pairs (dict): The keys to sign the Transaction with." - }, - { - "code": "def in_book_search(request):\n results = {}\n args = request.matchdict\n ident_hash = args['ident_hash']\n args['search_term'] = request.params.get('q', '')\n query_type = request.params.get('query_type', '')\n combiner = ''\n if query_type:\n if query_type.lower() == 'or':\n combiner = '_or'\n id, version = split_ident_hash(ident_hash)\n args['uuid'] = id\n args['version'] = version\n with db_connect() as db_connection:\n with db_connection.cursor() as cursor:\n cursor.execute(SQL['get-collated-state'], args)\n res = cursor.fetchall()\n if res and res[0][0]:\n statement = SQL['get-in-collated-book-search']\n else:\n statement = SQL['get-in-book-search']\n cursor.execute(statement.format(combiner=combiner), args)\n res = cursor.fetchall()\n results['results'] = {'query': [],\n 'total': len(res),\n 'items': []}\n results['results']['query'] = {\n 'id': ident_hash,\n 'search_term': args['search_term'],\n }\n for uuid, version, title, snippet, matches, rank in res:\n results['results']['items'].append({\n 'rank': '{}'.format(rank),\n 'id': '{}@{}'.format(uuid, version),\n 'title': '{}'.format(title),\n 'snippet': '{}'.format(snippet),\n 'matches': '{}'.format(matches),\n })\n resp = request.response\n resp.status = '200 OK'\n resp.content_type = 'application/json'\n resp.body = json.dumps(results)\n return resp", - "docstring": "Full text, in-book search." 
- }, - { - "code": "def ParseApplicationUsageRow(\n self, parser_mediator, query, row, **unused_kwargs):\n query_hash = hash(query)\n application_name = self._GetRowValue(query_hash, row, 'event')\n usage = 'Application {0:s}'.format(application_name)\n event_data = MacOSApplicationUsageEventData()\n event_data.application = self._GetRowValue(query_hash, row, 'app_path')\n event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')\n event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')\n event_data.count = self._GetRowValue(query_hash, row, 'number_times')\n event_data.query = query\n timestamp = self._GetRowValue(query_hash, row, 'last_time')\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(date_time, usage)\n parser_mediator.ProduceEventWithEventData(event, event_data)", - "docstring": "Parses an application usage row.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n query (str): query that created the row.\n row (sqlite3.Row): row." - }, - { - "code": "def git_handler(unused_build_context, target, fetch, package_dir, tar):\n target_name = split_name(target.name)\n repo_dir = join(package_dir, fetch.name) if fetch.name else package_dir\n try:\n repo = git.Repo(repo_dir)\n except (InvalidGitRepositoryError, NoSuchPathError):\n repo = git.Repo.clone_from(fetch.uri, repo_dir)\n assert repo.working_tree_dir == repo_dir\n tar.add(package_dir, arcname=target_name, filter=gitfilter)", - "docstring": "Handle remote Git repository URI.\n\n Clone the repository under the private builder workspace (unless already\n cloned), and add it to the package tar (filtering out git internals).\n\n TODO(itamar): Support branches / tags / specific commit hashes\n TODO(itamar): Support updating a cloned repository\n TODO(itamar): Handle submodules?\n TODO(itamar): Handle force pulls?" - }, - { - "code": "def verify_order(self, **kwargs):\n create_options = self._generate_create_dict(**kwargs)\n return self.client['Product_Order'].verifyOrder(create_options)", - "docstring": "Verifies an order for a piece of hardware.\n\n See :func:`place_order` for a list of available options." - }, - { - "code": "def del_group(self):\n idx = self.tabs.currentIndex()\n self.tabs.removeTab(idx)\n self.apply()", - "docstring": "Delete current group." - }, - { - "code": "def get_outliers(self):\n log.info(\"Clipping outliers...\")\n log.info('Iter %d/%d: %d outliers' %\n (0, self.oiter, len(self.outmask)))\n def M(x): return np.delete(x, np.concatenate(\n [self.nanmask, self.badmask, self.transitmask]), axis=0)\n t = M(self.time)\n outmask = [np.array([-1]), np.array(self.outmask)]\n while not np.array_equal(outmask[-2], outmask[-1]):\n if len(outmask) - 1 > self.oiter:\n log.error('Maximum number of iterations in ' +\n '``get_outliers()`` exceeded. Skipping...')\n break\n if np.any([np.array_equal(outmask[-1], i) for i in outmask[:-1]]):\n log.error('Function ``get_outliers()`` ' +\n 'is going in circles. 
Skipping...')\n break\n self.compute()\n f = SavGol(M(self.flux))\n med = np.nanmedian(f)\n MAD = 1.4826 * np.nanmedian(np.abs(f - med))\n inds = np.where((f > med + self.osigma * MAD) |\n (f < med - self.osigma * MAD))[0]\n inds = np.array([np.argmax(self.time == t[i]) for i in inds])\n self.outmask = np.array(inds, dtype=int)\n outmask.append(np.array(inds))\n log.info('Iter %d/%d: %d outliers' %\n (len(outmask) - 2, self.oiter, len(self.outmask)))", - "docstring": "Performs iterative sigma clipping to get outliers." - }, - { - "code": "def iter_all_repos(self, number=-1, since=None, etag=None, per_page=None):\n url = self._build_url('repositories')\n return self._iter(int(number), url, Repository,\n params={'since': since, 'per_page': per_page},\n etag=etag)", - "docstring": "Iterate over every repository in the order they were created.\n\n :param int number: (optional), number of repositories to return.\n Default: -1, returns all of them\n :param int since: (optional), last repository id seen (allows\n restarting this iteration)\n :param str etag: (optional), ETag from a previous request to the same\n endpoint\n :param int per_page: (optional), number of repositories to list per\n request\n :returns: generator of :class:`Repository `" - }, - { - "code": "def list_pp(ll, separator='|', header_line=True, autonumber=True):\n if autonumber:\n for cnt, i in enumerate(ll):\n i.insert(0, cnt if cnt > 0 or not header_line else '\n def lenlst(l):\n return [len(str(i)) for i in l]\n lst_len = [lenlst(i) for i in ll]\n lst_rot = zip(*lst_len[::-1])\n lst_len = [max(i) for i in lst_rot]\n frmt = separator + separator.join([\"{!s:\"+str(i)+\"}\" for i in lst_len]) + separator\n if header_line:\n header_line = '-' * len(frmt.format(*ll[0]))\n for cnt, l in enumerate(ll):\n if cnt < 2 and header_line:\n print(header_line)\n print(frmt.format(*l))\n if header_line:\n print(header_line)\n return lst_len", - "docstring": "pretty print list of lists ll" - }, - { - "code": "def add_frame_widget(self, ref, left=1, top=1, right=20, bottom=1, width=20, height=4, direction=\"h\", speed=1):\n if ref not in self.widgets:\n widget = widgets.FrameWidget(\n screen=self, ref=ref, left=left, top=top, right=right, bottom=bottom, width=width, height=height,\n direction=direction, speed=speed,\n )\n self.widgets[ref] = widget\n return self.widgets[ref]", - "docstring": "Add Frame Widget" - }, - { - "code": "def alias_absent(name, index):\n ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}\n try:\n alias = __salt__['elasticsearch.alias_get'](aliases=name, indices=index)\n if alias and alias.get(index, {}).get(\"aliases\", {}).get(name, None) is not None:\n if __opts__['test']:\n ret['comment'] = 'Alias {0} for index {1} will be removed'.format(name, index)\n ret['changes']['old'] = alias.get(index, {}).get(\"aliases\", {}).get(name, {})\n ret['result'] = None\n else:\n ret['result'] = __salt__['elasticsearch.alias_delete'](aliases=name, indices=index)\n if ret['result']:\n ret['comment'] = 'Successfully removed alias {0} for index {1}'.format(name, index)\n ret['changes']['old'] = alias.get(index, {}).get(\"aliases\", {}).get(name, {})\n else:\n ret['comment'] = 'Failed to remove alias {0} for index {1} for unknown reasons'.format(name, index)\n else:\n ret['comment'] = 'Alias {0} for index {1} is already absent'.format(name, index)\n except Exception as err:\n ret['result'] = False\n ret['comment'] = six.text_type(err)\n return ret", - "docstring": "Ensure that the index alias is absent.\n\n name\n 
Name of the index alias to remove\n index\n Name of the index for the alias" - }, - { - "code": "def get_cell_ngrams(mention, attrib=\"words\", n_min=1, n_max=1, lower=True):\n spans = _to_spans(mention)\n for span in spans:\n for ngram in get_sentence_ngrams(\n span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower\n ):\n yield ngram\n if span.sentence.is_tabular():\n for ngram in chain.from_iterable(\n [\n tokens_to_ngrams(\n getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower\n )\n for sentence in _get_table_cells(span.sentence.table)[\n span.sentence.cell\n ]\n if sentence != span.sentence\n ]\n ):\n yield ngram", - "docstring": "Get the ngrams that are in the Cell of the given mention, not including itself.\n\n Note that if a candidate is passed in, all of its Mentions will be searched.\n\n :param mention: The Mention whose Cell is being searched\n :param attrib: The token attribute type (e.g. words, lemmas, poses)\n :param n_min: The minimum n of the ngrams that should be returned\n :param n_max: The maximum n of the ngrams that should be returned\n :param lower: If True, all ngrams will be returned in lower case\n :rtype: a *generator* of ngrams" - }, - { - "code": "def publish(endpoint, purge_files, rebuild_manifest, skip_upload):\n print(\"Publishing site to %s ...\" % endpoint.upper())\n yass = Yass(CWD)\n target = endpoint.lower()\n sitename = yass.sitename\n if not sitename:\n raise ValueError(\"Missing site name\")\n endpoint = yass.config.get(\"hosting.%s\" % target)\n if not endpoint:\n raise ValueError(\"%s endpoint is missing in the config\" % target.upper())\n if target == \"s3\":\n p = publisher.S3Website(sitename=sitename,\n aws_access_key_id=endpoint.get(\"aws_access_key_id\"),\n aws_secret_access_key=endpoint.get(\"aws_secret_access_key\"),\n region=endpoint.get(\"aws_region\"))\n if not p.website_exists:\n print(\">>>\")\n print(\"Setting S3 site...\")\n if p.create_website() is True:\n time.sleep(10)\n p.create_www_website()\n print(\"New bucket created: %s\" % p.sitename)\n if rebuild_manifest:\n print(\">>>\")\n print(\"Rebuilding site's manifest...\")\n p.create_manifest_from_s3_files()\n if purge_files is True or endpoint.get(\"purge_files\") is True:\n print(\">>>\")\n print(\"Purging files...\")\n exclude_files = endpoint.get(\"purge_exclude_files\", [])\n p.purge_files(exclude_files=exclude_files)\n if not skip_upload:\n print(\">>>\")\n print(\"Uploading your site...\")\n p.upload(yass.build_dir)\n else:\n print(\">>>\")\n print(\"WARNING: files upload was skipped because of the use of --skip-upload\")\n print(\"\")\n print(\"Yass! Your site has been successfully published to: \")\n print(p.website_endpoint_url)\n footer()", - "docstring": "Publish the site" - }, - { - "code": "def close(self):\n if self._env is None:\n raise ValueError('env has already been closed.')\n _LIB.Close(self._env)\n self._env = None\n if self.viewer is not None:\n self.viewer.close()", - "docstring": "Close the environment." - }, - { - "code": "def log_transition(self, transition, from_state, instance, *args, **kwargs):\n save = kwargs.pop('save', True)\n log = kwargs.pop('log', True)\n super(Workflow, self).log_transition(\n transition, from_state, instance, *args, **kwargs)\n if save:\n instance.save()\n if log:\n self.db_log(transition, from_state, instance, *args, **kwargs)", - "docstring": "Generic transition logging." 
- }, - { - "code": "def _get_initial_binary_name(self):\n binary_name = None\n if self.project and self.is_simprocedure and not self.is_syscall:\n hooker = self.project.hooked_by(self.addr)\n if hooker is not None:\n binary_name = hooker.library_name\n if binary_name is None and self.binary is not None:\n binary_name = os.path.basename(self.binary.binary)\n return binary_name", - "docstring": "Determine the name of the binary where this function is.\n\n :return: None" - }, - { - "code": "def create_slug(title, plain_len=None):\n if plain_len: title = title[:plain_len]\n pass1 = OMIT_FROM_SLUG_PAT.sub('_', title).lower()\n return NORMALIZE_UNDERSCORES_PAT.sub('_', pass1)", - "docstring": "Tries to create a slug from a title, trading off collision risk with readability and minimized cruft\n\n title - a unicode object with a title to use as basis of the slug\n plain_len - the maximum character length preserved (from the beginning) of the title\n\n >>> from versa.contrib.datachefids import create_slug\n >>> create_slug(u\"The quick brown fox jumps over the lazy dog\")\n 'the_quick_brown_fox_jumps_over_the_lazy_dog'\n >>> create_slug(u\"The quick brown fox jumps over the lazy dog\", 20)\n 'the_quick_brown_fox'" - }, - { - "code": "def list_roles(self, mount_point=DEFAULT_MOUNT_POINT):\n api_path = '/v1/auth/{mount_point}/roles'.format(mount_point=mount_point)\n response = self._adapter.list(\n url=api_path\n )\n return response.json().get('data')", - "docstring": "List all the roles that are registered with the plugin.\n\n Supported methods:\n LIST: /auth/{mount_point}/roles. Produces: 200 application/json\n\n\n :param mount_point: The \"path\" the azure auth method was mounted on.\n :type mount_point: str | unicode\n :return: The \"data\" key from the JSON response of the request.\n :rtype: dict" - }, - { - "code": "def NormalizeRelativePath(path):\n path_components = path.split('/')\n normalized_components = []\n for component in path_components:\n if re.match(r'{[A-Za-z0-9_]+}$', component):\n normalized_components.append(\n '{%s}' % Names.CleanName(component[1:-1]))\n else:\n normalized_components.append(component)\n return '/'.join(normalized_components)", - "docstring": "Normalize camelCase entries in path." - }, - { - "code": "def attach_attachment(self, analysis, attachment):\n if not attachment:\n return\n if isinstance(attachment, list):\n for attach in attachment:\n self.attach_attachment(analysis, attach)\n return\n an_atts = analysis.getAttachment()\n atts_filenames = [att.getAttachmentFile().filename for att in an_atts]\n if attachment.getAttachmentFile().filename not in atts_filenames:\n an_atts.append(attachment)\n logger.info(\n \"Attaching %s to %s\" % (attachment.UID(), analysis))\n analysis.setAttachment([att.UID() for att in an_atts])\n analysis.reindexObject()\n else:\n self.warn(\"Attachment %s was not linked to analysis %s\" %\n (attachment.UID(), analysis))", - "docstring": "Attach a file or a given set of files to an analysis\n\n :param analysis: analysis where the files are to be attached\n :param attachment: files to be attached. 
This can be either a\n single file or a list of files\n :return: None" - }, - { - "code": "def get_from_import(self, resource, name):\n module_name = libutils.modname(resource)\n names = []\n if isinstance(name, list):\n names = [(imported, None) for imported in name]\n else:\n names = [(name, None), ]\n return FromImport(module_name, 0, tuple(names))", - "docstring": "The from import statement for `name` in `resource`" - }, - { - "code": "def get_network_interface_id(name, region=None, key=None, keyid=None,\n profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n r = {}\n try:\n enis = conn.get_all_network_interfaces(filters={'tag:Name': name})\n if not enis:\n r['error'] = {'message': 'No ENIs found.'}\n elif len(enis) > 1:\n r['error'] = {'message': 'Name specified is tagged on multiple ENIs.'}\n else:\n eni = enis[0]\n r['result'] = eni.id\n except boto.exception.EC2ResponseError as e:\n r['error'] = __utils__['boto.get_error'](e)\n return r", - "docstring": "Get an Elastic Network Interface id from its name tag.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt myminion boto_ec2.get_network_interface_id name=my_eni" - }, - { - "code": "def map2matrix(data_map, layout):\n r\n layout = np.array(layout)\n n_obj = np.prod(layout)\n image_shape = (np.array(data_map.shape) // layout)[0]\n data_matrix = []\n for i in range(n_obj):\n lower = (image_shape * (i // layout[1]),\n image_shape * (i % layout[1]))\n upper = (image_shape * (i // layout[1] + 1),\n image_shape * (i % layout[1] + 1))\n data_matrix.append((data_map[lower[0]:upper[0],\n lower[1]:upper[1]]).reshape(image_shape ** 2))\n return np.array(data_matrix).T", - "docstring": "r\"\"\"Map to Matrix\n\n This method transforms a 2D map to a 2D matrix\n\n Parameters\n ----------\n data_map : np.ndarray\n Input data map, 2D array\n layout : tuple\n 2D layout of 2D images\n\n Returns\n -------\n np.ndarray 2D matrix\n\n Raises\n ------\n ValueError\n For invalid layout\n\n Examples\n --------\n >>> from modopt.base.transform import map2matrix\n >>> a = np.array([[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13],\n [10, 11, 14, 15]])\n >>> map2matrix(a, (2, 2))\n array([[ 0, 4, 8, 12],\n [ 1, 5, 9, 13],\n [ 2, 6, 10, 14],\n [ 3, 7, 11, 15]])" - }, - { - "code": "def module_reload_changed(key):\n imp.acquire_lock()\n try:\n modkey = module_sys_modules_key(key)\n if not modkey:\n return False\n found = None\n if modkey:\n for second in WatchList:\n secmodkey = module_sys_modules_key(second)\n if secmodkey and sys.modules[modkey] == sys.modules[secmodkey]:\n found = second\n foundmodkey = secmodkey\n break\n if not found:\n return\n filemtime = module_getmtime(WatchList[found][\"file\"])\n filemtime = latest_submodule_time(found, filemtime)\n if filemtime > WatchList[found][\"time\"]:\n tangelo.log(\"Reloaded %s\" % found)\n reload_including_local(sys.modules[foundmodkey])\n for second in WatchList:\n if WatchList[second][\"file\"] == WatchList[found][\"file\"]:\n WatchList[second][\"time\"] = filemtime\n finally:\n imp.release_lock()\n return True", - "docstring": "Reload a module if it has changed since we last imported it. This is\n necessary if module a imports script b, script b is changed, and then\n module c asks to import script b.\n\n :param key: our key used in the WatchList.\n :returns: True if reloaded." 
- }, - { - "code": "def CreateApproval(self,\n reason=None,\n notified_users=None,\n email_cc_addresses=None):\n if not reason:\n raise ValueError(\"reason can't be empty\")\n if not notified_users:\n raise ValueError(\"notified_users list can't be empty.\")\n approval = user_pb2.ApiHuntApproval(\n reason=reason,\n notified_users=notified_users,\n email_cc_addresses=email_cc_addresses or [])\n args = user_pb2.ApiCreateHuntApprovalArgs(\n hunt_id=self.hunt_id, approval=approval)\n data = self._context.SendRequest(\"CreateHuntApproval\", args)\n return HuntApproval(\n data=data, username=self._context.username, context=self._context)", - "docstring": "Create a new approval for the current user to access this hunt." - }, - { - "code": "def transform_32_33(inst, new_inst, i, n, offset,\n instructions, new_asm):\n add_size = xdis.op_size(new_inst.opcode, opcode_33)\n if inst.opname in ('MAKE_FUNCTION','MAKE_CLOSURE'):\n prev_inst = instructions[i-1]\n assert prev_inst.opname == 'LOAD_CONST'\n assert isinstance(prev_inst.arg, int)\n load_fn_const = Instruction()\n load_fn_const.opname = 'LOAD_CONST'\n load_fn_const.opcode = opcode_33.opmap['LOAD_CONST']\n load_fn_const.line_no = None\n prev_const = new_asm.code.co_consts[prev_inst.arg]\n if hasattr(prev_const, 'co_name'):\n fn_name = new_asm.code.co_consts[prev_inst.arg].co_name\n else:\n fn_name = 'what-is-up'\n const_index = len(new_asm.code.co_consts)\n new_asm.code.co_consts = list(new_asm.code.co_consts)\n new_asm.code.co_consts.append(fn_name)\n load_fn_const.arg = const_index\n load_fn_const.offset = offset\n load_fn_const.starts_line = False\n load_fn_const.is_jump_target = False\n new_asm.code.instructions.append(load_fn_const)\n load_const_size = xdis.op_size(load_fn_const.opcode, opcode_33)\n add_size += load_const_size\n new_inst.offset = offset + add_size\n pass\n return add_size", - "docstring": "MAKEFUNCTION adds another const. probably MAKECLASS as well" - }, - { - "code": "def _post_document_batch(self, batch):\n target_batch = '/2013-01-01/documents/batch'\n url = self.endpoint_url + target_batch\n return requests.post(url, data=batch, headers={'Content-type': 'application/json'})", - "docstring": "Send a batch to Cloudsearch endpoint\n\n See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/submitting-doc-requests.html" - }, - { - "code": "def event_date(self, event_date):\n self._group_data['eventDate'] = self._utils.format_datetime(\n event_date, date_format='%Y-%m-%dT%H:%M:%SZ'\n )", - "docstring": "Set the Events \"event date\" value." - }, - { - "code": "def connection_made(self, transport):\n self._transport = transport\n self._raw_transport = transport\n if isinstance(transport, asyncio.SubprocessTransport):\n self._transport = transport.get_pipe_transport(0)", - "docstring": "Used to signal `asyncio.Protocol` of a successful connection." - }, - { - "code": "def v1_subfolder_list(request, response, kvlclient, fid):\n fid = urllib.unquote(fid)\n try:\n return sorted(imap(attrgetter('name'),\n ifilter(lambda it: it.is_folder(),\n new_folders(kvlclient, request).list(fid))))\n except KeyError:\n response.status = 404\n return []", - "docstring": "Retrieves a list of subfolders in a folder for the current user.\n\n The route for this endpoint is:\n ``GET /dossier/v1/folder//subfolder``.\n\n (Temporarily, the \"current user\" can be set via the\n ``annotator_id`` query parameter.)\n\n The payload returned is a list of subfolder identifiers." 
- }, - { - "code": "def suppress_unifurcations(self):\n q = deque(); q.append(self.root)\n while len(q) != 0:\n node = q.popleft()\n if len(node.children) != 1:\n q.extend(node.children); continue\n child = node.children.pop()\n if node.is_root():\n self.root = child; child.parent = None\n else:\n parent = node.parent; parent.remove_child(node); parent.add_child(child)\n if node.edge_length is not None:\n if child.edge_length is None:\n child.edge_length = 0\n child.edge_length += node.edge_length\n if child.label is None and node.label is not None:\n child.label = node.label\n q.append(child)", - "docstring": "Remove all nodes with only one child and directly attach child to parent" - }, - { - "code": "def quantile_binning(data=None, bins=10, *, qrange=(0.0, 1.0), **kwargs) -> StaticBinning:\n if np.isscalar(bins):\n bins = np.linspace(qrange[0] * 100, qrange[1] * 100, bins + 1)\n bins = np.percentile(data, bins)\n return static_binning(bins=make_bin_array(bins), includes_right_edge=True)", - "docstring": "Binning schema based on quantile ranges.\n\n This binning finds equally spaced quantiles. This should lead to\n all bins having roughly the same frequencies.\n\n Note: weights are not (yet) take into account for calculating\n quantiles.\n\n Parameters\n ----------\n bins: sequence or Optional[int]\n Number of bins\n qrange: Optional[tuple]\n Two floats as minimum and maximum quantile (default: 0.0, 1.0)\n\n Returns\n -------\n StaticBinning" - }, - { - "code": "def decorate(self, func, limit, ttl, *anoop, **kwnoop):\n return super(ratelimit, self).decorate(func, limit, ttl, *anoop, **kwnoop)", - "docstring": "make limit and ttl required" - }, - { - "code": "def verify_keys(self):\n verify_keys_endpoint = Template(\"${rest_root}/site/${public_key}\")\n url = verify_keys_endpoint.substitute(rest_root=self._rest_root, public_key=self._public_key)\n data = { \"clientName\": \"mollom_python\", \"clientVersion\": \"1.0\" }\n self._client.headers[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n response = self._client.post(url, data, timeout=self._timeout)\n if response.status_code != 200:\n raise MollomAuthenticationError\n return True", - "docstring": "Verify that the public and private key combination is valid; raises MollomAuthenticationError otherwise" - }, - { - "code": "def report_device_status(self, mode):\n if mode == 5:\n self.write_process_input(ctrl.CSI + \"0n\")\n elif mode == 6:\n x = self.cursor.x + 1\n y = self.cursor.y + 1\n if mo.DECOM in self.mode:\n y -= self.margins.top\n self.write_process_input(ctrl.CSI + \"{0};{1}R\".format(y, x))", - "docstring": "Report terminal status or cursor position.\n\n :param int mode: if 5 -- terminal status, 6 -- cursor position,\n otherwise a noop.\n\n .. versionadded:: 0.5.0" - }, - { - "code": "def find_element(self, name, type=ElementType.ANY):\n for e in self.e_list:\n if type.value and not e['elementType'] == type:\n continue\n if e[\"name\"] == name:\n uri = self.uri\n uri.eid = e[\"id\"]\n return uri", - "docstring": "Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.\n\n Args:\n name: str\n the name of the element.\n\n Returns:\n - onshapepy.uri of the element" - }, - { - "code": "def _add_node(self, node, depth):\n self._topmost_node.add_child(node, bool(depth[1]))\n self._stack.append((depth, node))", - "docstring": "Add a node to the graph, and the stack." 
- }, - { - "code": "def iter_non_intersecting(self, iterable, key=None, descending=False):\n return _ContainsVersionIterator(self, iterable, key, descending,\n mode=_ContainsVersionIterator.MODE_NON_INTERSECTING)", - "docstring": "Like `iter_intersect_test`, but returns non-intersections only.\n\n Returns:\n An iterator that returns items from `iterable` that don't intersect." - }, - { - "code": "def _second(self):\n self._second_one_loop()\n A = self._A\n if A[2, 1] == 0:\n return True\n elif A[2, 1] % A[1, 1] == 0:\n self._second_finalize()\n self._Ps += self._L\n self._L = []\n return True\n else:\n return False", - "docstring": "Find Smith normal form for Right-low 2x2 matrix" - }, - { - "code": "def wireshark(pktlist, wait=False, **kwargs):\n return tcpdump(pktlist, prog=conf.prog.wireshark, wait=wait, **kwargs)", - "docstring": "Runs Wireshark on a list of packets.\n\n See :func:`tcpdump` for more parameter description.\n\n Note: this defaults to wait=False, to run Wireshark in the background." - }, - { - "code": "def act(self):\n g = get_root(self).globals\n fname = filedialog.askopenfilename(\n defaultextension='.json',\n filetypes=[('json files', '.json'), ('fits files', '.fits')],\n initialdir=g.cpars['app_directory'])\n if not fname:\n g.clog.warn('Aborted load from disk')\n return False\n if fname.endswith('.json'):\n with open(fname) as ifname:\n json_string = ifname.read()\n else:\n json_string = jsonFromFits(fname)\n g.ipars.loadJSON(json_string)\n g.rpars.loadJSON(json_string)\n return True", - "docstring": "Carries out the action associated with the Load button" - }, - { - "code": "def sync_with_prompt_toolkit(self):\n self.editor_layout.update()\n window = self.window_arrangement.active_pt_window\n if window:\n self.application.layout.focus(window)", - "docstring": "Update the prompt-toolkit Layout and FocusStack." 
- }, - { - "code": "def row2dict(row, depth=None, exclude=None, exclude_pk=None,\n exclude_underscore=None, only=None, fk_suffix=None):\n if depth == 0:\n return None\n d, mapper = {}, get_mapper(row)\n if depth is None:\n depth = getattr(row, ATTR_DEPTH, DEFAULT_DEPTH) - 1\n else:\n depth -= 1\n if exclude is None:\n exclude = getattr(row, ATTR_EXCLUDE, DEFAULT_EXCLUDE)\n if exclude_pk is None:\n exclude_pk = getattr(row, ATTR_EXCLUDE_PK, DEFAULT_EXCLUDE_PK)\n if exclude_underscore is None:\n exclude_underscore = getattr(row, ATTR_EXCLUDE_UNDERSCORE,\n DEFAULT_EXCLUDE_UNDERSCORE)\n if only is None:\n only = getattr(row, ATTR_ONLY, DEFAULT_ONLY)\n if fk_suffix is None:\n fk_suffix = getattr(row, ATTR_FK_SUFFIX, DEFAULT_FK_SUFFIX)\n for c in mapper.columns.keys() + mapper.synonyms.keys():\n if c in exclude or \\\n check_exclude_pk(c, exclude_pk, fk_suffix=fk_suffix) or \\\n check_exclude_underscore(c, exclude_underscore) or \\\n check_only(c, only):\n continue\n d[c] = getattr(row, c)\n for r in mapper.relationships.keys():\n if r in exclude or check_only(r, only):\n continue\n attr = getattr(row, r)\n backref = get_backref(mapper.relationships[r])\n if backref:\n exclude.add(backref)\n kwargs = dict(depth=depth, exclude=exclude, exclude_pk=exclude_pk,\n exclude_underscore=exclude_underscore, only=only,\n fk_suffix=fk_suffix)\n if isinstance(attr, collections.InstrumentedList):\n d[r] = [row2dict(i, **kwargs) for i in attr if depth]\n else:\n d[r] = row2dict(attr, **kwargs)\n return d", - "docstring": "Recursively walk row attributes to serialize ones into a dict.\n\n :param row: instance of the declarative base class\n :param depth: number that represent the depth of related relationships\n :param exclude: set of attributes names to exclude\n :param exclude_pk: are foreign keys (e.g. fk_name_id) excluded\n :param exclude_underscore: are private and protected attributes excluded\n :param only: set of attributes names to include\n :param fk_suffix: str that represent a foreign key suffix\n\n :return: dict with attributes of current depth level" - }, - { - "code": "def get_network_by_id(self, network_id: int) -> Network:\n return self.session.query(Network).get(network_id)", - "docstring": "Get a network from the database by its identifier." 
- }, - { - "code": "def reduce_number(num):\n parts = str(num).split(\".\")\n if len(parts) == 1 or parts[1] == \"0\":\n return int(parts[0])\n else:\n match = _REPEATING_NUMBER_TRIM_RE.search(parts[1])\n if match:\n from_index, _ = match.span()\n if from_index == 0 and match.group(2) == \"0\":\n return int(parts[0])\n else:\n return Decimal(parts[0] + \".\" + parts[1][:from_index] + match.group(2))\n else:\n return num", - "docstring": "Reduces the string representation of a number.\n\n If the number is of the format n.00..., returns n.\n If the decimal portion of the number has a repeating decimal, followed by up to two trailing\n numbers, such as:\n\n 0.3333333\n\n or\n\n 0.343434346\n\n It will return just one instance of the repeating decimals:\n\n 0.3\n\n or\n\n 0.34" - }, - { - "code": "def import_identities(self, parser, matching=None, match_new=False,\n no_strict_matching=False,\n reset=False, verbose=False):\n matcher = None\n if matching:\n strict = not no_strict_matching\n try:\n blacklist = api.blacklist(self.db)\n matcher = create_identity_matcher(matching, blacklist, strict=strict)\n except MatcherNotSupportedError as e:\n self.error(str(e))\n return e.code\n uidentities = parser.identities\n try:\n self.__load_unique_identities(uidentities, matcher, match_new,\n reset, verbose)\n except LoadError as e:\n self.error(str(e))\n return e.code\n return CMD_SUCCESS", - "docstring": "Import identities information on the registry.\n\n New unique identities, organizations and enrollment data parsed\n by 'parser' will be added to the registry.\n\n Optionally, this method can look for possible identities that match with\n the new one to insert using 'matching' method. If a match is found,\n that means both identities are likely the same. Therefore, both identities\n would be merged into one. The 'match_new' parameter can be set to match\n and merge only new loaded identities. Rigorous validation of mathching\n values (i.e, well formed email addresses) will be disabled when\n is set to to `True`.\n\n When `reset` is set, relationships and enrollments will be removed\n before loading any data.\n\n :param parser: sorting hat parser\n :param matching: type of matching used to merge existing identities\n :param match_new: match and merge only the new loaded identities\n :param no_strict_matching: disable strict matching (i.e, well-formed email addresses)\n :param reset: remove relationships and enrollments before loading data\n :param verbose: run in verbose mode when matching is set" - }, - { - "code": "def set_member_roles(self, guild_id: int, member_id: int, roles: List[int]):\n self._query(f'guilds/{guild_id}/members/{member_id}', 'PATCH', {'roles': roles}, expected_status=204)", - "docstring": "Set the member's roles\n\n This method takes a list of **role ids** that you want the user to have. 
This\n method will **overwrite** all of the user's current roles with the roles in\n the passed list of roles.\n\n When calling this method, be sure that the list of roles that you're setting\n for this user is complete, not just the roles that you want to add or remove.\n For assistance in just adding or just removing roles, set the ``add_member_roles``\n and ``remove_member_roles`` methods.\n\n Args:\n guild_id: snowflake id of the guild\n member_id: snowflake id of the member\n roles: list of snowflake ids of roles to set" - }, - { - "code": "def b58check_encode(bin_s, version_byte=0):\n bin_s = chr(int(version_byte)) + bin_s\n num_leading_zeros = len(re.match(r'^\\x00*', bin_s).group(0))\n bin_s = bin_s + bin_checksum(bin_s)\n hex_s = hexlify(bin_s)\n b58_s = change_charset(hex_s, HEX_KEYSPACE, B58_KEYSPACE)\n return B58_KEYSPACE[0] * num_leading_zeros + b58_s", - "docstring": "Takes in a binary string and converts it to a base 58 check string." - }, - { - "code": "def extract_favicon(bs4):\n favicon = []\n selectors = [\n 'link[rel=\"icon\"]',\n 'link[rel=\"Icon\"]',\n 'link[rel=\"ICON\"]',\n 'link[rel^=\"shortcut\"]',\n 'link[rel^=\"Shortcut\"]',\n 'link[rel^=\"SHORTCUT\"]'\n ]\n for selector in selectors:\n icons = bs4.select(selector)\n if icons:\n for icon in icons:\n if icon.has_attr('href'):\n favicon.append(icon['href'])\n return favicon", - "docstring": "Extracting favicon url from BeautifulSoup object\n\n :param bs4: `BeautifulSoup`\n :return: `list` List of favicon urls" - }, - { - "code": "def karbasa(self, result):\n probs = result['all_probs']\n probs.sort()\n return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])", - "docstring": "Finding if class probabilities are close to eachother\n Ratio of the distance between 1st and 2nd class,\n to the distance between 1st and last class.\n\n :param result: The dict returned by LM.calculate()" - }, - { - "code": "def from_name(cls, name):\r\n return Author(name=name, sha512=cls.hash_name(name))", - "docstring": "Create an author by name, automatically populating the hash." - }, - { - "code": "def load_results_from_table_definition(table_definition, table_definition_file, options):\n default_columns = extract_columns_from_table_definition_file(table_definition, table_definition_file)\n columns_relevant_for_diff = _get_columns_relevant_for_diff(default_columns)\n results = []\n for tag in table_definition:\n if tag.tag == 'result':\n columns = extract_columns_from_table_definition_file(tag, table_definition_file) or default_columns\n run_set_id = tag.get('id')\n for resultsFile in get_file_list_from_result_tag(tag, table_definition_file):\n results.append(parallel.submit(\n load_result, resultsFile, options, run_set_id, columns, columns_relevant_for_diff))\n elif tag.tag == 'union':\n results.append(parallel.submit(\n handle_union_tag, tag, table_definition_file, options, default_columns, columns_relevant_for_diff))\n return [future.result() for future in results]", - "docstring": "Load all results in files that are listed in the given table-definition file.\n @return: a list of RunSetResult objects" - }, - { - "code": "def permission_required_with_403(perm, login_url=None):\n return user_passes_test_with_403(lambda u: u.has_perm(perm), login_url=login_url)", - "docstring": "Decorator for views that checks whether a user has a particular permission\n enabled, redirecting to the login page or rendering a 403 as necessary.\n\n See :meth:`django.contrib.auth.decorators.permission_required`." 
- }, - { - "code": "def _AddStopTimeObjectUnordered(self, stoptime, schedule):\n stop_time_class = self.GetGtfsFactory().StopTime\n cursor = schedule._connection.cursor()\n insert_query = \"INSERT INTO stop_times (%s) VALUES (%s);\" % (\n ','.join(stop_time_class._SQL_FIELD_NAMES),\n ','.join(['?'] * len(stop_time_class._SQL_FIELD_NAMES)))\n cursor = schedule._connection.cursor()\n cursor.execute(\n insert_query, stoptime.GetSqlValuesTuple(self.trip_id))", - "docstring": "Add StopTime object to this trip.\n\n The trip isn't checked for duplicate sequence numbers so it must be\n validated later." - }, - { - "code": "def _calculateGlyph(self, targetGlyphObject, instanceLocationObject, glyphMasters):\n sources = None\n items = []\n for item in glyphMasters:\n locationObject = item['location']\n fontObject = item['font']\n glyphName = item['glyphName']\n if not glyphName in fontObject:\n continue\n glyphObject = MathGlyph(fontObject[glyphName])\n items.append((locationObject, glyphObject))\n bias, m = buildMutator(items, axes=self.axes)\n instanceObject = m.makeInstance(instanceLocationObject)\n if self.roundGeometry:\n try:\n instanceObject = instanceObject.round()\n except AttributeError:\n if self.verbose and self.logger:\n self.logger.info(\"MathGlyph object missing round() method.\")\n try:\n instanceObject.extractGlyph(targetGlyphObject, onlyGeometry=True)\n except TypeError:\n pPen = targetGlyphObject.getPointPen()\n targetGlyphObject.clear()\n instanceObject.drawPoints(pPen)\n targetGlyphObject.width = instanceObject.width", - "docstring": "Build a Mutator object for this glyph.\n\n * name: glyphName\n * location: Location object\n * glyphMasters: dict with font objects." - }, - { - "code": "def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)", - "docstring": "Prompt the user which option to choose from the given. Each of the\n possible choices is rendered beforehand." - }, - { - "code": "def select_string(self, rows: List[Row], column: StringColumn) -> List[str]:\n return [str(row.values[column.name]) for row in rows if row.values[column.name] is not None]", - "docstring": "Select function takes a list of rows and a column name and returns a list of strings as\n in cells." - }, - { - "code": "def get(self, node_id):\n return self.prepare_model(self.client.api.inspect_node(node_id))", - "docstring": "Get a node.\n\n Args:\n node_id (string): ID of the node to be inspected.\n\n Returns:\n A :py:class:`Node` object.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error." - }, - { - "code": "def ema_growth(eqdata, **kwargs):\n _growth_outputcol = kwargs.get('outputcol', 'EMA Growth')\n _ema_outputcol = 'EMA'\n kwargs['outputcol'] = _ema_outputcol\n _emadf = ema(eqdata, **kwargs)\n return simple.growth(_emadf, selection=_ema_outputcol, outputcol=_growth_outputcol)", - "docstring": "Growth of exponential moving average.\n\n Parameters\n ----------\n eqdata : DataFrame\n span : int, optional\n Span for exponential moving average. Defaults to 20.\n outputcol : str, optional.\n Column to use for output. Defaults to 'EMA Growth'.\n selection : str, optional\n Column of eqdata on which to calculate ema growth. If\n `eqdata` has only 1 column, `selection` is ignored,\n and ema growth is calculated on that column. 
Defaults\n to 'Adj Close'.\n\n Returns\n ---------\n out : DataFrame\n Growth of exponential moving average from one day to next" - }, - { - "code": "def _process_plan_lines(self, final_line_count):\n if not self._lines_seen[\"plan\"]:\n self._add_error(_(\"Missing a plan.\"))\n return\n if len(self._lines_seen[\"plan\"]) > 1:\n self._add_error(_(\"Only one plan line is permitted per file.\"))\n return\n plan, at_line = self._lines_seen[\"plan\"][0]\n if not self._plan_on_valid_line(at_line, final_line_count):\n self._add_error(\n _(\"A plan must appear at the beginning or end of the file.\")\n )\n return\n if plan.expected_tests != self._lines_seen[\"test\"]:\n self._add_error(\n _(\"Expected {expected_count} tests but only {seen_count} ran.\").format(\n expected_count=plan.expected_tests,\n seen_count=self._lines_seen[\"test\"],\n )\n )", - "docstring": "Process plan line rules." - }, - { - "code": "def number_crossing_m(x, m):\n if not isinstance(x, (np.ndarray, pd.Series)):\n x = np.asarray(x)\n positive = x > m\n return np.where(np.bitwise_xor(positive[1:], positive[:-1]))[0].size", - "docstring": "Calculates the number of crossings of x on m. A crossing is defined as two sequential values where the first value\n is lower than m and the next is greater, or vice-versa. If you set m to zero, you will get the number of zero\n crossings.\n\n :param x: the time series to calculate the feature of\n :type x: numpy.ndarray\n :param m: the threshold for the crossing\n :type m: float\n :return: the value of this feature\n :return type: int" - }, - { - "code": "def brozzler_new_site(argv=None):\n argv = argv or sys.argv\n arg_parser = argparse.ArgumentParser(\n prog=os.path.basename(argv[0]),\n description='brozzler-new-site - register site to brozzle',\n formatter_class=BetterArgumentDefaultsHelpFormatter)\n arg_parser.add_argument('seed', metavar='SEED', help='seed url')\n add_rethinkdb_options(arg_parser)\n arg_parser.add_argument(\n '--time-limit', dest='time_limit', default=None,\n help='time limit in seconds for this site')\n arg_parser.add_argument(\n '--ignore-robots', dest='ignore_robots', action='store_true',\n help='ignore robots.txt for this site')\n arg_parser.add_argument(\n '--warcprox-meta', dest='warcprox_meta',\n help=(\n 'Warcprox-Meta http request header to send with each request; '\n 'must be a json blob, ignored unless warcprox features are '\n 'enabled'))\n arg_parser.add_argument(\n '--behavior-parameters', dest='behavior_parameters',\n default=None, help=(\n 'json blob of parameters to populate the javascript behavior '\n 'template, e.g. 
{\"parameter_username\":\"x\",'\n '\"parameter_password\":\"y\"}'))\n arg_parser.add_argument(\n '--username', dest='username', default=None,\n help='use this username to try to log in if a login form is found')\n arg_parser.add_argument(\n '--password', dest='password', default=None,\n help='use this password to try to log in if a login form is found')\n add_common_options(arg_parser, argv)\n args = arg_parser.parse_args(args=argv[1:])\n configure_logging(args)\n rr = rethinker(args)\n site = brozzler.Site(rr, {\n 'seed': args.seed,\n 'time_limit': int(args.time_limit) if args.time_limit else None,\n 'ignore_robots': args.ignore_robots,\n 'warcprox_meta': json.loads(\n args.warcprox_meta) if args.warcprox_meta else None,\n 'behavior_parameters': json.loads(\n args.behavior_parameters) if args.behavior_parameters else None,\n 'username': args.username,\n 'password': args.password})\n frontier = brozzler.RethinkDbFrontier(rr)\n brozzler.new_site(frontier, site)", - "docstring": "Command line utility entry point for queuing a new brozzler site.\n Takes a seed url and creates a site and page object in rethinkdb, which\n brozzler-workers will look at and start crawling." - }, - { - "code": "def _get_imported_module(self, module_name):\n imp_mod = self.by_name.get(module_name)\n if imp_mod:\n return imp_mod\n no_obj = module_name.rsplit('.', 1)[0]\n imp_mod2 = self.by_name.get(no_obj)\n if imp_mod2:\n return imp_mod2\n if module_name in self.pkgs:\n pkg_name = module_name + \".__init__\"\n return self.by_name[pkg_name]\n if no_obj in self.pkgs:\n pkg_name = no_obj + \".__init__\"\n return self.by_name[pkg_name]", - "docstring": "try to get imported module reference by its name" - }, - { - "code": "def coverage_region_detailed_stats(target_name, bed_file, data, out_dir):\n if bed_file and utils.file_exists(bed_file):\n ready_depth = tz.get_in([\"depth\", target_name], data)\n if ready_depth:\n cov_file = ready_depth[\"regions\"]\n dist_file = ready_depth[\"dist\"]\n thresholds_file = ready_depth.get(\"thresholds\")\n out_cov_file = os.path.join(out_dir, os.path.basename(cov_file))\n out_dist_file = os.path.join(out_dir, os.path.basename(dist_file))\n out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file)) \\\n if thresholds_file and os.path.isfile(thresholds_file) else None\n if not utils.file_uptodate(out_cov_file, cov_file):\n utils.copy_plus(cov_file, out_cov_file)\n utils.copy_plus(dist_file, out_dist_file)\n utils.copy_plus(thresholds_file, out_thresholds_file) if out_thresholds_file else None\n return [out_cov_file, out_dist_file] + ([out_thresholds_file] if out_thresholds_file else [])\n return []", - "docstring": "Calculate coverage at different completeness cutoff\n for region in coverage option." 
- }, - { - "code": "def join_conn_groups( conns, descs, mat_ids, concat = False ):\n el = dict_from_keys_init( descs, list )\n for ig, desc in enumerate( descs ):\n el[desc].append( ig )\n groups = [ii for ii in el.values() if ii]\n descs_out, conns_out, mat_ids_out = [], [], []\n for group in groups:\n n_ep = conns[group[0]].shape[1]\n conn = nm.zeros( (0, n_ep), nm.int32 )\n mat_id = nm.zeros( (0,), nm.int32 )\n for ig in group:\n conn = nm.concatenate( (conn, conns[ig]) )\n mat_id = nm.concatenate( (mat_id, mat_ids[ig]) )\n if concat:\n conn = nm.concatenate( (conn, mat_id[:,nm.newaxis]), 1 )\n else:\n mat_ids_out.append( mat_id )\n conns_out.append( conn )\n descs_out.append( descs[group[0]] )\n if concat:\n return conns_out, descs_out\n else:\n return conns_out, descs_out, mat_ids_out", - "docstring": "Join groups of the same element type." - }, - { - "code": "def security_group_update(secgroup=None, auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.update_security_group(secgroup, **kwargs)", - "docstring": "Update a security group\n\n secgroup\n Name, ID or Raw Object of the security group to update\n\n name\n New name for the security group\n\n description\n New description for the security group\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' neutronng.security_group_update secgroup=secgroup1 \\\n description=\"Very secure security group\"\n salt '*' neutronng.security_group_update secgroup=secgroup1 \\\n description=\"Very secure security group\" \\\n project_id=1dcac318a83b4610b7a7f7ba01465548" - }, - { - "code": "def get_unique_scan_parameter_combinations(meta_data_array, scan_parameters=None, scan_parameter_columns_only=False):\n try:\n last_not_parameter_column = meta_data_array.dtype.names.index('error_code')\n except ValueError:\n last_not_parameter_column = meta_data_array.dtype.names.index('error')\n if last_not_parameter_column == len(meta_data_array.dtype.names) - 1:\n return\n if scan_parameters is None:\n return unique_row(meta_data_array, use_columns=range(4, len(meta_data_array.dtype.names)), selected_columns_only=scan_parameter_columns_only)\n else:\n use_columns = []\n for scan_parameter in scan_parameters:\n try:\n use_columns.append(meta_data_array.dtype.names.index(scan_parameter))\n except ValueError:\n logging.error('No scan parameter ' + scan_parameter + ' found')\n raise RuntimeError('Scan parameter not found')\n return unique_row(meta_data_array, use_columns=use_columns, selected_columns_only=scan_parameter_columns_only)", - "docstring": "Takes the numpy meta data array and returns the first rows with unique combinations of different scan parameter values for selected scan parameters.\n If selected columns only is true, the returned histogram only contains the selected columns.\n\n Parameters\n ----------\n meta_data_array : numpy.ndarray\n scan_parameters : list of string, None\n Scan parameter names taken. If None all are used.\n selected_columns_only : bool\n\n Returns\n -------\n numpy.Histogram" - }, - { - "code": "def launch():\n if launched():\n check_version()\n os.chdir(ROOT)\n return\n if not os.path.exists(BIN_LORE):\n missing = ' %s virtualenv is missing.' 
% APP\n if '--launched' in sys.argv:\n sys.exit(ansi.error() + missing + ' Please check for errors during:\\n $ lore install\\n')\n else:\n print(ansi.warning() + missing)\n import lore.__main__\n lore.__main__.install(None, None)\n reboot('--env-launched')", - "docstring": "Ensure that python is running from the Lore virtualenv past this point." - }, - { - "code": "def exception(cls, name, message, *args):\n cls.getLogger(name).exception(message, *args)", - "docstring": "Convenience function to log a message at the ERROR level with additonal exception information.\n\n :param name: The name of the logger instance in the VSG namespace (VSG.)\n :param message: A message format string.\n :param args: The arguments that are are merged into msg using the string formatting operator.\n :..note: This method should only be called from an exception handler." - }, - { - "code": "def match(self, objects: List[Any]) -> bool:\n s = self._make_string(objects)\n m = self._compiled_expression.match(s)\n return m is not None", - "docstring": "Return True if the list of objects matches the expression." - }, - { - "code": "def gene_expression_conv_base():\n hparams = common_hparams.basic_params1()\n batch_size = 10\n output_length = 2048\n inputs_per_output = 128\n chunk_size = 4\n input_length = output_length * inputs_per_output // chunk_size\n hparams.batch_size = input_length * batch_size\n hparams.dropout = 0.1\n hparams.add_hparam(\"num_conv_layers\", 4)\n hparams.add_hparam(\"num_dconv_layers\", 7)\n hparams.add_hparam(\"pooling_windows\", [2, 2, 2, 4])\n hparams.hidden_size = 256\n hparams.kernel_width = 20\n hparams.add_hparam(\"stride\", 1)\n return hparams", - "docstring": "Hparams for GeneExpressionConv model." - }, - { - "code": "def fillna(series_or_arr, missing_value=0.0):\n if pandas.notnull(missing_value):\n if isinstance(series_or_arr, (numpy.ndarray)):\n series_or_arr[numpy.isnan(series_or_arr)] = missing_value\n else:\n series_or_arr.fillna(missing_value, inplace=True)\n return series_or_arr", - "docstring": "Fill missing values in pandas objects and numpy arrays.\n\n Arguments\n ---------\n series_or_arr : pandas.Series, numpy.ndarray\n The numpy array or pandas series for which the missing values\n need to be replaced.\n missing_value : float, int, str\n The value to replace the missing value with. Default 0.0.\n\n Returns\n -------\n pandas.Series, numpy.ndarray\n The numpy array or pandas series with the missing values\n filled." - }, - { - "code": "def glob(self):\n file_glob = glob.glob(BASE_NAME.format('*', self.num, '*', '.*'))\n return sorted(file_glob, key=lambda f: os.path.splitext(f))", - "docstring": "Returns a sorted glob of files belonging to a given problem" - }, - { - "code": "def get_stored_cert_serials(store):\n cmd = \"certutil.exe -store {0}\".format(store)\n out = __salt__['cmd.run'](cmd)\n matches = re.findall(r\"={16}\\r\\n.*:\\s*(\\w*)\\r\\n\", out)\n return matches", - "docstring": "Get all of the certificate serials in the specified store\n\n store\n The store to get all the certificate serials from\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' certutil.get_stored_cert_serials " - }, - { - "code": "def post(self, request, *args, **kwargs):\n serializer = EventSerializer(data=request.data)\n if not serializer.is_valid():\n return Response(\n {\"accepted\": False, \"reason\": serializer.errors}, status=400\n )\n data = serializer.validated_data\n event_type = {\n \"ack\": \"ack\",\n \"nack\": \"nack\",\n \"delivery_report\": \"delivery_succeeded\",\n }.get(data[\"event_type\"])\n accepted, reason = process_event(\n data[\"user_message_id\"], event_type, data[\"nack_reason\"], data[\"timestamp\"]\n )\n return Response(\n {\"accepted\": accepted, \"reason\": reason}, status=200 if accepted else 400\n )", - "docstring": "Checks for expect event types before continuing" - }, - { - "code": "def get_path_components(path):\n path_segments = path.split('.')\n module_path = '.'.join(path_segments[:-1])\n if module_path == '':\n raise VerifyingDoubleImportError('Invalid import path: {}.'.format(path))\n class_name = path_segments[-1]\n return module_path, class_name", - "docstring": "Extract the module name and class name out of the fully qualified path to the class.\n\n :param str path: The full path to the class.\n :return: The module path and the class name.\n :rtype: str, str\n :raise: ``VerifyingDoubleImportError`` if the path is to a top-level module." - }, - { - "code": "def get_query_params(request, *args):\n query = request.GET.copy()\n index = 1\n key = ''\n for arg in args:\n if index % 2 != 0:\n key = arg\n else:\n if arg == \"!remove\":\n try:\n query.pop(key)\n except KeyError:\n pass\n else:\n query[key] = arg\n index += 1\n return query.urlencode()", - "docstring": "Allows to change one of the URL get parameter while keeping all the others.\n\n Usage::\n\n {% load libs_tags %}\n {% get_query_params request \"page\" page_obj.next_page_number as query %}\n Next\n\n You can also pass in several pairs of keys and values::\n\n {% get_query_params request \"page\" 1 \"foobar\" 2 as query %}\n\n You often need this when you have a paginated set of objects with filters.\n\n Your url would look something like ``/?region=1&gender=m``. Your paginator\n needs to create links with ``&page=2`` in them but you must keep the\n filter values when switching pages.\n\n :param request: The request instance.\n :param *args: Make sure to always pass in paris of args. One is the key,\n one is the value. If you set the value of a key to \"!remove\" that\n parameter will not be included in the returned query." - }, - { - "code": "def all_inspections(obj):\n for name, callback in INSPECTIONS:\n result = callback(obj)\n if result:\n yield name, result", - "docstring": "Generator to iterate all current Jishaku inspections." 
- }, - { - "code": "def set_signal(self, signal, name):\n r\n signal = self._check_signal(signal)\n self.signals[name] = signal", - "docstring": "r\"\"\"Attach a signal to the graph.\n\n Attached signals can be accessed (and modified or deleted) through the\n :attr:`signals` dictionary.\n\n Parameters\n ----------\n signal : array_like\n A sequence that assigns a value to each vertex.\n The value of the signal at vertex `i` is ``signal[i]``.\n name : String\n Name of the signal used as a key in the :attr:`signals` dictionary.\n\n Examples\n --------\n >>> graph = graphs.Sensor(10)\n >>> signal = np.arange(graph.n_vertices)\n >>> graph.set_signal(signal, 'mysignal')\n >>> graph.signals\n {'mysignal': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}" - }, - { - "code": "def _split_message(self, message):\n if len(message) == 3:\n return message\n else:\n request_id, data = message\n return request_id, data, 0", - "docstring": "Return request_id, data, max_doc_size.\n\n :Parameters:\n - `message`: (request_id, data, max_doc_size) or (request_id, data)" - }, - { - "code": "def to_wav(mediafile):\n if mediafile.endswith(\".wav\"):\n yield mediafile\n else:\n wavfile = tempfile.mktemp(__name__) + \".wav\"\n try:\n extract_wav(mediafile, wavfile)\n yield wavfile\n finally:\n if os.path.exists(wavfile):\n os.remove(wavfile)", - "docstring": "Context manager providing a temporary WAV file created from the given media file." - }, - { - "code": "def smart_query_string(parser, token):\n args = token.split_contents()\n additions = args[1:]\n addition_pairs = []\n while additions:\n addition_pairs.append(additions[0:2])\n additions = additions[2:]\n return SmartQueryStringNode(addition_pairs)", - "docstring": "Outputs current GET query string with additions appended.\n Additions are provided in token pairs." - }, - { - "code": "def check_base_required_attributes(self, dataset):\n test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')\n conventions = getattr(dataset, 'Conventions', '')\n feature_type = getattr(dataset, 'featureType', '')\n accepted_conventions = ['CF-1.6', 'ACDD-1.3']\n dataset_conventions = conventions.replace(' ', '').split(',')\n for accepted_convention in accepted_conventions:\n if accepted_convention not in dataset_conventions:\n test_ctx.assert_true(False, 'Conventions attribute is missing or is not equal to \"CF-1.6, ACDD-1.3\": {}'.format(conventions))\n break\n else:\n test_ctx.assert_true(True, '')\n test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],\n 'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))\n return test_ctx.to_result()", - "docstring": "Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides\n just checking that they exist.\n\n :param netCDF4.Dataset dataset: An open netCDF dataset\n\n :Conventions = \"CF-1.6, ACDD-1.3\" ; //............................... REQUIRED - Always try to use latest value. (CF)\n :featureType = \"timeSeries\" ; //..................................... REQUIRED - CF attribute for identifying the featureType.\n :cdm_data_type = \"Station\" ; //...................................... REQUIRED (ACDD)\n :ncei_template_version = \"NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1\" ; //....... REQUIRED (NCEI)\n :title = \"\" ; //............................................... 
HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)\n :summary = \"\" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)\n :keywords = \"\" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)\n :Conventions = \"CF-1.6, ACDD-1.3\" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD)" - }, - { - "code": "def build_wheel(wheel_directory, config_settings, metadata_directory=None):\n prebuilt_whl = _find_already_built_wheel(metadata_directory)\n if prebuilt_whl:\n shutil.copy2(prebuilt_whl, wheel_directory)\n return os.path.basename(prebuilt_whl)\n return _build_backend().build_wheel(wheel_directory, config_settings,\n metadata_directory)", - "docstring": "Invoke the mandatory build_wheel hook.\n\n If a wheel was already built in the\n prepare_metadata_for_build_wheel fallback, this\n will copy it rather than rebuilding the wheel." - }, - { - "code": "def get_ftr(self):\n if not self.ftr:\n return self.ftr\n width = self.size()[0]\n return re.sub(\n \"%time\", \"%s\\n\" % time.strftime(\"%H:%M:%S\"), self.ftr).rjust(width)", - "docstring": "Process footer and return the processed string" - }, - { - "code": "def find_dates(text, source=False, index=False, strict=False, base_date=None):\n date_finder = DateFinder(base_date=base_date)\n return date_finder.find_dates(text, source=source, index=index, strict=strict)", - "docstring": "Extract datetime strings from text\n\n :param text:\n A string that contains one or more natural language or literal\n datetime strings\n :type text: str|unicode\n :param source:\n Return the original string segment\n :type source: boolean\n :param index:\n Return the indices where the datetime string was located in text\n :type index: boolean\n :param strict:\n Only return datetimes with complete date information. For example:\n `July 2016` of `Monday` will not return datetimes.\n `May 16, 2015` will return datetimes.\n :type strict: boolean\n :param base_date:\n Set a default base datetime when parsing incomplete dates\n :type base_date: datetime\n\n :return: Returns a generator that produces :mod:`datetime.datetime` objects,\n or a tuple with the source text and index, if requested" - }, - { - "code": "def _construct(self, targets, control_flow_slice=False):\n if control_flow_slice:\n simruns = [ r for r, _ in targets ]\n self._construct_control_flow_slice(simruns)\n else:\n self._construct_default(targets)", - "docstring": "Construct a dependency graph based on given parameters.\n\n :param targets: A list of tuples like (CFGNode, statement ID)\n :param control_flow_slice: Is the backward slicing only depends on CFG or not." 
- }, - { - "code": "def fileprint(filename, category, level=logging.DEBUG, maxBytes=1024*10124*100,\n backupCount=0):\n path = os.path.join(CFG.filedir, category, filename)\n filer = logging.getLogger(filename)\n frt = logging.Formatter('%(message)s')\n hdr = RotatingFileHandler(path, 'a', maxBytes, backupCount, 'utf-8')\n hdr.setFormatter(frt)\n hdr._name = '\n already_in = False\n for _hdr in filer.handlers:\n if _hdr._name == '\n already_in = True\n break\n if not already_in:\n filer.addHandler(hdr)\n hdr = logging.StreamHandler(sys.stdout)\n hdr.setFormatter(frt)\n hdr._name = '\n already_in = False\n for _hdr in filer.handlers:\n if _hdr._name == '\n already_in = True\n if not already_in:\n filer.addHandler(hdr)\n filer.setLevel(level)\n def _wraper(*args):\n if not args:\n return\n encoding = 'utf8' if os.name == 'posix' else 'gbk'\n args = [_cu(a, encoding) for a in args]\n filer.info(' '.join(args))\n return _wraper, filer", - "docstring": "Print files by file size.\n\n filename\n string, file name\n category\n string, category path of logs file in log directory\n level\n enumerated type of logging module, restrict whether logs to be printed or not\n maxBytes\n int, max limit of file size\n backupCount\n int, allowed numbers of file copys" - }, - { - "code": "def checkIsConsistent(self):\n if is_an_array(self.mask) and self.mask.shape != self.data.shape:\n raise ConsistencyError(\"Shape mismatch mask={}, data={}\"\n .format(self.mask.shape != self.data.shape))", - "docstring": "Raises a ConsistencyError if the mask has an incorrect shape." - }, - { - "code": "def total_duration(self):\n duration = 0\n for utterance in self.utterances.values():\n duration += utterance.duration\n return duration", - "docstring": "Return the total amount of audio summed over all utterances in the corpus in seconds." - }, - { - "code": "def _contingency_matrix(reference_indices, estimated_indices):\n ref_classes, ref_class_idx = np.unique(reference_indices,\n return_inverse=True)\n est_classes, est_class_idx = np.unique(estimated_indices,\n return_inverse=True)\n n_ref_classes = ref_classes.shape[0]\n n_est_classes = est_classes.shape[0]\n return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),\n (ref_class_idx, est_class_idx)),\n shape=(n_ref_classes, n_est_classes),\n dtype=np.int).toarray()", - "docstring": "Computes the contingency matrix of a true labeling vs an estimated one.\n\n Parameters\n ----------\n reference_indices : np.ndarray\n Array of reference indices\n estimated_indices : np.ndarray\n Array of estimated indices\n\n Returns\n -------\n contingency_matrix : np.ndarray\n Contingency matrix, shape=(#reference indices, #estimated indices)\n .. note:: Based on sklearn.metrics.cluster.contingency_matrix" - }, - { - "code": "def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None:\n ip = headers.get(\"X-Forwarded-For\", self.remote_ip)\n for ip in (cand.strip() for cand in reversed(ip.split(\",\"))):\n if ip not in self.trusted_downstream:\n break\n ip = headers.get(\"X-Real-Ip\", ip)\n if netutil.is_valid_ip(ip):\n self.remote_ip = ip\n proto_header = headers.get(\n \"X-Scheme\", headers.get(\"X-Forwarded-Proto\", self.protocol)\n )\n if proto_header:\n proto_header = proto_header.split(\",\")[-1].strip()\n if proto_header in (\"http\", \"https\"):\n self.protocol = proto_header", - "docstring": "Rewrite the ``remote_ip`` and ``protocol`` fields." 
- }, - { - "code": "def openall(self, title=None):\n spreadsheet_files = self.list_spreadsheet_files()\n return [\n Spreadsheet(self, dict(title=x['name'], **x))\n for x in spreadsheet_files\n ]", - "docstring": "Opens all available spreadsheets.\n\n :param title: (optional) If specified can be used to filter\n spreadsheets by title.\n :type title: str\n\n :returns: a list of :class:`~gspread.models.Spreadsheet` instances." - }, - { - "code": "def gen_df_output(\n list_csv_in=[\n 'SSss_YYYY_SUEWS_TT.csv',\n 'SSss_DailyState.csv',\n 'SSss_YYYY_snow_TT.csv',\n ],\n url_base=url_repo_output)->Path:\n list_url_table = [\n url_base/table for table in list_csv_in\n ]\n try:\n df_var_info = pd.concat(\n [pd.read_csv(f) for f in list_url_table],\n sort=False)\n except:\n for url in list_url_table:\n if not url.get().ok:\n print(f'{url} not existing!')\n else:\n df_var_info_x = df_var_info\\\n .set_index('Name')\\\n .loc[:, ['Description']]\\\n .drop_duplicates()\n df_var_output = df_var_info_x\\\n .copy()\\\n .assign(lower=df_var_info_x.index.str.lower())\\\n .reset_index()\\\n .set_index('lower')\n df_var_group = df_output_sample.columns.to_frame()\n df_var_group.index = df_var_group.index.droplevel(0).rename('Name')\n df_var_output = df_var_group\\\n .merge(\n df_var_output.set_index('Name'),\n left_on='Name',\n right_on='Name')\\\n .rename(columns={\n 'var': 'variable',\n 'group': 'Group',\n })\\\n .set_index('variable')\\\n .drop_duplicates()\n return df_var_output", - "docstring": "Generate description info of supy output results into dataframe\n\n Parameters\n ----------\n list_csv_in : list, optional\n list of file names for csv files with meta info (the default is ['SSss_YYYY_SUEWS_TT.csv','SSss_DailyState.csv','SSss_YYYY_snow_TT.csv',], which [default_description])\n url_base : [type], optional\n URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file)\n\n Returns\n -------\n pd.DataFrame\n Description info of supy output results" - }, - { - "code": "def get_contour(mask):\n if isinstance(mask, np.ndarray) and len(mask.shape) == 2:\n mask = [mask]\n ret_list = False\n else:\n ret_list = True\n contours = []\n for mi in mask:\n c0 = find_contours(mi.transpose(),\n level=.9999,\n positive_orientation=\"low\",\n fully_connected=\"high\")[0]\n c1 = np.asarray(np.round(c0), int)\n c2 = remove_duplicates(c1)\n contours.append(c2)\n if ret_list:\n return contours\n else:\n return contours[0]", - "docstring": "Compute the image contour from a mask\n\n The contour is computed in a very inefficient way using scikit-image\n and a conversion of float coordinates to pixel coordinates.\n\n Parameters\n ----------\n mask: binary ndarray of shape (M,N) or (K,M,N)\n The mask outlining the pixel positions of the event.\n If a 3d array is given, then `K` indexes the individual\n contours.\n\n Returns\n -------\n cont: ndarray or list of K ndarrays of shape (J,2)\n A 2D array that holds the contour of an event (in pixels)\n e.g. obtained using `mm.contour` where `mm` is an instance\n of `RTDCBase`. The first and second columns of `cont`\n correspond to the x- and y-coordinates of the contour." - }, - { - "code": "def get(self, deviceId, measurementId):\n record = self.measurements.get(deviceId)\n if record is not None:\n return record.get(measurementId)\n return None", - "docstring": "details the specific measurement." 
- }, - { - "code": "def strip_chr(bt):\n try:\n df = pd.read_table(bt.fn, header=None, dtype=str)\n except pd.parser.CParserError:\n df = pd.read_table(bt.fn, header=None, skiprows=1, dtype=str)\n df[0] = df[0].apply(lambda x: x[3:])\n s = '\\n'.join(df.astype(str).apply(lambda x: '\\t'.join(x), axis=1)) + '\\n'\n out = pbt.BedTool(s, from_string=True)\n return out", - "docstring": "Strip 'chr' from chromosomes for BedTool object\n\n Parameters\n ----------\n bt : pybedtools.BedTool\n BedTool to strip 'chr' from.\n\n Returns\n -------\n out : pybedtools.BedTool\n New BedTool with 'chr' stripped from chromosome names." - }, - { - "code": "def set_param(self, params, value=None):\n if isinstance(params, Mapping):\n params = params.items()\n elif isinstance(params, STRING_TYPES) and value is not None:\n params = [(params, value)]\n for key, val in params:\n _check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))", - "docstring": "Set parameters into the Booster.\n\n Parameters\n ----------\n params: dict/list/str\n list of key,value pairs, dict of key to value or simply str key\n value: optional\n value of the specified parameter, when params is str key" - }, - { - "code": "def run_ppm_server(pdb_file, outfile, force_rerun=False):\n if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun):\n url = 'http://sunshine.phar.umich.edu/upload_file.php'\n files = {'userfile': open(pdb_file, 'rb')}\n r = requests.post(url, files=files)\n info = r.text\n with open(outfile, 'w') as f:\n f.write(info)\n else:\n with open(outfile, 'r') as f:\n info = f.read()\n t = info.replace('\\n', '')\n tt = t.replace('\\r', '')\n ttt = tt.replace('\\t', '')\n soup = BeautifulSoup(ttt, \"lxml\")\n tables = soup.find_all(\"table\", attrs={\"class\": \"data\"})\n info_dict = {}\n table_index = 0\n for t in tables:\n data_index = 0\n for data in t.find_all('tr', attrs={\"class\": \"row1\"}):\n data_list = list(data.strings)\n if table_index == 0:\n info_dict['Depth/Hydrophobic Thickness'] = data_list[0]\n info_dict['deltaG_transfer'] = data_list[2]\n info_dict['Tilt Angle'] = data_list[3]\n if table_index == 1 and data_index == 0:\n info_dict['Embedded_residues_Tilt'] = data_list[0]\n info_dict['Embedded_residues'] = data_list[1]\n if table_index == 1 and data_index == 1:\n info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0]\n info_dict['Transmembrane_secondary_structure_segments'] = data_list[1]\n if table_index == 2:\n info_dict['Output Messages'] = data_list[1]\n if table_index == 3:\n baseurl = 'http://sunshine.phar.umich.edu/'\n a = data.find('a', href=True)\n download_url = baseurl + a['href'].replace('./', '')\n info_dict['Output file download link'] = download_url\n data_index += 1\n table_index += 1\n return info_dict", - "docstring": "Run the PPM server from OPM to predict transmembrane residues.\n\n Args:\n pdb_file (str): Path to PDB file\n outfile (str): Path to output HTML results file\n force_rerun (bool): Flag to rerun PPM if HTML results file already exists\n\n Returns:\n dict: Dictionary of information from the PPM run, including a link to download the membrane protein file" - }, - { - "code": "def CreatePermission(self, user_link, permission, options=None):\n if options is None:\n options = {}\n path, user_id = self._GetUserIdWithPathForPermission(permission, user_link)\n return self.Create(permission,\n path,\n 'permissions',\n user_id,\n None,\n options)", - "docstring": "Creates a permission for a user.\n\n :param str user_link:\n The link to the 
user entity.\n :param dict permission:\n The Azure Cosmos user permission to create.\n :param dict options:\n The request options for the request.\n\n :return:\n The created Permission.\n :rtype:\n dict" - }, - { - "code": "def save_config(\n self,\n cmd=\"copy running-config startup-config\",\n confirm=True,\n confirm_response=\"y\",\n ):\n return super(ExtremeSlxSSH, self).save_config(\n cmd=cmd, confirm=confirm, confirm_response=confirm_response\n )", - "docstring": "Save Config for Extreme SLX." - }, - { - "code": "def engage(self, height):\n if height > MAX_ENGAGE_HEIGHT or height < 0:\n raise ValueError('Invalid engage height. Should be 0 to {}'.format(\n MAX_ENGAGE_HEIGHT))\n self._driver.move(height)\n self._engaged = True", - "docstring": "Move the magnet to a specific height, in mm from home position" - }, - { - "code": "def start_parent():\n while True:\n args = [sys.executable] + sys.argv\n new_environ = environ.copy()\n new_environ[\"_IN_CHILD\"] = 'yes'\n ret = subprocess.call(args, env=new_environ)\n if ret != settings.CODE_RELOAD_EXIT:\n return ret", - "docstring": "Start the parent that will simply run the child forever until stopped." - }, - { - "code": "def abort(self, err):\n if _debug: IOGroup._debug(\"abort %r\", err)\n self.ioState = ABORTED\n self.ioError = err\n for iocb in self.ioMembers:\n iocb.abort(err)\n self.trigger()", - "docstring": "Called by a client to abort all of the member transactions.\n When the last pending member is aborted the group callback\n function will be called." - }, - { - "code": "def execute(self):\n self.print_info()\n self._config.provisioner.converge()\n self._config.state.change_state('converged', True)", - "docstring": "Execute the actions necessary to perform a `molecule converge` and\n returns None.\n\n :return: None" - }, - { - "code": "def _to_json(uniq):\n result_json = {}\n depth, ipix = utils.uniq2orderipix(uniq)\n min_depth = np.min(depth[0])\n max_depth = np.max(depth[-1])\n for d in range(min_depth, max_depth+1):\n pix_index = np.where(depth == d)[0]\n if pix_index.size:\n ipix_depth = ipix[pix_index]\n result_json[str(d)] = ipix_depth.tolist()\n return result_json", - "docstring": "Serializes a MOC to the JSON format.\n\n Parameters\n ----------\n uniq : `~numpy.ndarray`\n The array of HEALPix cells representing the MOC to serialize.\n\n Returns\n -------\n result_json : {str : [int]}\n A dictionary of HEALPix cell lists indexed by their depth." - }, - { - "code": "def _get_9q_square_qvm(name: str, noisy: bool,\n connection: ForestConnection = None,\n qvm_type: str = 'qvm') -> QuantumComputer:\n topology = nx.convert_node_labels_to_integers(nx.grid_2d_graph(3, 3))\n return _get_qvm_with_topology(name=name, connection=connection,\n topology=topology,\n noisy=noisy,\n requires_executable=True,\n qvm_type=qvm_type)", - "docstring": "A nine-qubit 3x3 square lattice.\n\n This uses a \"generic\" lattice not tied to any specific device. 9 qubits is large enough\n to do vaguely interesting algorithms and small enough to simulate quickly.\n\n :param name: The name of this QVM\n :param connection: The connection to use to talk to external services\n :param noisy: Whether to construct a noisy quantum computer\n :param qvm_type: The type of QVM. 
Either 'qvm' or 'pyqvm'.\n :return: A pre-configured QuantumComputer" - }, - { - "code": "def _get_max_sigma(self, R):\n max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)\n return max_sigma", - "docstring": "Calculate maximum sigma of scanner RAS coordinates\n\n Parameters\n ----------\n\n R : 2D array, with shape [n_voxel, n_dim]\n The coordinate matrix of fMRI data from one subject\n\n Returns\n -------\n\n max_sigma : float\n The maximum sigma of scanner coordinates." - }, - { - "code": "def scan(phenotype, X, G=None, K=None, covariates=None, progress=True,\n options=None):\n logger = logging.getLogger(__name__)\n logger.info('%s association scan has started.', phenotype.likelihood_name)\n if options is None:\n options = dict()\n if 'fast' not in options:\n options['fast'] = True\n if 'rank_norm' not in options:\n options['rank_norm'] = True\n n = phenotype.sample_size\n covariates = ones((n, 1)) if covariates is None else covariates\n X = _clone(X)\n G = _clone(G)\n K = _clone(K)\n if not is_all_finite(X):\n raise ValueError(\"The candidate matrix X has non-finite values.\")\n if G is not None and not is_all_finite(G):\n raise ValueError(\"The genetic markers matrix G has non-finite values.\")\n if K is not None and not is_all_finite(K):\n raise ValueError(\"The Kinship matrix K has non-finite values.\")\n background = Background()\n (Q0, Q1, S0) = _genetic_preprocess(X, G, K, background)\n qtl = QTLScan(phenotype, covariates, X, Q0, Q1, S0, options)\n qtl.progress = progress\n qtl.compute_statistics()\n return qtl", - "docstring": "Association between genetic variants and phenotype.\n\n Matrix `X` shall contain the genetic markers (e.g., number of minor\n alleles) with rows and columns representing samples and genetic markers,\n respectively.\n\n The user must specify only one of the parameters `G` and `K` for defining\n the genetic background.\n\n Let :math:`N` be the sample size, :math:`S` the number of covariates,\n :math:`P_c` the number of genetic markers to be tested, and :math:`P_b`\n the number of genetic markers used for Kinship estimation.\n\n Args:\n y (array_like): Phenotype. Dimension (:math:`N\\\\times 0`).\n X (array_like): Candidate genetic markers (or any other\n type of explanatory variable) whose\n association with the phenotype will be\n tested. Dimension (:math:`N\\\\times P_c`).\n G (array_like): Genetic markers matrix used internally for\n kinship estimation. Dimension\n (:math:`N\\\\times P_b`).\n K (array_like): Kinship matrix. Dimension\n (:math:`N\\\\times N`).\n covariates (array_like): Covariates. Default is an offset.\n Dimension (:math:`N\\\\times S`).\n progress (bool) : Shows progress. Defaults to `True`.\n\n Returns:\n A :class:`lim.genetics.qtl._canonical.CanonicalLRTScan` instance." - }, - { - "code": "def as_ordered(self, inplace=False):\n inplace = validate_bool_kwarg(inplace, 'inplace')\n return self.set_ordered(True, inplace=inplace)", - "docstring": "Set the Categorical to be ordered.\n\n Parameters\n ----------\n inplace : bool, default False\n Whether or not to set the ordered attribute in-place or return\n a copy of this categorical with ordered set to True." 
- }, - { - "code": "def listdir(directory):\n file_names = list()\n for filename in os.listdir(directory):\n file_path = os.path.join(directory, filename)\n if os.path.isdir(file_path):\n filename = f'{filename}{os.path.sep}'\n file_names.append(filename)\n return file_names", - "docstring": "Returns list of nested files and directories for local directory by path\n\n :param directory: absolute or relative path to local directory\n :return: list nested of file or directory names" - }, - { - "code": "def _resize_buffers(self, font_scale):\n new_sizes = (font_scale,) + self.size\n if new_sizes == self._current_sizes:\n return\n self._n_rows = int(max(self.size[1] /\n (self._char_height * font_scale), 1))\n self._n_cols = int(max(self.size[0] /\n (self._char_width * font_scale), 1))\n self._bytes_012 = np.zeros((self._n_rows, self._n_cols, 3), np.float32)\n self._bytes_345 = np.zeros((self._n_rows, self._n_cols, 3), np.float32)\n pos = np.empty((self._n_rows, self._n_cols, 2), np.float32)\n C, R = np.meshgrid(np.arange(self._n_cols), np.arange(self._n_rows))\n x_off = 4.\n y_off = 4 - self.size[1] / font_scale\n pos[..., 0] = x_off + self._char_width * C\n pos[..., 1] = y_off + self._char_height * R\n self._position = VertexBuffer(pos)\n for ii, line in enumerate(self._text_lines[:self._n_rows]):\n self._insert_text_buf(line, ii)\n self._current_sizes = new_sizes", - "docstring": "Resize buffers only if necessary" - }, - { - "code": "def _check_for_indent(self, newline_token):\n indent_delta = self._get_next_line_indent_delta(newline_token)\n if indent_delta is None or indent_delta == 1:\n return None\n else:\n self.errors.append(\n ('Line continuation must increment indent by 1.',\n newline_token.lexer.lineno))", - "docstring": "Checks that the line following a newline is indented, otherwise a\n parsing error is generated." - }, - { - "code": "def add_data_dict(self, datadict):\n self.set_codes(list(datadict.keys()))\n self.add_data(list(datadict.values()))", - "docstring": "Sets the data and country codes via a dictionary.\n\n i.e. {'DE': 50, 'GB': 30, 'AT': 70}" - }, - { - "code": "def data_iterator_csv_dataset(uri,\n batch_size,\n shuffle=False,\n rng=None,\n normalize=True,\n with_memory_cache=True,\n with_file_cache=True,\n cache_dir=None,\n epoch_begin_callbacks=[],\n epoch_end_callbacks=[]):\n ds = CsvDataSource(uri,\n shuffle=shuffle,\n rng=rng,\n normalize=normalize)\n return data_iterator(ds,\n batch_size=batch_size,\n with_memory_cache=with_memory_cache,\n with_file_cache=with_file_cache,\n cache_dir=cache_dir,\n epoch_begin_callbacks=epoch_begin_callbacks,\n epoch_end_callbacks=epoch_end_callbacks)", - "docstring": "data_iterator_csv_dataset\n Get data directly from a dataset provided as a CSV file.\n\n You can read files located on the local file system, http(s) servers or Amazon AWS S3 storage.\n\n For example,\n\n .. code-block:: python\n\n batch = data_iterator_csv_dataset('CSV_FILE.csv', batch_size, shuffle=True)\n\n Args:\n uri (str): Location of dataset CSV file.\n batch_size (int): Size of data unit.\n shuffle (bool):\n Indicates whether the dataset is shuffled or not.\n Default value is False. \n rng (None or :obj:`numpy.random.RandomState`): Numpy random number\n generator.\n normalize (bool): If True, each sample in the data gets normalized by a factor of 255. \n Default is True.\n with_memory_cache (bool):\n If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`\n to wrap ``data_source``. 
It is a good idea to set this as true unless\n data_source provides on-memory data.\n Default value is True.\n with_file_cache (bool):\n If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`\n to wrap ``data_source``.\n If ``data_source`` is slow, enabling this option a is good idea.\n Default value is False.\n cache_dir (str):\n Location of file_cache.\n If this value is None, :py:class:`.data_source.DataSourceWithFileCache`\n creates file caches implicitly on temporary directory and erases them all\n when data_iterator is finished.\n Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.\n Default is None.\n epoch_begin_callbacks (list of functions): An item is a function\n which takes an epoch index as an argument. These are called\n at the beginning of an epoch.\n epoch_end_callbacks (list of functions): An item is a function\n which takes an epoch index as an argument. These are called\n at the end of an epoch.\n\n\n Returns:\n :py:class:`DataIterator `:\n Instance of DataIterator" - }, - { - "code": "def get_only_element_from_collection(one_element_collection):\n if len(one_element_collection) != 1:\n raise AssertionError(u'Expected a collection with exactly one element, but got: {}'\n .format(one_element_collection))\n return funcy.first(one_element_collection)", - "docstring": "Assert that the collection has exactly one element, then return that element." - }, - { - "code": "def do_set_log_file(self, args):\n params = args.split()\n try:\n filename = params[0]\n logging.basicConfig(filename=filename)\n except IndexError:\n self.do_help('set_log_file')", - "docstring": "Set the log file.\n\n Usage:\n set_log_file filename\n Parameters:\n filename: log file name to write to\n\n THIS CAN ONLY BE CALLED ONCE AND MUST BE CALLED\n BEFORE ANY LOGGING STARTS." 
- }, - { - "code": "def handle_process_output(process, stdout_handler, stderr_handler,\n finalizer=None, decode_streams=True):\n def pump_stream(cmdline, name, stream, is_decode, handler):\n try:\n for line in stream:\n if handler:\n if is_decode:\n line = line.decode(defenc)\n handler(line)\n except Exception as ex:\n log.error(\"Pumping %r of cmd(%s) failed due to: %r\", name, cmdline, ex)\n raise CommandError(['<%s-pump>' % name] + cmdline, ex)\n finally:\n stream.close()\n cmdline = getattr(process, 'args', '')\n if not isinstance(cmdline, (tuple, list)):\n cmdline = cmdline.split()\n pumps = []\n if process.stdout:\n pumps.append(('stdout', process.stdout, stdout_handler))\n if process.stderr:\n pumps.append(('stderr', process.stderr, stderr_handler))\n threads = []\n for name, stream, handler in pumps:\n t = threading.Thread(target=pump_stream,\n args=(cmdline, name, stream, decode_streams, handler))\n t.setDaemon(True)\n t.start()\n threads.append(t)\n for t in threads:\n t.join()\n if finalizer:\n return finalizer(process)", - "docstring": "Registers for notifications to lean that process output is ready to read, and dispatches lines to\n the respective line handlers.\n This function returns once the finalizer returns\n\n :return: result of finalizer\n :param process: subprocess.Popen instance\n :param stdout_handler: f(stdout_line_string), or None\n :param stderr_handler: f(stderr_line_string), or None\n :param finalizer: f(proc) - wait for proc to finish\n :param decode_streams:\n Assume stdout/stderr streams are binary and decode them before pushing \\\n their contents to handlers.\n Set it to False if `universal_newline == True` (then streams are in text-mode)\n or if decoding must happen later (i.e. for Diffs)." - }, - { - "code": "def burst_run(self):\n get_and_call_next_op = self.get_and_call_next_op\n for __ in range(self.outer_burst_op_count):\n for __ in range(self.inner_burst_op_count):\n get_and_call_next_op()\n self.call_sync_callbacks()", - "docstring": "Run CPU as fast as Python can..." - }, - { - "code": "def script_request_send(self, target_system, target_component, seq, force_mavlink1=False):\n return self.send(self.script_request_encode(target_system, target_component, seq), force_mavlink1=force_mavlink1)", - "docstring": "Request script item with the sequence number seq. 
The response of the\n system to this message should be a SCRIPT_ITEM\n message.\n\n target_system : System ID (uint8_t)\n target_component : Component ID (uint8_t)\n seq : Sequence (uint16_t)" - }, - { - "code": "def setResponse(self, response):\n self.response = response\n self.result = self.response.body\n if isinstance(self.result, remoting.ErrorFault):\n self.result.raiseException()", - "docstring": "A response has been received by the gateway" - }, - { - "code": "def num_nodes(tree):\n if tree.is_leaf:\n return 1\n else:\n return 1 + num_nodes(tree.left_child) + num_nodes(tree.right_child)", - "docstring": "Determine the number of nodes in a tree" - }, - { - "code": "def allowed_methods(self):\n return [\n method\n for method, allowed in (\n ('GET', hasattr(self, 'on_get')),\n ('POST', hasattr(self, 'on_post')),\n ('PUT', hasattr(self, 'on_put')),\n ('PATCH', hasattr(self, 'on_patch')),\n ('DELETE', hasattr(self, 'on_delete')),\n ('HEAD', hasattr(self, 'on_head')),\n ('OPTIONS', hasattr(self, 'on_options')),\n ) if allowed\n ]", - "docstring": "Return list of allowed HTTP methods on this resource.\n\n This is only for purpose of making resource description.\n\n Returns:\n list: list of allowed HTTP method names (uppercase)" - }, - { - "code": "def construct_mapping(self, node, deep=False):\n if not isinstance(node, MappingNode):\n raise ConstructorError(\n None,\n None,\n 'expected a mapping node, but found {0}'.format(node.id),\n node.start_mark)\n self.flatten_mapping(node)\n context = 'while constructing a mapping'\n mapping = self.dictclass()\n for key_node, value_node in node.value:\n key = self.construct_object(key_node, deep=deep)\n try:\n hash(key)\n except TypeError:\n raise ConstructorError(\n context,\n node.start_mark,\n \"found unacceptable key {0}\".format(key_node.value),\n key_node.start_mark)\n value = self.construct_object(value_node, deep=deep)\n if key in mapping:\n raise ConstructorError(\n context,\n node.start_mark,\n \"found conflicting ID '{0}'\".format(key),\n key_node.start_mark)\n mapping[key] = value\n return mapping", - "docstring": "Build the mapping for YAML" - }, - { - "code": "def import_locations(self, baken_file):\n self._baken_file = baken_file\n data = ConfigParser()\n if hasattr(baken_file, 'readlines'):\n data.readfp(baken_file)\n elif isinstance(baken_file, list):\n data.read(baken_file)\n elif isinstance(baken_file, basestring):\n data.readfp(open(baken_file))\n else:\n raise TypeError('Unable to handle data of type %r'\n % type(baken_file))\n valid_locator = re.compile(r\"[A-Z]{2}\\d{2}[A-Z]{2}\")\n for name in data.sections():\n elements = {}\n for item in ('latitude', 'longitude', 'antenna', 'direction',\n 'frequency', 'height', 'locator', 'mode', 'operator',\n 'power', 'qth'):\n if data.has_option(name, item):\n if item in ('antenna', 'locator', 'mode', 'power', 'qth'):\n elements[item] = data.get(name, item)\n elif item == 'operator':\n elements[item] = elements[item].split(',')\n elif item == 'direction':\n elements[item] = data.get(name, item).split(',')\n else:\n try:\n elements[item] = data.getfloat(name, item)\n except ValueError:\n logging.debug('Multiple frequency workaround for '\n '%r entry' % name)\n elements[item] = \\\n map(float, data.get(name, item).split(','))\n else:\n elements[item] = None\n if elements['latitude'] is None \\\n and not valid_locator.match(elements['locator']):\n logging.info('Skipping %r entry, as it contains no location '\n 'data' % name)\n continue\n self[name] = Baken(**elements)", - "docstring": 
"Import baken data files.\n\n ``import_locations()`` returns a dictionary with keys containing the\n section title, and values consisting of a collection :class:`Baken`\n objects.\n\n It expects data files in the format used by the baken_ amateur radio\n package, which is Windows INI style files such as:\n\n .. code-block:: ini\n\n [Abeche, Chad]\n latitude=14.460000\n longitude=20.680000\n height=0.000000\n\n [GB3BUX]\n frequency=50.000\n locator=IO93BF\n power=25 TX\n antenna=2 x Turnstile\n height=460\n mode=A1A\n\n The reader uses the :mod:`configparser` module, so should be reasonably\n robust against encodings and such. The above file processed by\n ``import_locations()`` will return the following ``dict`` object::\n\n {\"Abeche, Chad\": Baken(14.460, 20.680, None, None, None, 0.000,\n None, None, None, None, None),\n \"GB3BUX\": : Baken(None, None, \"2 x Turnstile\", None, 50.000,\n 460.000, \"IO93BF\", \"A1A\", None, 25, None)}\n\n Args::\n baken_file (iter): Baken data to read\n\n Returns:\n dict: Named locations and their associated values\n\n .. _baken: http://www.qsl.net:80/g4klx/" - }, - { - "code": "def GetTSKVsPartByPathSpec(tsk_volume, path_spec):\n location = getattr(path_spec, 'location', None)\n part_index = getattr(path_spec, 'part_index', None)\n start_offset = getattr(path_spec, 'start_offset', None)\n partition_index = None\n if part_index is None:\n if location is not None:\n if location.startswith('/p'):\n try:\n partition_index = int(location[2:], 10) - 1\n except ValueError:\n pass\n if partition_index is None or partition_index < 0:\n location = None\n if location is None and start_offset is None:\n return None, None\n bytes_per_sector = TSKVolumeGetBytesPerSector(tsk_volume)\n current_part_index = 0\n current_partition_index = 0\n tsk_vs_part = None\n tsk_vs_part_list = list(tsk_volume)\n number_of_tsk_vs_parts = len(tsk_vs_part_list)\n if number_of_tsk_vs_parts > 0:\n if (part_index is not None and\n (part_index < 0 or part_index >= number_of_tsk_vs_parts)):\n return None, None\n for tsk_vs_part in tsk_vs_part_list:\n if TSKVsPartIsAllocated(tsk_vs_part):\n if partition_index is not None:\n if partition_index == current_partition_index:\n break\n current_partition_index += 1\n if part_index is not None and part_index == current_part_index:\n break\n if start_offset is not None:\n start_sector = TSKVsPartGetStartSector(tsk_vs_part)\n if start_sector is not None:\n start_sector *= bytes_per_sector\n if start_sector == start_offset:\n break\n current_part_index += 1\n if tsk_vs_part is None or current_part_index >= number_of_tsk_vs_parts:\n return None, None\n if not TSKVsPartIsAllocated(tsk_vs_part):\n current_partition_index = None\n return tsk_vs_part, current_partition_index", - "docstring": "Retrieves the TSK volume system part object from the TSK volume object.\n\n Args:\n tsk_volume (pytsk3.Volume_Info): TSK volume information.\n path_spec (PathSpec): path specification.\n\n Returns:\n tuple: contains:\n\n pytsk3.TSK_VS_PART_INFO: TSK volume system part information or\n None on error.\n int: partition index or None if not available." 
- }, - { - "code": "def set_inputhook(self, callback):\n ignore_CTRL_C()\n self._callback = callback\n self._callback_pyfunctype = self.PYFUNC(callback)\n pyos_inputhook_ptr = self.get_pyos_inputhook()\n original = self.get_pyos_inputhook_as_func()\n pyos_inputhook_ptr.value = \\\n ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value\n self._installed = True\n return original", - "docstring": "Set PyOS_InputHook to callback and return the previous one." - }, - { - "code": "def process_filter_directive(filter_operation_info, location, context):\n op_name, operator_params = _get_filter_op_name_and_values(filter_operation_info.directive)\n non_comparison_filters = {\n u'name_or_alias': _process_name_or_alias_filter_directive,\n u'between': _process_between_filter_directive,\n u'in_collection': _process_in_collection_filter_directive,\n u'has_substring': _process_has_substring_filter_directive,\n u'contains': _process_contains_filter_directive,\n u'intersects': _process_intersects_filter_directive,\n u'has_edge_degree': _process_has_edge_degree_filter_directive,\n }\n all_recognized_filters = frozenset(non_comparison_filters.keys()) | COMPARISON_OPERATORS\n if all_recognized_filters != ALL_OPERATORS:\n unrecognized_filters = ALL_OPERATORS - all_recognized_filters\n raise AssertionError(u'Some filtering operators are defined but do not have an associated '\n u'processing function. This is a bug: {}'.format(unrecognized_filters))\n if op_name in COMPARISON_OPERATORS:\n process_func = partial(_process_comparison_filter_directive, operator=op_name)\n else:\n process_func = non_comparison_filters.get(op_name, None)\n if process_func is None:\n raise GraphQLCompilationError(u'Unknown op_name for filter directive: {}'.format(op_name))\n if (filter_operation_info.field_name is None and\n op_name not in INNER_SCOPE_VERTEX_FIELD_OPERATORS):\n raise GraphQLCompilationError(u'The filter with op_name \"{}\" must be applied on a field. '\n u'It may not be applied on a type coercion.'.format(op_name))\n fields = ((filter_operation_info.field_name,) if op_name != 'name_or_alias'\n else ('name', 'alias'))\n context['metadata'].record_filter_info(\n location,\n FilterInfo(fields=fields, op_name=op_name, args=tuple(operator_params))\n )\n return process_func(filter_operation_info, location, context, operator_params)", - "docstring": "Return a Filter basic block that corresponds to the filter operation in the directive.\n\n Args:\n filter_operation_info: FilterOperationInfo object, containing the directive and field info\n of the field where the filter is to be applied.\n location: Location where this filter is used.\n context: dict, various per-compilation data (e.g. declared tags, whether the current block\n is optional, etc.). May be mutated in-place in this function!\n\n Returns:\n a Filter basic block that performs the requested filtering operation" - }, - { - "code": "def as_block_string(txt):\n import json\n lines = []\n for line in txt.split('\\n'):\n line_ = json.dumps(line)\n line_ = line_[1:-1].rstrip()\n lines.append(line_)\n return '' % '\\n'.join(lines)", - "docstring": "Return a string formatted as a python block comment string, like the one\n you're currently reading. Special characters are escaped if necessary." 
- }, - { - "code": "def _validate_ctypes(self, from_obj, to_obj):\n if from_obj:\n from_ctype = ContentType.objects.get_for_model(from_obj)\n assert from_ctype.natural_key() == self.from_content_type.natural_key(), (\n 'Relationship \"%s\" does not support connections '\n 'from \"%s\" types' % (self.name, from_ctype))\n if to_obj:\n to_ctype = ContentType.objects.get_for_model(to_obj)\n assert to_ctype.natural_key() == self.to_content_type.natural_key(), (\n 'Relationship \"%s\" does not support connections '\n 'to \"%s\" types' % (self.name, to_ctype))", - "docstring": "Asserts that the content types for the given object are valid for this\n relationship. If validation fails, ``AssertionError`` will be raised." - }, - { - "code": "def _process_response(response):\n error = response.exception()\n if error:\n if isinstance(error, aws_exceptions.AWSError):\n if error.args[1]['type'] in exceptions.MAP:\n raise exceptions.MAP[error.args[1]['type']](\n error.args[1]['message'])\n raise error\n http_response = response.result()\n if not http_response or not http_response.body:\n raise exceptions.DynamoDBException('empty response')\n return json.loads(http_response.body.decode('utf-8'))", - "docstring": "Process the raw AWS response, returning either the mapped exception\n or deserialized response.\n\n :param tornado.concurrent.Future response: The request future\n :rtype: dict or list\n :raises: sprockets_dynamodb.exceptions.DynamoDBException" - }, - { - "code": "def message(self, text):\n line = 0\n for char in text:\n if char == '\\n':\n line += 1\n col = 0 if self.displaymode & LCD_ENTRYLEFT > 0 else self._cols-1\n self.set_cursor(col, line)\n else:\n self.write8(ord(char), True)", - "docstring": "Write text to display. Note that text can include newlines." - }, - { - "code": "def _apply_data(self, f, ts, reverse=False):\n if isinstance(ts, (int, float)):\n d = ts * np.ones(self.shape[0])\n elif ts is None:\n d = None\n elif np.array_equal(ts.index, self.index):\n d = ts.values\n else:\n d = ts._retime(self.index)\n if not reverse:\n new_data = np.apply_along_axis(f, 0, self.values, d)\n else:\n new_data = np.apply_along_axis(f, 0, d, self.values)\n return Trace(new_data, self.index, name=self.name)", - "docstring": "Convenience function for all of the math stuff." 
- }, - { - "code": "def ncr(n, r):\n r = min(r, n - r)\n numer = reduce(op.mul, range(n, n - r, -1), 1)\n denom = reduce(op.mul, range(1, r + 1), 1)\n return numer // denom", - "docstring": "Calculate n choose r.\n\n :param n: n\n :type n : int\n :param r: r\n :type r :int\n :return: n choose r as int" - }, - { - "code": "def get_templatetag_module(cls):\n if cls not in CacheTag._templatetags_modules:\n all_tags = cls.get_all_tags_and_filters_by_function()['tags']\n CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]\n return CacheTag._templatetags_modules[cls]", - "docstring": "Return the templatetags module name for which the current class is used.\n It's used to render the nocache blocks by loading the correct module" - }, - { - "code": "def _ProcessDirectory(self, mediator, file_entry):\n self.processing_status = definitions.STATUS_INDICATOR_COLLECTING\n if self._processing_profiler:\n self._processing_profiler.StartTiming('collecting')\n for sub_file_entry in file_entry.sub_file_entries:\n if self._abort:\n break\n try:\n if not sub_file_entry.IsAllocated():\n continue\n except dfvfs_errors.BackEndError as exception:\n warning_message = (\n 'unable to process directory entry: {0:s} with error: '\n '{1!s}').format(sub_file_entry.name, exception)\n mediator.ProduceExtractionWarning(\n warning_message, path_spec=file_entry.path_spec)\n continue\n if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:\n if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':\n continue\n event_source = event_sources.FileEntryEventSource(\n path_spec=sub_file_entry.path_spec)\n stat_object = sub_file_entry.GetStat()\n if stat_object:\n event_source.file_entry_type = stat_object.type\n mediator.ProduceEventSource(event_source)\n self.last_activity_timestamp = time.time()\n if self._processing_profiler:\n self._processing_profiler.StopTiming('collecting')\n self.processing_status = definitions.STATUS_INDICATOR_RUNNING", - "docstring": "Processes a directory file entry.\n\n Args:\n mediator (ParserMediator): mediates the interactions between\n parsers and other components, such as storage and abort signals.\n file_entry (dfvfs.FileEntry): file entry of the directory." - }, - { - "code": "def get_spam_checker(backend_path):\n try:\n backend_module = import_module(backend_path)\n backend = getattr(backend_module, 'backend')\n except (ImportError, AttributeError):\n warnings.warn('%s backend cannot be imported' % backend_path,\n RuntimeWarning)\n backend = None\n except ImproperlyConfigured as e:\n warnings.warn(str(e), RuntimeWarning)\n backend = None\n return backend", - "docstring": "Return the selected spam checker backend." 
- }, - { - "code": "def read_cjson(cls, buf):\n if isinstance(buf, dict):\n data = buf.copy()\n else:\n with open(buf, 'r') as f:\n data = json.load(f)\n assert data['chemical json'] == 0\n n_atoms = len(data['atoms']['coords']['3d'])\n metadata = {}\n _metadata = {}\n coords = np.array(\n data['atoms']['coords']['3d']).reshape((n_atoms // 3, 3))\n atomic_number = constants.elements['atomic_number']\n elements = [dict(zip(atomic_number, atomic_number.index))[x]\n for x in data['atoms']['elements']['number']]\n try:\n connections = data['bonds']['connections']['index']\n except KeyError:\n pass\n else:\n bond_dict = defaultdict(set)\n for i, b in zip(connections[::2], connections[1::2]):\n bond_dict[i].add(b)\n bond_dict[b].add(i)\n _metadata['bond_dict'] = dict(bond_dict)\n try:\n metadata.update(data['properties'])\n except KeyError:\n pass\n out = cls(atoms=elements, coords=coords, _metadata=_metadata,\n metadata=metadata)\n return out", - "docstring": "Read a cjson file or a dictionary.\n\n The cjson format is specified\n `here `_.\n\n Args:\n buf (str, dict): If it is a filepath, the data is read from\n filepath. If it is a dictionary, the dictionary is interpreted\n as cjson.\n\n Returns:\n Cartesian:" - }, - { - "code": "def read_resource_list(self, uri):\n self.logger.info(\"Reading resource list %s\" % (uri))\n try:\n resource_list = ResourceList(allow_multifile=self.allow_multifile,\n mapper=self.mapper)\n resource_list.read(uri=uri)\n except Exception as e:\n raise ClientError(\"Can't read source resource list from %s (%s)\" %\n (uri, str(e)))\n self.logger.debug(\"Finished reading resource list\")\n return(resource_list)", - "docstring": "Read resource list from specified URI else raise exception." - }, - { - "code": "def decorate(self, other=None, **kwargs):\n if 'color' in kwargs:\n incompatible = []\n for othercolor in ('linecolor', 'fillcolor', 'markercolor'):\n if othercolor in kwargs:\n incompatible.append(othercolor)\n if incompatible:\n raise ValueError(\n \"Setting both the `color` and the `{0}` attribute{1} \"\n \"is ambiguous. Please set only one.\".format(\n ', '.join(incompatible),\n 's' if len(incompatible) != 1 else ''))\n if other is not None:\n decor = other.decorators\n if 'color' in kwargs:\n decor.pop('linecolor', None)\n decor.pop('fillcolor', None)\n decor.pop('markercolor', None)\n decor.update(kwargs)\n kwargs = decor\n for key, value in kwargs.items():\n if key in Plottable.EXTRA_ATTRS_DEPRECATED:\n newkey = Plottable.EXTRA_ATTRS_DEPRECATED[key]\n warnings.warn(\n \"`{0}` is deprecated and will be removed in \"\n \"future versions. Use `{1}` instead\".format(\n key, newkey),\n DeprecationWarning)\n key = newkey\n if key in Plottable.EXTRA_ATTRS:\n setattr(self, key, value)\n elif key == 'markerstyle':\n self.SetMarkerStyle(value)\n elif key == 'markercolor':\n self.SetMarkerColor(value)\n elif key == 'markersize':\n self.SetMarkerSize(value)\n elif key == 'fillcolor':\n self.SetFillColor(value)\n elif key == 'fillstyle':\n self.SetFillStyle(value)\n elif key == 'linecolor':\n self.SetLineColor(value)\n elif key == 'linestyle':\n self.SetLineStyle(value)\n elif key == 'linewidth':\n self.SetLineWidth(value)\n elif key == 'color':\n self.SetColor(value)\n else:\n raise AttributeError(\n \"unknown decoration attribute: `{0}`\".format(key))\n return self", - "docstring": "Apply style options to a Plottable object.\n\n Returns a reference to self." 
- }, - { - "code": "def get_number_of_desktops(self):\n ndesktops = ctypes.c_long(0)\n _libxdo.xdo_get_number_of_desktops(self._xdo, ctypes.byref(ndesktops))\n return ndesktops.value", - "docstring": "Get the current number of desktops.\n Uses ``_NET_NUMBER_OF_DESKTOPS`` of the EWMH spec.\n\n :param ndesktops:\n pointer to long where the current number of desktops is stored" - }, - { - "code": "def rmrf(items, verbose=True):\n \"Silently remove a list of directories or files\"\n if isinstance(items, str):\n items = [items]\n for item in items:\n if verbose:\n print(\"Removing {}\".format(item))\n shutil.rmtree(item, ignore_errors=True)\n try:\n os.remove(item)\n except FileNotFoundError:\n pass", - "docstring": "Silently remove a list of directories or files" - }, - { - "code": "def _authenticate():\n global url, port, ticket, csrf, verify_ssl\n url = config.get_cloud_config_value(\n 'url', get_configured_provider(), __opts__, search_global=False\n )\n port = config.get_cloud_config_value(\n 'port', get_configured_provider(), __opts__,\n default=8006, search_global=False\n )\n username = config.get_cloud_config_value(\n 'user', get_configured_provider(), __opts__, search_global=False\n ),\n passwd = config.get_cloud_config_value(\n 'password', get_configured_provider(), __opts__, search_global=False\n )\n verify_ssl = config.get_cloud_config_value(\n 'verify_ssl', get_configured_provider(), __opts__,\n default=True, search_global=False\n )\n connect_data = {'username': username, 'password': passwd}\n full_url = 'https://{0}:{1}/api2/json/access/ticket'.format(url, port)\n returned_data = requests.post(\n full_url, verify=verify_ssl, data=connect_data).json()\n ticket = {'PVEAuthCookie': returned_data['data']['ticket']}\n csrf = six.text_type(returned_data['data']['CSRFPreventionToken'])", - "docstring": "Retrieve CSRF and API tickets for the Proxmox API" - }, - { - "code": "def reboot(name, call=None):\n if call != 'action':\n raise SaltCloudException(\n 'The reboot action must be called with -a or --action.'\n )\n my_info = _get_my_info(name)\n profile_name = my_info[name]['profile']\n profile = __opts__['profiles'][profile_name]\n host = profile['host']\n local = salt.client.LocalClient()\n return local.cmd(host, 'vagrant.reboot', [name])", - "docstring": "Reboot a vagrant minion.\n\n name\n The name of the VM to reboot.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -a reboot vm_name" - }, - { - "code": "def build_class(name, basenames=(), doc=None):\n node = nodes.ClassDef(name, doc)\n for base in basenames:\n basenode = nodes.Name()\n basenode.name = base\n node.bases.append(basenode)\n basenode.parent = node\n return node", - "docstring": "create and initialize an astroid ClassDef node" - }, - { - "code": "def copy_info(self, other, ignore=None):\n logging.info(\"Copying info\")\n if ignore is None:\n ignore = []\n if isinstance(ignore, (str, unicode)):\n ignore = [ignore]\n ignore = set(ignore + [self.samples_group])\n copy_groups = set(self.keys()) - ignore\n for key in copy_groups:\n super(BaseInferenceFile, self).copy(key, other)", - "docstring": "Copies \"info\" from this file to the other.\n\n \"Info\" is defined all groups that are not the samples group.\n\n Parameters\n ----------\n other : output file\n The output file. Must be an hdf file.\n ignore : (list of) str\n Don't copy the given groups." 
- }, - { - "code": "def select(self):\n err = self._oasis_obj.error\n if np.allclose(err, 0):\n return None\n nsel = self._check_nsel()\n if nsel is None:\n return None\n return self._select(nsel, err)", - "docstring": "Selects next column indexes according to defined strategy\n\n Returns\n -------\n cols : ndarray((nsel,), dtype=int)\n selected columns" - }, - { - "code": "def _get_settings_from_request(request):\n settings = request.registry.settings\n if 'zipkin.create_zipkin_attr' in settings:\n zipkin_attrs = settings['zipkin.create_zipkin_attr'](request)\n else:\n zipkin_attrs = create_zipkin_attr(request)\n if 'zipkin.transport_handler' in settings:\n transport_handler = settings['zipkin.transport_handler']\n if not isinstance(transport_handler, BaseTransportHandler):\n warnings.warn(\n 'Using a function as transport_handler is deprecated. '\n 'Please extend py_zipkin.transport.BaseTransportHandler',\n DeprecationWarning,\n )\n stream_name = settings.get('zipkin.stream_name', 'zipkin')\n transport_handler = functools.partial(transport_handler, stream_name)\n else:\n raise ZipkinError(\n \"`zipkin.transport_handler` is a required config property, which\"\n \" is missing. Have a look at py_zipkin's docs for how to implement\"\n \" it: https://github.com/Yelp/py_zipkin\n )\n context_stack = _getattr_path(request, settings.get('zipkin.request_context'))\n service_name = settings.get('service_name', 'unknown')\n span_name = '{0} {1}'.format(request.method, request.path)\n add_logging_annotation = settings.get(\n 'zipkin.add_logging_annotation',\n False,\n )\n if 'zipkin.report_root_timestamp' in settings:\n report_root_timestamp = settings['zipkin.report_root_timestamp']\n else:\n report_root_timestamp = 'X-B3-TraceId' not in request.headers\n zipkin_host = settings.get('zipkin.host')\n zipkin_port = settings.get('zipkin.port', request.server_port)\n firehose_handler = settings.get('zipkin.firehose_handler')\n post_handler_hook = settings.get('zipkin.post_handler_hook')\n max_span_batch_size = settings.get('zipkin.max_span_batch_size')\n use_pattern_as_span_name = bool(\n settings.get('zipkin.use_pattern_as_span_name', False),\n )\n encoding = settings.get('zipkin.encoding', Encoding.V1_THRIFT)\n return _ZipkinSettings(\n zipkin_attrs,\n transport_handler,\n service_name,\n span_name,\n add_logging_annotation,\n report_root_timestamp,\n zipkin_host,\n zipkin_port,\n context_stack,\n firehose_handler,\n post_handler_hook,\n max_span_batch_size,\n use_pattern_as_span_name,\n encoding=encoding,\n )", - "docstring": "Extracts Zipkin attributes and configuration from request attributes.\n See the `zipkin_span` context in py-zipkin for more detaied information on\n all the settings.\n\n Here are the supported Pyramid registry settings:\n\n zipkin.create_zipkin_attr: allows the service to override the creation of\n Zipkin attributes. For example, if you want to deterministically\n calculate trace ID from some service-specific attributes.\n zipkin.transport_handler: how py-zipkin will log the spans it generates.\n zipkin.stream_name: an additional parameter to be used as the first arg\n to the transport_handler function. A good example is a Kafka topic.\n zipkin.add_logging_annotation: if true, the outermost span in this service\n will have an annotation set when py-zipkin begins its logging.\n zipkin.report_root_timestamp: if true, the outermost span in this service\n will set its timestamp and duration attributes. 
Use this only if this\n service is not going to have a corresponding client span. See\n https://github.com/Yelp/pyramid_zipkin/issues/68\n zipkin.firehose_handler: [EXPERIMENTAL] this enables \"firehose tracing\",\n which will log 100% of the spans to this handler, regardless of\n sampling decision. This is experimental and may change or be removed\n at any time without warning.\n zipkin.use_pattern_as_span_name: if true, we'll use the pyramid route pattern\n as span name. If false (default) we'll keep using the raw url path." - }, - { - "code": "def write_remote_map(self):\n remote_map = salt.utils.path.join(self.cache_root, 'remote_map.txt')\n try:\n with salt.utils.files.fopen(remote_map, 'w+') as fp_:\n timestamp = \\\n datetime.now().strftime('%d %b %Y %H:%M:%S.%f')\n fp_.write(\n '\n self.role,\n timestamp\n )\n )\n for repo in self.remotes:\n fp_.write(\n salt.utils.stringutils.to_str(\n '{0} = {1}\\n'.format(\n repo.cachedir_basename,\n repo.id\n )\n )\n )\n except OSError:\n pass\n else:\n log.info('Wrote new %s remote map to %s', self.role, remote_map)", - "docstring": "Write the remote_map.txt" - }, - { - "code": "def build_base_parameters(request):\n getParameters = {}\n postParameters = {}\n files = {}\n for v in request.GET:\n if v[:6] != 'ebuio_':\n val = request.GET.getlist(v)\n if len(val) == 1:\n getParameters[v] = val[0]\n else:\n getParameters[v] = val\n if request.method == 'POST':\n for v in request.POST:\n if v[:6] != 'ebuio_':\n val = request.POST.getlist(v)\n if len(val) == 1:\n postParameters[v] = val[0]\n else:\n postParameters[v] = val\n for v in request.FILES:\n if v[:6] != 'ebuio_':\n files[v] = request.FILES[v]\n return (getParameters, postParameters, files)", - "docstring": "Build the list of parameters to forward from the post and get parameters" - }, - { - "code": "def search(self, index, query, **kwargs):\n itercls = kwargs.pop('itercls', _FTS.SearchRequest)\n iterargs = itercls.mk_kwargs(kwargs)\n params = kwargs.pop('params', _FTS.Params(**kwargs))\n body = _FTS.make_search_body(index, query, params)\n return itercls(body, self, **iterargs)", - "docstring": "Perform full-text searches\n\n .. versionadded:: 2.0.9\n\n .. warning::\n\n The full-text search API is experimental and subject to change\n\n :param str index: Name of the index to query\n :param couchbase.fulltext.SearchQuery query: Query to issue\n :param couchbase.fulltext.Params params: Additional query options\n :return: An iterator over query hits\n\n .. note:: You can avoid instantiating an explicit `Params` object\n and instead pass the parameters directly to the `search` method.\n\n .. code-block:: python\n\n it = cb.search('name', ft.MatchQuery('nosql'), limit=10)\n for hit in it:\n print(hit)" - }, - { - "code": "def replace_suffixes_4(self, word):\n length = len(word)\n replacements = {'ational': 'ate', 'tional': 'tion', 'alize': 'al',\n 'icate': 'ic', 'iciti': 'ic', 'ical': 'ic',\n 'ful': '', 'ness': ''}\n for suffix in replacements.keys():\n if word.endswith(suffix):\n suffix_length = len(suffix)\n if self.r1 <= (length - suffix_length):\n word = word[:-suffix_length] + replacements[suffix]\n if word.endswith('ative'):\n if self.r1 <= (length - 5) and self.r2 <= (length - 5):\n word = word[:-5]\n return word", - "docstring": "Perform replacements on even more common suffixes." 
- }, - { - "code": "def continues(method):\n @functools.wraps(method)\n def wrapped(self, *args, **kwargs):\n yield method(self, *args, **kwargs)\n raise self.Continue()\n return wrapped", - "docstring": "Method decorator signifying that the visitor should not visit the\n current node's children once this method has been invoked." - }, - { - "code": "def call(self, command, *args):\n command = self._normalize_command_name(command)\n args = self._normalize_command_args(command, *args)\n redis_function = getattr(self, command)\n value = redis_function(*args)\n return self._normalize_command_response(command, value)", - "docstring": "Sends call to the function, whose name is specified by command.\n\n Used by Script invocations and normalizes calls using standard\n Redis arguments to use the expected redis-py arguments." - }, - { - "code": "def add_label(self, name, addr):\n self._symbolization_needed = True\n self.symbol_manager.new_label(addr, name=name, force=True)", - "docstring": "Add a new label to the symbol manager.\n\n :param str name: Name of the label.\n :param int addr: Address of the label.\n :return: None" - }, - { - "code": "def uuid_constructor(loader, node):\n value = loader.construct_scalar(node)\n return uuid.UUID(value)", - "docstring": "Construct a uuid.UUID object form a scalar YAML node.\n\n Tests:\n >>> yaml.add_constructor(\"!uuid\", uuid_constructor, Loader=yaml.SafeLoader)\n >>> yaml.safe_load(\"{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}\")\n {'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}" - }, - { - "code": "def target_to_hostname(target):\n if len(target) == 0 or len(target) > 255:\n return None\n if not re.match(r'^[\\w.-]+$', target):\n return None\n return [target]", - "docstring": "Attempt to return a single hostname list from a target string." - }, - { - "code": "def reset_epoch(func):\n @wraps(func)\n def wrapped_func(*args, **kwargs):\n connection = kwargs.get('connection', None)\n epoch = connection.current_epoch() if connection else None\n try:\n return func(*args, **kwargs)\n finally:\n if epoch is not None:\n connection.set_epoch(epoch.gps_start, epoch.gps_stop)\n return wrapped_func", - "docstring": "Wrap a function to reset the epoch when finished\n\n This is useful for functions that wish to use `connection.set_epoch`." - }, - { - "code": "def all_dependencies(self, target):\n for dep in target.closure(bfs=True, **self.target_closure_kwargs):\n yield dep", - "docstring": "All transitive dependencies of the context's target." - }, - { - "code": "def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, options=None):\n if edns is None or edns is False:\n edns = -1\n if edns is True:\n edns = 0\n if request_payload is None:\n request_payload = payload\n if edns < 0:\n ednsflags = 0\n payload = 0\n request_payload = 0\n options = []\n else:\n ednsflags &= 0xFF00FFFFL\n ednsflags |= (edns << 16)\n if options is None:\n options = []\n self.edns = edns\n self.ednsflags = ednsflags\n self.payload = payload\n self.options = options\n self.request_payload = request_payload", - "docstring": "Configure EDNS behavior.\n @param edns: The EDNS level to use. Specifying None, False, or -1\n means 'do not use EDNS', and in this case the other parameters are\n ignored. Specifying True is equivalent to specifying 0, i.e. 
'use\n EDNS0'.\n @type edns: int or bool or None\n @param ednsflags: EDNS flag values.\n @type ednsflags: int\n @param payload: The EDNS sender's payload field, which is the maximum\n size of UDP datagram the sender can handle.\n @type payload: int\n @param request_payload: The EDNS payload size to use when sending\n this message. If not specified, defaults to the value of payload.\n @type request_payload: int or None\n @param options: The EDNS options\n @type options: None or list of dns.edns.Option objects\n @see: RFC 2671" - }, - { - "code": "def treewidth_branch_and_bound(G, elimination_order=None, treewidth_upperbound=None):\n if not any(G[v] for v in G):\n return 0, list(G)\n x = []\n f = minor_min_width(G)\n g = 0\n ub, order = min_fill_heuristic(G)\n if elimination_order is not None:\n upperbound = elimination_order_width(G, elimination_order)\n if upperbound <= ub:\n ub, order = upperbound, elimination_order\n if treewidth_upperbound is not None and treewidth_upperbound < ub:\n ub, order = treewidth_upperbound, []\n best_found = ub, order\n assert f <= ub, \"Logic error\"\n if f < ub:\n adj = {v: set(G[v]) for v in G}\n best_found = _branch_and_bound(adj, x, g, f, best_found)\n return best_found", - "docstring": "Computes the treewidth of graph G and a corresponding perfect elimination ordering.\n\n Parameters\n ----------\n G : NetworkX graph\n The graph on which to compute the treewidth and perfect elimination ordering.\n\n elimination_order: list (optional, Default None)\n An elimination order used as an initial best-known order. If a good\n order is provided, it may speed up computation. If not provided, the\n initial order is generated using the min-fill heuristic.\n\n treewidth_upperbound : int (optional, Default None)\n An upper bound on the treewidth. Note that using\n this parameter can result in no returned order.\n\n Returns\n -------\n treewidth : int\n The treewidth of graph G.\n order : list\n An elimination order that induces the treewidth.\n\n Examples\n --------\n This example computes the treewidth for the :math:`K_7`\n complete graph using an optionally provided elimination order (a sequential\n ordering of the nodes, arbitrally chosen).\n\n >>> import dwave_networkx as dnx\n >>> import networkx as nx\n >>> K_7 = nx.complete_graph(7)\n >>> dnx.treewidth_branch_and_bound(K_7, [0, 1, 2, 3, 4, 5, 6])\n (6, [0, 1, 2, 3, 4, 5, 6])\n\n References\n ----------\n .. 
[GD] Gogate & Dechter, \"A Complete Anytime Algorithm for Treewidth\",\n https://arxiv.org/abs/1207.4109" - }, - { - "code": "def update(self, figure):\n if hasattr(self, \"figure_canvas\"):\n self.figure_canvas.Destroy()\n self.figure_canvas = self._get_figure_canvas(figure)\n self.figure_canvas.SetSize(self.GetSize())\n figure.subplots_adjust()\n self.main_sizer.Add(self.figure_canvas, 1,\n wx.EXPAND | wx.FIXED_MINSIZE, 0)\n self.Layout()\n self.figure_canvas.draw()", - "docstring": "Updates figure on data change\n\n Parameters\n ----------\n * figure: matplotlib.figure.Figure\n \\tMatplotlib figure object that is displayed in self" - }, - { - "code": "def _netapp_login(self):\n self.server = NaServer(self.ip, 1, 3)\n self.server.set_transport_type('HTTPS')\n self.server.set_style('LOGIN')\n self.server.set_admin_user(self.netapp_user, self.netapp_password)", - "docstring": "Login to our netapp filer" - }, - { - "code": "def _distance(self, x0, y0, x1, y1):\n dx = x1-x0\n dy = y1-y0\n if self.pix:\n dx[ dx > self.Lx/2 ] -= self.Lx\n dx[ dx < -self.Lx/2 ] += self.Lx\n if self.piy:\n dy[ dy > self.Ly/2 ] -= self.Ly\n dy[ dy < -self.Ly/2 ] += self.Ly\n return dx, dy", - "docstring": "Utitlity function to compute distance between points." - }, - { - "code": "def millRule(self,aLvlNow,pLvlNow,MPCnow,TranShkNow,EmpNow,t_age,LorenzBool,ManyStatsBool):\n self.calcStats(aLvlNow,pLvlNow,MPCnow,TranShkNow,EmpNow,t_age,LorenzBool,ManyStatsBool)\n if self.AggShockBool:\n return self.calcRandW(aLvlNow,pLvlNow)\n else:\n self.MaggNow = 0.0\n self.AaggNow = 0.0", - "docstring": "The millRule for this class simply calls the method calcStats." - }, - { - "code": "def _packet_loop(self):\n while self._is_running:\n if self.inbox.empty() \\\n and not self.new_packet.wait(self._packet_timeout):\n continue\n ip, port, packet = self.inbox.get()\n if self.inbox.empty():\n self.new_packet.clear()\n self.debug(u\"{}\".format(packet))\n if packet.header.message_type == MsgType.CONFIG:\n self._do_config_packet(packet, ip, port)\n elif packet.header.message_type == MsgType.UPDATE:\n self._do_update_packet(packet)", - "docstring": "Packet processing loop\n\n :rtype: None" - }, - { - "code": "def resample_single_nifti(input_nifti):\n input_image = nibabel.load(input_nifti)\n output_image = resample_nifti_images([input_image])\n output_image.to_filename(input_nifti)", - "docstring": "Resample a gantry tilted image in place" - }, - { - "code": "def delete_router(self, context, router_id, router):\n if router:\n router_name = self._arista_router_name(router_id, router['name'])\n mlag_peer_failed = False\n for s in self._servers:\n try:\n self.delete_router_from_eos(router_name, s)\n mlag_peer_failed = False\n except Exception:\n if self._mlag_configured and not mlag_peer_failed:\n mlag_peer_failed = True\n else:\n msg = (_('Failed to create router %s on EOS') %\n router_name)\n LOG.exception(msg)\n raise arista_exc.AristaServicePluginRpcError(msg=msg)", - "docstring": "Deletes a router from Arista Switch." - }, - { - "code": "def get_acs(self):\n import predix.security.acs\n acs = predix.security.acs.AccessControl()\n return acs", - "docstring": "Returns an instance of the Asset Control Service." 
- }, - { - "code": "def new_class_from_gtype(gtype):\n if gtype.is_a(PGType.from_name(\"GObject\")):\n parent = gtype.parent.pytype\n if parent is None or parent == PGType.from_name(\"void\"):\n return\n interfaces = [i.pytype for i in gtype.interfaces]\n bases = tuple([parent] + interfaces)\n cls = type(gtype.name, bases, dict())\n cls.__gtype__ = gtype\n return cls\n elif gtype.is_a(PGType.from_name(\"GEnum\")):\n from pgi.enum import GEnumBase\n return GEnumBase", - "docstring": "Create a new class for a gtype not in the gir.\n The caller is responsible for caching etc." - }, - { - "code": "def count(fn, coll):\n return len([x for x in coll if fn(x) is True])", - "docstring": "Return the count of True values returned by the predicate function applied to the\n collection\n\n :param fn: a predicate function\n :param coll: a collection\n :returns: an integer\n\n >>> count(lambda x: x % 2 == 0, [11, 22, 31, 24, 15])\n 2" - }, - { - "code": "def plot(self, plot_grouped=False, dates=None, min_dets=1, rate=False,\n **kwargs):\n all_dets = []\n if dates:\n new_party = self.filter(dates=dates, min_dets=min_dets)\n for fam in new_party.families:\n all_dets.extend(fam.detections)\n else:\n for fam in self.families:\n all_dets.extend(fam.detections)\n fig = cumulative_detections(detections=all_dets,\n plot_grouped=plot_grouped,\n rate=rate, **kwargs)\n return fig", - "docstring": "Plot the cumulative detections in time.\n\n :type plot_grouped: bool\n :param plot_grouped:\n Whether to plot all families together (plot_grouped=True), or each\n as a separate line.\n :type dates: list\n :param dates: list of obspy.core.UTCDateTime objects bounding the\n plot. The first should be the start date, the last the end date.\n :type min_dets: int\n :param min_dets: Plot only families with this number of detections\n or more.\n :type rate: bool\n :param rate: Whether or not to plot the daily rate of detection as\n opposed to cumulative number. Only works with plot_grouped=True.\n :param \\**kwargs: Any other arguments accepted by\n :func:`eqcorrscan.utils.plotting.cumulative_detections`\n\n .. rubric:: Examples\n\n Plot cumulative detections for all templates individually:\n\n >>> Party().read().plot() # doctest: +SKIP\n\n Plot cumulative detections for all templates grouped together:\n\n >>> Party().read().plot(plot_grouped=True) # doctest: +SKIP\n\n Plot the rate of detection for all templates grouped together:\n\n >>> Party().read().plot(plot_grouped=True, rate=True) # doctest: +SKIP\n\n Plot cumulative detections for all templates with more than five\n detections between June 1st, 2012 and July 31st, 2012:\n\n >>> from obspy import UTCDateTime\n >>> Party().read().plot(dates=[UTCDateTime(2012, 6, 1),\n ... UTCDateTime(2012, 7, 31)],\n ... min_dets=5) # doctest: +SKIP" - }, - { - "code": "def msg_name(code):\n ids = {v: k for k, v in COMMANDS.items()}\n return ids[code]", - "docstring": "Convert integer message code into a string name." 
- }, - { - "code": "def get_collections(self):\n collections = self.request.matchdict['collections'].split('/')[0]\n collections = [coll.strip() for coll in collections.split(',')]\n return set(collections)", - "docstring": "Get names of collections from request matchdict.\n\n :return: Names of collections\n :rtype: list of str" - }, - { - "code": "def EnsurePythonVersion(self, major, minor):\n if sys.version_info < (major, minor):\n v = sys.version.split()[0]\n print(\"Python %d.%d or greater required, but you have Python %s\" %(major,minor,v))\n sys.exit(2)", - "docstring": "Exit abnormally if the Python version is not late enough." - }, - { - "code": "def _write_frame(self, frame_out):\n self._connection.write_frame(0, frame_out)\n LOGGER.debug('Frame Sent: %s', frame_out.name)", - "docstring": "Write a pamqp frame from Channel0.\n\n :param frame_out: Amqp frame.\n :return:" - }, - { - "code": "def page(self, attr=None, fill=u' '):\r\n u\r\n if attr is None:\r\n attr = self.attr\r\n if len(fill) != 1:\r\n raise ValueError\r\n info = CONSOLE_SCREEN_BUFFER_INFO()\r\n self.GetConsoleScreenBufferInfo(self.hout, byref(info))\r\n if info.dwCursorPosition.X != 0 or info.dwCursorPosition.Y != 0:\r\n self.SetConsoleCursorPosition(self.hout, self.fixcoord(0, 0))\r\n w = info.dwSize.X\r\n n = DWORD(0)\r\n for y in range(info.dwSize.Y):\r\n self.FillConsoleOutputAttribute(self.hout, attr, \r\n w, self.fixcoord(0, y), byref(n))\r\n self.FillConsoleOutputCharacterW(self.hout, ord(fill[0]), \r\n w, self.fixcoord(0, y), byref(n))\r\n self.attr = attr", - "docstring": "u'''Fill the entire screen." - }, - { - "code": "def check_table(\n problems: List,\n table: str,\n df: DataFrame,\n condition,\n message: str,\n type_: str = \"error\",\n) -> List:\n indices = df.loc[condition].index.tolist()\n if indices:\n problems.append([type_, message, table, indices])\n return problems", - "docstring": "Check the given GTFS table for the given problem condition.\n\n Parameters\n ----------\n problems : list\n A four-tuple containing\n\n 1. A problem type (string) equal to ``'error'`` or ``'warning'``;\n ``'error'`` means the GTFS is violated;\n ``'warning'`` means there is a problem but it is not a\n GTFS violation\n 2. A message (string) that describes the problem\n 3. A GTFS table name, e.g. ``'routes'``, in which the problem\n occurs\n 4. A list of rows (integers) of the table's DataFrame where the\n problem occurs\n\n table : string\n Name of a GTFS table\n df : DataFrame\n The GTFS table corresponding to ``table``\n condition : boolean expression\n One involving ``df``, e.g.`df['route_id'].map(is_valid_str)``\n message : string\n Problem message, e.g. ``'Invalid route_id'``\n type_ : string\n ``'error'`` or ``'warning'`` indicating the type of problem\n encountered\n\n Returns\n -------\n list\n The ``problems`` list extended as follows.\n Record the indices of ``df`` that statisfy the condition.\n If the list of indices is nonempty, append to the\n problems the item ``[type_, message, table, indices]``;\n otherwise do not append anything." 
- }, - { - "code": "def add_tag(self, new_tags):\n tags = self.get_tags()\n orig_tag_cnt = len(tags)\n if isinstance(new_tags, six.string_types):\n new_tags = new_tags.split(',')\n for tag in new_tags:\n if not tag in tags:\n tags.append(tag.strip())\n if len(tags) > orig_tag_cnt:\n xml_tags = escape(\",\".join(tags))\n post_data = TAGS_TEMPLATE.format(connectware_id=self.get_connectware_id(),\n tags=xml_tags)\n self._conn.put('/ws/DeviceCore', post_data)\n self._device_json = None", - "docstring": "Add a tag to existing device tags. This method will not add a duplicate, if already in the list.\n\n :param new_tags: the tag(s) to be added. new_tags can be a comma-separated string or list" - }, - { - "code": "def detail_dict(self):\n d = self.dict\n def aug_col(c):\n d = c.dict\n d['stats'] = [s.dict for s in c.stats]\n return d\n d['table'] = self.table.dict\n d['table']['columns'] = [aug_col(c) for c in self.table.columns]\n return d", - "docstring": "A more detailed dict that includes the descriptions, sub descriptions, table\n and columns." - }, - { - "code": "def check_available_ram(self, requested_ram):\n available_ram = int(psutil.virtual_memory().available / (1024 * 1024))\n percentage_left = psutil.virtual_memory().percent\n if requested_ram > available_ram:\n message = '\"{}\" requires {}MB of RAM to run but there is only {}MB - {}% of RAM left on \"{}\"'.format(self.name,\n requested_ram,\n available_ram,\n percentage_left,\n platform.node())\n self.project.emit(\"log.warning\", {\"message\": message})", - "docstring": "Sends a warning notification if there is not enough RAM on the system to allocate requested RAM.\n\n :param requested_ram: requested amount of RAM in MB" - }, - { - "code": "def __geometryToDict(self, geom):\n if isinstance(geom, dict):\n return geom\n elif isinstance(geom, Point):\n pt = geom.asDictionary\n return {\"geometry\": {\"x\" : pt['x'], \"y\" : pt['y']}}\n elif isinstance(geom, Polygon):\n poly = geom.asDictionary\n return {\n \"geometry\" : {\n \"rings\" : poly['rings'],\n 'spatialReference' : poly['spatialReference']\n }\n }\n elif isinstance(geom, list):\n return [self.__geometryToDict(g) for g in geom]", - "docstring": "converts a geometry object to a dictionary" - }, - { - "code": "def run_stop_backup(cls):\n def handler(popen):\n assert popen.returncode != 0\n raise UserException('Could not stop hot backup')\n return cls._dict_transform(psql_csv_run(\n \"SELECT file_name, \"\n \" lpad(file_offset::text, 8, '0') AS file_offset \"\n \"FROM pg_{0}file_name_offset(\"\n \" pg_stop_backup())\".format(cls._wal_name()),\n error_handler=handler))", - "docstring": "Stop a hot backup, if it was running, or error\n\n Return the last WAL file name and position that is required to\n gain consistency on the captured heap." 
- }, - { - "code": "def hicpro_contact_chart (self):\n keys = OrderedDict()\n keys['cis_shortRange'] = { 'color': '\n keys['cis_longRange'] = { 'color': '\n keys['trans_interaction'] = { 'color': '\n keys['duplicates'] = { 'color': '\n config = {\n 'id': 'hicpro_contact_plot',\n 'title': 'HiC-Pro: Contact Statistics',\n 'ylab': '\n 'cpswitch_counts_label': 'Number of Pairs'\n }\n return bargraph.plot(self.hicpro_data, keys, config)", - "docstring": "Generate the HiC-Pro interaction plot" - }, - { - "code": "def start_watching(self, cluster, callback):\n logger.debug(\"starting to watch cluster %s\", cluster.name)\n wait_on_any(self.connected, self.shutdown)\n logger.debug(\"done waiting on (connected, shutdown)\")\n znode_path = \"/\".join([self.base_path, cluster.name])\n self.stop_events[znode_path] = threading.Event()\n def should_stop():\n return (\n znode_path not in self.stop_events or\n self.stop_events[znode_path].is_set() or\n self.shutdown.is_set()\n )\n while not should_stop():\n try:\n if self.client.exists(znode_path):\n break\n except exceptions.ConnectionClosedError:\n break\n wait_on_any(\n self.stop_events[znode_path], self.shutdown,\n timeout=NO_NODE_INTERVAL\n )\n logger.debug(\"setting up ChildrenWatch for %s\", znode_path)\n @self.client.ChildrenWatch(znode_path)\n def watch(children):\n if should_stop():\n return False\n logger.debug(\"znode children changed! (%s)\", znode_path)\n new_nodes = []\n for child in children:\n child_path = \"/\".join([znode_path, child])\n try:\n new_nodes.append(\n Node.deserialize(self.client.get(child_path)[0])\n )\n except ValueError:\n logger.exception(\"Invalid node at path '%s'\", child)\n continue\n cluster.nodes = new_nodes\n callback()", - "docstring": "Initiates the \"watching\" of a cluster's associated znode.\n\n This is done via kazoo's ChildrenWatch object. When a cluster's\n znode's child nodes are updated, a callback is fired and we update\n the cluster's `nodes` attribute based on the existing child znodes\n and fire a passed-in callback with no arguments once done.\n\n If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL`\n seconds before trying again as long as no ChildrenWatch exists for\n the given cluster yet and we are not in the process of shutting down." 
- }, - { - "code": "def forward(self, inputs, context, inference=False):\n self.inference = inference\n enc_context, enc_len, hidden = context\n hidden = self.init_hidden(hidden)\n x = self.embedder(inputs)\n x, h, attn, scores = self.att_rnn(x, hidden[0], enc_context, enc_len)\n self.append_hidden(h)\n x = torch.cat((x, attn), dim=2)\n x = self.dropout(x)\n x, h = self.rnn_layers[0](x, hidden[1])\n self.append_hidden(h)\n for i in range(1, len(self.rnn_layers)):\n residual = x\n x = torch.cat((x, attn), dim=2)\n x = self.dropout(x)\n x, h = self.rnn_layers[i](x, hidden[i + 1])\n self.append_hidden(h)\n x = x + residual\n x = self.classifier(x)\n hidden = self.package_hidden()\n return x, scores, [enc_context, enc_len, hidden]", - "docstring": "Execute the decoder.\n\n :param inputs: tensor with inputs to the decoder\n :param context: state of encoder, encoder sequence lengths and hidden\n state of decoder's LSTM layers\n :param inference: if True stores and repackages hidden state" - }, - { - "code": "def preview(self, request):\n recurrence_rule = request.POST.get('recurrence_rule')\n limit = int(request.POST.get('limit', 10))\n try:\n rruleset = rrule.rrulestr(\n recurrence_rule, dtstart=djtz.now(), forceset=True)\n except ValueError as e:\n data = {\n 'error': six.text_type(e),\n }\n else:\n data = {\n 'occurrences': rruleset[:limit]\n }\n return JsonResponse(data)", - "docstring": "Return a occurrences in JSON format up until the configured limit." - }, - { - "code": "def Hash(self):\n if not self.__hash:\n hashdata = self.RawData()\n ba = bytearray(binascii.unhexlify(hashdata))\n hash = bin_dbl_sha256(ba)\n self.__hash = UInt256(data=hash)\n return self.__hash", - "docstring": "Get the hash value of the Blockbase.\n\n Returns:\n UInt256: containing the hash of the data." - }, - { - "code": "def isinstance(self, instance, class_name):\n if isinstance(instance, BaseNode):\n klass = self.dynamic_node_classes.get(class_name, None)\n if klass:\n return isinstance(instance, klass)\n return False\n else:\n raise TypeError(\"This function can only be used for BaseNode objects\")", - "docstring": "Check if a BaseNode is an instance of a registered dynamic class" - }, - { - "code": "def _update(collection_name, upsert, multi, spec, doc, check_keys, opts):\n flags = 0\n if upsert:\n flags += 1\n if multi:\n flags += 2\n encode = _dict_to_bson\n encoded_update = encode(doc, check_keys, opts)\n return b\"\".join([\n _ZERO_32,\n _make_c_string(collection_name),\n _pack_int(flags),\n encode(spec, False, opts),\n encoded_update]), len(encoded_update)", - "docstring": "Get an OP_UPDATE message." 
- }, - { - "code": "def _chart(self, x, y, chart_type, opts, style, label, options, **kwargs):\n\t\tif opts is not None:\n\t\t\tself.chart_opts = opts\n\t\tif style is not None:\n\t\t\tself.chart_style = style\n\t\tif label is not None:\n\t\t\tself.label = label\n\t\tself.x = x\n\t\tself.y = y\n\t\tif chart_type is None:\n\t\t\treturn\n\t\ttry:\n\t\t\tchart_obj = self._get_chart(chart_type, x, y, style=style,\n\t\t\t\t\t\t\t\t\t\topts=opts, label=label,\n\t\t\t\t\t\t\t\t\t\toptions=options, **kwargs)\n\t\t\treturn chart_obj\n\t\texcept Exception as e:\n\t\t\tself.err(e)", - "docstring": "Initialize chart options" - }, - { - "code": "def thermostat(self, temperature):\n target = int(temperature * 100)\n data = copy.copy(self._parameters)\n data.update({'value': target})\n response = self._get_data('/client/auth/setPoint', data)\n self._logger.debug('Response received {}'.format(response))\n self._clear_cache()", - "docstring": "A temperature to set the thermostat to. Requires a float.\n\n :param temperature: A float of the desired temperature to change to." - }, - { - "code": "def get_read_options(eventual, transaction_id):\n if transaction_id is None:\n if eventual:\n return datastore_pb2.ReadOptions(\n read_consistency=datastore_pb2.ReadOptions.EVENTUAL\n )\n else:\n return datastore_pb2.ReadOptions()\n else:\n if eventual:\n raise ValueError(\"eventual must be False when in a transaction\")\n else:\n return datastore_pb2.ReadOptions(transaction=transaction_id)", - "docstring": "Validate rules for read options, and assign to the request.\n\n Helper method for ``lookup()`` and ``run_query``.\n\n :type eventual: bool\n :param eventual: Flag indicating if ``EVENTUAL`` or ``STRONG``\n consistency should be used.\n\n :type transaction_id: bytes\n :param transaction_id: A transaction identifier (may be null).\n\n :rtype: :class:`.datastore_pb2.ReadOptions`\n :returns: The read options corresponding to the inputs.\n :raises: :class:`ValueError` if ``eventual`` is ``True`` and the\n ``transaction_id`` is not ``None``." - }, - { - "code": "def lookup_command(cmdname, mode):\n if cmdname in COMMANDS[mode]:\n return COMMANDS[mode][cmdname]\n elif cmdname in COMMANDS['global']:\n return COMMANDS['global'][cmdname]\n else:\n return None, None, None", - "docstring": "returns commandclass, argparser and forced parameters used to construct\n a command for `cmdname` when called in `mode`.\n\n :param cmdname: name of the command to look up\n :type cmdname: str\n :param mode: mode identifier\n :type mode: str\n :rtype: (:class:`Command`, :class:`~argparse.ArgumentParser`,\n dict(str->dict))" - }, - { - "code": "def _parse_exclude(exclude_file):\n if os.path.isfile(exclude_file):\n exclude = excludemod.parseExcludeFile(exclude_file, lambda x: None)\n else:\n exclude = dict()\n return exclude", - "docstring": "Parse an exclude file.\n\n Returns a dict as defined in gentoolkit.eclean.exclude.parseExcludeFile" - }, - { - "code": "def get_cached_translation(instance, language_code=None, related_name=None, use_fallback=False):\n if language_code is None:\n language_code = instance.get_current_language()\n translated_model = instance._parler_meta.get_model_by_related_name(related_name)\n values = _get_cached_values(instance, translated_model, language_code, use_fallback)\n if not values:\n return None\n try:\n translation = translated_model(**values)\n except TypeError:\n return None\n translation._state.adding = False\n return translation", - "docstring": "Fetch an cached translation.\n\n .. 
versionadded 1.2 Added the ``related_name`` parameter." - }, - { - "code": "def resolve(self, authorization: http.Header):\n if authorization is None:\n return None\n scheme, token = authorization.split()\n if scheme.lower() != 'basic':\n return None\n username, password = base64.b64decode(token).decode('utf-8').split(':')\n user = authenticate(username=username, password=password)\n return user", - "docstring": "Determine the user associated with a request, using HTTP Basic Authentication." - }, - { - "code": "def get_chunks(self,chunk_type):\n for nonter,this_type in self.label_for_nonter.items():\n if this_type == chunk_type:\n subsumed = self.terms_subsumed_by_nonter.get(nonter)\n if subsumed is not None:\n yield sorted(list(subsumed))", - "docstring": "Returns the chunks for a certain type\n @type chunk_type: string\n @param chunk_type: type of the chunk\n @rtype: list\n @return: the chunks for that type" - }, - { - "code": "def _indent(lines, prefix=\" \"):\n indented = []\n for line in lines.split(\"\\n\"):\n indented.append(prefix + line)\n return \"\\n\".join(indented)", - "docstring": "Indent some text.\n\n Note that this is present as ``textwrap.indent``, but not in Python 2.\n\n Args:\n lines (str): The newline delimited string to be indented.\n prefix (Optional[str]): The prefix to indent each line with. Default\n to two spaces.\n\n Returns:\n str: The newly indented content." - }, - { - "code": "def show_wait_cursor(object):\n @functools.wraps(object)\n def show_wait_cursorWrapper(*args, **kwargs):\n QApplication.setOverrideCursor(Qt.WaitCursor)\n value = None\n try:\n value = object(*args, **kwargs)\n finally:\n QApplication.restoreOverrideCursor()\n return value\n return show_wait_cursorWrapper", - "docstring": "Shows a wait cursor while processing.\n\n :param object: Object to decorate.\n :type object: object\n :return: Object.\n :rtype: object" - }, - { - "code": "def updateRole(self, roleID, name, description):\n params = {\n \"name\" : name,\n \"description\" : description,\n \"f\" : \"json\"\n }\n url = self._url + \"/%s/update\"\n return self._post(url=url,\n param_dict=params,\n proxy_url=self._proxy_url,\n proxy_port=self._proxy_port)", - "docstring": "allows for the role name or description to be modified" - }, - { - "code": "def create_signature(secret, value, digestmod='sha256', encoding='utf-8'):\n if isinstance(secret, str):\n secret = secret.encode(encoding)\n if isinstance(value, str):\n value = value.encode(encoding)\n if isinstance(digestmod, str):\n digestmod = getattr(hashlib, digestmod, hashlib.sha1)\n hm = hmac.new(secret, digestmod=digestmod)\n hm.update(value)\n return hm.hexdigest()", - "docstring": "Create HMAC Signature from secret for value." - }, - { - "code": "def strip_end(text, suffix):\n if not text.endswith(suffix):\n return text\n return text[:len(text)-len(suffix)]", - "docstring": "Strip `suffix` from the end of `text` if `text` has that suffix." - }, - { - "code": "def _get_repo_options_env(env):\n env_options = ''\n if env is None:\n return env_options\n if not isinstance(env, dict):\n raise SaltInvocationError(\n '\\'env\\' must be a Python dictionary'\n )\n for key, value in env.items():\n if key == 'OPTIONS':\n env_options += '{0}\\n'.format(value)\n return env_options", - "docstring": "Get repo environment overrides dictionary to use in repo options process\n\n env\n A dictionary of variables to define the repository options\n Example:\n\n .. code-block:: yaml\n\n - env:\n - OPTIONS : 'ask-passphrase'\n\n .. 
warning::\n\n The above illustrates a common PyYAML pitfall, that **yes**,\n **no**, **on**, **off**, **true**, and **false** are all loaded as\n boolean ``True`` and ``False`` values, and must be enclosed in\n quotes to be used as strings. More info on this (and other) PyYAML\n idiosyncrasies can be found :ref:`here `." - }, - { - "code": "def numToDigits(num, places):\n s = str(num)\n if len(s) < places:\n return (\"0\" * (places - len(s))) + s\n elif len(s) > places:\n return s[len(s)-places: ]\n else:\n return s", - "docstring": "Helper, for converting numbers to textual digits." - }, - { - "code": "def get_project_version_by_name(self, project, version_name):\n versions = self.project_versions(project)\n for version in versions:\n if version.name == version_name:\n return version", - "docstring": "Get a version Resource by its name present on a project.\n\n :param project: ID or key of the project to get versions from\n :type project: str\n :param version_name: name of the version to search for\n :type version_name: str\n :rtype: Optional[Version]" - }, - { - "code": "def request_span(request: Request,\n request_key: str = REQUEST_AIOZIPKIN_KEY) -> SpanAbc:\n return cast(SpanAbc, request[request_key])", - "docstring": "Returns span created by middleware from request context, you can use it\n as parent on next child span." - }, - { - "code": "def get_file_list(path):\n f_list = []\n def recur_dir(path, newpath = os.path.sep):\n files = os.listdir(path)\n for fle in files:\n f_path = cpjoin(path, fle)\n if os.path.isdir(f_path): recur_dir(f_path, cpjoin(newpath, fle))\n elif os.path.isfile(f_path): f_list.append(get_single_file_info(f_path, cpjoin(newpath, fle)))\n recur_dir(path)\n return f_list", - "docstring": "Recursively lists all files in a file system below 'path'." - }, - { - "code": "def transform(self, data=None):\n if data is None:\n return self.xform_data\n else:\n formatted = format_data(\n data,\n semantic=self.semantic,\n vectorizer=self.vectorizer,\n corpus=self.corpus,\n ppca=True)\n norm = normalizer(formatted, normalize=self.normalize)\n reduction = reducer(\n norm,\n reduce=self.reduce,\n ndims=self.reduce['params']['n_components'])\n return aligner(reduction, align=self.align)", - "docstring": "Return transformed data, or transform new data using the same model\n parameters\n\n Parameters\n ----------\n data : numpy array, pandas dataframe or list of arrays/dfs\n The data to transform. If no data is passed, the xform_data from\n the DataGeometry object will be returned.\n\n Returns\n ----------\n xformed_data : list of numpy arrays\n The transformed data" - }, - { - "code": "def has_scope(context=None):\n if not booted(context):\n return False\n _sd_version = version(context)\n if _sd_version is None:\n return False\n return _sd_version >= 205", - "docstring": "Scopes were introduced in systemd 205, this function returns a boolean\n which is true when the minion is systemd-booted and running systemd>=205." 
- }, - { - "code": "def listen_for_updates(self):\n self.toredis.subscribe(self.group_pubsub, callback=self.callback)", - "docstring": "Attach a callback on the group pubsub" - }, - { - "code": "def ends_with_path_separator(self, file_path):\n if is_int_type(file_path):\n return False\n file_path = make_string_path(file_path)\n return (file_path and\n file_path not in (self.path_separator,\n self.alternative_path_separator) and\n (file_path.endswith(self._path_separator(file_path)) or\n self.alternative_path_separator is not None and\n file_path.endswith(\n self._alternative_path_separator(file_path))))", - "docstring": "Return True if ``file_path`` ends with a valid path separator." - }, - { - "code": "def _get_zk_path_children(self, zk_conn, zk_path, name_for_error):\n children = []\n try:\n children = zk_conn.get_children(zk_path)\n except NoNodeError:\n self.log.info('No zookeeper node at %s', zk_path)\n except Exception:\n self.log.exception('Could not read %s from %s', name_for_error, zk_path)\n return children", - "docstring": "Fetch child nodes for a given Zookeeper path." - }, - { - "code": "def ipa_chars(self, value):\n if value is None:\n self.__ipa_chars = []\n else:\n if is_list_of_ipachars(value):\n self.__ipa_chars = value\n else:\n raise TypeError(\"ipa_chars only accepts a list of IPAChar objects\")", - "docstring": "Set the list of IPAChar objects composing the IPA string\n\n :param list value: list of IPAChar objects" - }, - { - "code": "def _normalize_xml_search_response(self, xml):\n target = XMLSearchResult()\n parser = ElementTree.XMLParser(target=target)\n parser.feed(xml)\n return parser.close()", - "docstring": "Normalizes an XML search response so that PB and HTTP have the\n same return value" - }, - { - "code": "def blast(self):\n logging.info('BLASTing FASTA files against {} database'.format(self.analysistype))\n for _ in range(self.cpus):\n threads = Thread(target=self.blastthreads, args=())\n threads.setDaemon(True)\n threads.start()\n with progressbar(self.runmetadata.samples) as bar:\n for sample in bar:\n if sample.general.bestassemblyfile != 'NA':\n sample[self.analysistype].blastreport = os.path.join(\n sample[self.analysistype].outputdir,\n '{}_{}_blastresults.csv'.format(sample.name, self.analysistype))\n blastn = NcbiblastnCommandline(query=sample[self.analysistype].fasta,\n db=os.path.splitext(sample[self.analysistype].baitfile)[0],\n max_target_seqs=1,\n num_threads=self.threads,\n outfmt=\"'6 qseqid sseqid positive mismatch gaps evalue \"\n \"bitscore slen length qstart qend qseq sstart send sseq'\",\n out=sample[self.analysistype].blastreport)\n sample[self.analysistype].blastcall = str(blastn)\n self.blastqueue.put((sample, blastn))\n self.blastqueue.join()", - "docstring": "Run BLAST analyses of the subsampled FASTQ reads against the NCBI 16S reference database" - }, - { - "code": "def split_on(word: str, section: str) -> Tuple[str, str]:\n return word[:word.index(section)] + section, word[word.index(section) + len(section):]", - "docstring": "Given a string, split on a section, and return the two sections as a tuple.\n\n :param word:\n :param section:\n :return:\n\n >>> split_on('hamrye', 'ham')\n ('ham', 'rye')" - }, - { - "code": "def add_child(self, child):\n child_id = getattr(child, 'id', None)\n if child_id:\n if not hasattr(self, 'children'):\n self.children = {}\n if child_id not in self.children:\n self.children[child_id] = child", - "docstring": "Children are GFFFeatures and are defined when added. 
This is done to avoid memory overheads\n that may be incurred by GFF files that have millions of rows." - }, - { - "code": "def copy(self):\n from copy import deepcopy\n cp = type(self)()\n cp._metainfo = deepcopy(self._metainfo)\n return cp", - "docstring": "Return a new object with the same metainfo\n\n Internally, this simply copies the internal metainfo dictionary with\n :func:`copy.deepcopy` and gives it to the new instance." - }, - { - "code": "def create_quantiles(items: Sequence, lower_bound, upper_bound):\n interval = (upper_bound - lower_bound) / len(items)\n quantiles = ((g, (x - interval, x)) for g, x in\n zip(items, accumulate(repeat(interval, len(items)))))\n return quantiles", - "docstring": "Create quantile start and end boundaries." - }, - { - "code": "def hermetic_environment_as(**kwargs):\n old_environment = os.environ.copy() if PY3 else _copy_and_decode_env(os.environ)\n _purge_env()\n try:\n with environment_as(**kwargs):\n yield\n finally:\n _purge_env()\n _restore_env(old_environment)", - "docstring": "Set the environment to the supplied values from an empty state." - }, - { - "code": "def check_pending_labels(ast):\n result = True\n visited = set()\n pending = [ast]\n while pending:\n node = pending.pop()\n if node is None or node in visited:\n continue\n visited.add(node)\n for x in node.children:\n pending.append(x)\n if node.token != 'VAR' or (node.token == 'VAR' and node.class_ is not CLASS.unknown):\n continue\n tmp = global_.SYMBOL_TABLE.get_entry(node.name)\n if tmp is None or tmp.class_ is CLASS.unknown:\n syntax_error(node.lineno, 'Undeclared identifier \"%s\"'\n % node.name)\n else:\n assert tmp.class_ == CLASS.label\n node.to_label(node)\n result = result and tmp is not None\n return result", - "docstring": "Iteratively traverses the node looking for ID with no class set,\n marks them as labels, and check they've been declared.\n\n This way we avoid stack overflow for high line-numbered listings." - }, - { - "code": "def as_dictionary(self):\n return {to_camel_case(i): Serializable._convert_to_dictionary(self.__dict__[i])\n for i in self.__dict__ if self.__dict__[i] is not None}", - "docstring": "Convert this object to a dictionary with formatting appropriate for a PIF.\n\n :returns: Dictionary with the content of this object formatted for a PIF." 
- }, - { - "code": "def firmware_bundles(self):\n if not self.__firmware_bundles:\n self.__firmware_bundles = FirmwareBundles(self.__connection)\n return self.__firmware_bundles", - "docstring": "Gets the FirmwareBundles API client.\n\n Returns:\n FirmwareBundles:" - }, - { - "code": "def initiate(self, **kwargs):\n run_startup = kwargs.pop('run_startup', True)\n setter = lambda value, name: setattr(self, name, value)\n d = defer.Deferred()\n d.addCallback(defer.drop_param,\n self.agency._messaging.get_connection, self)\n d.addCallback(setter, \"_messaging\")\n d.addCallback(defer.drop_param,\n self.agency._database.get_connection)\n d.addCallback(setter, '_database')\n d.addCallback(defer.drop_param,\n self._reload_descriptor)\n d.addCallback(defer.drop_param,\n self._subscribe_for_descriptor_changes)\n d.addCallback(defer.drop_param, self._store_instance_id)\n d.addCallback(defer.drop_param, self._load_configuration)\n d.addCallback(setter, '_configuration')\n d.addCallback(defer.drop_param,\n self.join_shard, self._descriptor.shard)\n d.addCallback(defer.drop_param,\n self.journal_agent_created)\n d.addCallback(defer.drop_param,\n self._call_initiate, **kwargs)\n d.addCallback(defer.drop_param, self.call_next, self._call_startup,\n call_startup=run_startup)\n d.addCallback(defer.override_result, self)\n d.addErrback(self._startup_error)\n self.call_next(d.callback, None)\n return d", - "docstring": "Establishes the connections to database and messaging platform,\n taking into account that it might meen performing asynchronous job." - }, - { - "code": "def role_exists(role, **kwargs):\n return len(tsql_query(query='sp_helprole \"{0}\"'.format(role), as_dict=True, **kwargs)) == 1", - "docstring": "Checks if a role exists.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt minion mssql.role_exists db_owner" - }, - { - "code": "def get_uint8(self):\n value = self.get_int()\n if value < 0 or value > 255:\n raise dns.exception.SyntaxError('%d is not an unsigned 8-bit integer' % value)\n return value", - "docstring": "Read the next token and interpret it as an 8-bit unsigned\n integer.\n\n @raises dns.exception.SyntaxError:\n @rtype: int" - }, - { - "code": "def _get_file_event_handler(self, file_path, save_name):\n self._file_pusher.update_file(save_name, file_path)\n if save_name not in self._file_event_handlers:\n if save_name == 'wandb-history.jsonl':\n self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(\n file_path, 'wandb-history.jsonl', self._api)\n elif save_name == 'wandb-events.jsonl':\n self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(\n file_path, 'wandb-events.jsonl', self._api)\n elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:\n self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(\n file_path, save_name, self._api, self._file_pusher)\n elif save_name == config.FNAME:\n self._file_event_handlers[save_name] = FileEventHandlerConfig(\n file_path, save_name, self._api, self._file_pusher, self._run)\n elif save_name == 'wandb-summary.json':\n self._run.summary.load()\n self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())\n self._file_event_handlers[save_name] = FileEventHandlerSummary(\n file_path, save_name, self._api, self._file_pusher, self._run)\n elif save_name.startswith('media/'):\n self._file_event_handlers[save_name] = FileEventHandlerOverwrite(\n file_path, save_name, self._api, self._file_pusher)\n else:\n Handler = FileEventHandlerOverwriteDeferred\n for 
policy, globs in six.iteritems(self._user_file_policies):\n if policy == \"end\":\n continue\n for g in globs:\n if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):\n if policy == \"live\":\n Handler = FileEventHandlerThrottledOverwriteMinWait\n self._file_event_handlers[save_name] = Handler(\n file_path, save_name, self._api, self._file_pusher)\n return self._file_event_handlers[save_name]", - "docstring": "Get or create an event handler for a particular file.\n\n file_path: the file's actual path\n save_name: its path relative to the run directory (aka the watch directory)" - }, - { - "code": "def _setbin_unsafe(self, binstring):\n length = len(binstring)\n boundary = ((length + 7) // 8) * 8\n padded_binstring = binstring + '0' * (boundary - length)\\\n if len(binstring) < boundary else binstring\n try:\n bytelist = [int(padded_binstring[x:x + 8], 2)\n for x in xrange(0, len(padded_binstring), 8)]\n except ValueError:\n raise CreationError(\"Invalid character in bin initialiser {0}.\", binstring)\n self._setbytes_unsafe(bytearray(bytelist), length, 0)", - "docstring": "Same as _setbin_safe, but input isn't sanity checked. binstring mustn't start with '0b'." - }, - { - "code": "def to_array(self):\n array = super(InlineQueryResultCachedPhoto, self).to_array()\n array['type'] = u(self.type)\n array['id'] = u(self.id)\n array['photo_file_id'] = u(self.photo_file_id)\n if self.title is not None:\n array['title'] = u(self.title)\n if self.description is not None:\n array['description'] = u(self.description)\n if self.caption is not None:\n array['caption'] = u(self.caption)\n if self.parse_mode is not None:\n array['parse_mode'] = u(self.parse_mode)\n if self.reply_markup is not None:\n array['reply_markup'] = self.reply_markup.to_array()\n if self.input_message_content is not None:\n array['input_message_content'] = self.input_message_content.to_array()\n return array", - "docstring": "Serializes this InlineQueryResultCachedPhoto to a dictionary.\n\n :return: dictionary representation of this object.\n :rtype: dict" - }, - { - "code": "def line(h1: Union[Histogram1D, \"HistogramCollection\"], ax: Axes, *, errors: bool = False, **kwargs):\n show_stats = kwargs.pop(\"show_stats\", False)\n show_values = kwargs.pop(\"show_values\", False)\n density = kwargs.pop(\"density\", False)\n cumulative = kwargs.pop(\"cumulative\", False)\n value_format = kwargs.pop(\"value_format\", None)\n text_kwargs = pop_kwargs_with_prefix(\"text_\", kwargs)\n kwargs[\"label\"] = kwargs.get(\"label\", h1.name)\n data = get_data(h1, cumulative=cumulative, density=density)\n _apply_xy_lims(ax, h1, data, kwargs)\n _add_ticks(ax, h1, kwargs)\n _add_labels(ax, h1, kwargs)\n if errors:\n err_data = get_err_data(h1, cumulative=cumulative, density=density)\n ax.errorbar(h1.bin_centers, data, yerr=err_data, fmt=kwargs.pop(\n \"fmt\", \"-\"), ecolor=kwargs.pop(\"ecolor\", \"black\"), **kwargs)\n else:\n ax.plot(h1.bin_centers, data, **kwargs)\n if show_stats:\n _add_stats_box(h1, ax, stats=show_stats)\n if show_values:\n _add_values(ax, h1, data, value_format=value_format, **text_kwargs)", - "docstring": "Line plot of 1D histogram." 
- }, - { - "code": "def format(self, method, data):\n if data is None:\n if method == 'GET':\n raise NotFound()\n return ''\n return self._meta.formatter.format(data)", - "docstring": "Calls format on list or detail" - }, - { - "code": "def _load_rules(self):\n with self._sftp_connection.open(self.RULE_PATH) as file:\n data = file.read()\n lines = (\n line.strip()\n for line in data.split('\\n')\n )\n rule_strings = (\n line for line in lines\n if len(line) > 0\n )\n rules = (\n Rule.parse(rule_string)\n for rule_string in rule_strings\n )\n self._rules = [\n rule\n for rule in rules\n if rule is not None\n ]", - "docstring": "Loads the rules from the SSH-Connection" - }, - { - "code": "def update(self, of):\n for p in ('mime_type', 'preference', 'state', 'hash', 'modified', 'size', 'contents', 'source_hash', 'data'):\n setattr(self, p, getattr(of, p))\n return self", - "docstring": "Update a file from another file, for copying" - }, - { - "code": "def apply(self, func, axis, *args, **kwargs):\n if callable(func):\n return self._callable_func(func, axis, *args, **kwargs)\n elif isinstance(func, dict):\n return self._dict_func(func, axis, *args, **kwargs)\n elif is_list_like(func):\n return self._list_like_func(func, axis, *args, **kwargs)\n else:\n pass", - "docstring": "Apply func across given axis.\n\n Args:\n func: The function to apply.\n axis: Target axis to apply the function along.\n\n Returns:\n A new PandasQueryCompiler." - }, - { - "code": "def returnFees(self):\n from bitsharesbase.operations import operations\n r = {}\n obj, base = self.blockchain.rpc.get_objects([\"2.0.0\", \"1.3.0\"])\n fees = obj[\"parameters\"][\"current_fees\"][\"parameters\"]\n scale = float(obj[\"parameters\"][\"current_fees\"][\"scale\"])\n for f in fees:\n op_name = \"unknown %d\" % f[0]\n for name in operations:\n if operations[name] == f[0]:\n op_name = name\n fs = f[1]\n for _type in fs:\n fs[_type] = float(fs[_type]) * scale / 1e4 / 10 ** base[\"precision\"]\n r[op_name] = fs\n return r", - "docstring": "Returns a dictionary of all fees that apply through the\n network\n\n Example output:\n\n .. 
code-block:: js\n\n {'proposal_create': {'fee': 400000.0},\n 'asset_publish_feed': {'fee': 1000.0}, 'account_create':\n {'basic_fee': 950000.0, 'price_per_kbyte': 20000.0,\n 'premium_fee': 40000000.0}, 'custom': {'fee': 20000.0},\n 'asset_fund_fee_pool': {'fee': 20000.0},\n 'override_transfer': {'fee': 400000.0}, 'fill_order':\n {}, 'asset_update': {'price_per_kbyte': 20000.0, 'fee':\n 200000.0}, 'asset_update_feed_producers': {'fee':\n 10000000.0}, 'assert': {'fee': 20000.0},\n 'committee_member_create': {'fee': 100000000.0}}" - }, - { - "code": "def hmmalign_sequences(self, hmm, sequences, output_file):\n cmd = 'hmmalign --trim %s %s' % (hmm, sequences)\n output = extern.run(cmd)\n with open(output_file, 'w') as f:\n SeqIO.write(SeqIO.parse(StringIO(output), 'stockholm'), f, 'fasta')", - "docstring": "Run hmmalign and convert output to aligned fasta format\n\n Parameters\n ----------\n hmm: str\n path to hmm file\n sequences: str\n path to file of sequences to be aligned\n output_file: str\n write sequences to this file\n\n Returns\n -------\n nothing" - }, - { - "code": "def _create(self):\n if not os.path.exists(settings.SALMON_WHISPER_DB_PATH):\n os.makedirs(settings.SALMON_WHISPER_DB_PATH)\n archives = [whisper.parseRetentionDef(retentionDef)\n for retentionDef in settings.ARCHIVES.split(\",\")]\n whisper.create(self.path, archives,\n xFilesFactor=settings.XFILEFACTOR,\n aggregationMethod=settings.AGGREGATION_METHOD)", - "docstring": "Create the Whisper file on disk" - }, - { - "code": "def attribute(self):\n refs = re.findall(\n \"\\@([a-zA-Z:]+)=\\\\\\?[\\'\\\"]\\$\"+str(self.refsDecl.count(\"$\"))+\"\\\\\\?[\\'\\\"]\",\n self.refsDecl\n )\n return refs[-1]", - "docstring": "Attribute that serves as a reference getter" - }, - { - "code": "def get_arg(context, index):\n if index < len(context.argstreams):\n arg = \"\"\n chunk = yield context.argstreams[index].read()\n while chunk:\n arg += chunk\n chunk = yield context.argstreams[index].read()\n raise tornado.gen.Return(arg)\n else:\n raise TChannelError()", - "docstring": "get value from arg stream in async way" - }, - { - "code": "def find_transition(self, gene: Gene, multiplexes: Tuple[Multiplex, ...]) -> Transition:\n multiplexes = tuple(multiplex for multiplex in multiplexes if gene in multiplex.genes)\n for transition in self.transitions:\n if transition.gene == gene and set(transition.multiplexes) == set(multiplexes):\n return transition\n raise AttributeError(f'transition K_{gene.name}' + ''.join(f\"+{multiplex!r}\" for multiplex in multiplexes) + ' does not exist')", - "docstring": "Find and return a transition in the model for the given gene and multiplexes.\n Raise an AttributeError if there is no multiplex in the graph with the given name." 
- }, - { - "code": "def promptyn(msg, default=None):\n while True:\n yes = \"Y\" if default else \"y\"\n if default or default is None:\n no = \"n\"\n else:\n no = \"N\"\n confirm = prompt(\"%s [%s/%s]\" % (msg, yes, no), \"\").lower()\n if confirm in (\"y\", \"yes\"):\n return True\n elif confirm in (\"n\", \"no\"):\n return False\n elif not confirm and default is not None:\n return default", - "docstring": "Display a blocking prompt until the user confirms" - }, - { - "code": "def get_operator(self, operator):\n op = {\n '=': '',\n '>': '__gt',\n '>=': '__gte',\n '<': '__lt',\n '<=': '__lte',\n '~': '__icontains',\n 'in': '__in',\n }.get(operator)\n if op is not None:\n return op, False\n op = {\n '!=': '',\n '!~': '__icontains',\n 'not in': '__in',\n }[operator]\n return op, True", - "docstring": "Get a comparison suffix to be used in Django ORM & inversion flag for it\n\n :param operator: string, DjangoQL comparison operator\n :return: (suffix, invert) - a tuple with 2 values:\n suffix - suffix to be used in ORM query, for example '__gt' for '>'\n invert - boolean, True if this comparison needs to be inverted" - }, - { - "code": "def fast_int(\n x,\n key=lambda x: x,\n _uni=unicodedata.digit,\n _first_char=POTENTIAL_FIRST_CHAR,\n):\n if x[0] in _first_char:\n try:\n return long(x)\n except ValueError:\n try:\n return _uni(x, key(x)) if len(x) == 1 else key(x)\n except TypeError:\n return key(x)\n else:\n try:\n return _uni(x, key(x)) if len(x) == 1 else key(x)\n except TypeError:\n return key(x)", - "docstring": "Convert a string to a int quickly, return input as-is if not possible.\n\n We don't need to accept all input that the real fast_int accepts because\n natsort is controlling what is passed to this function.\n\n Parameters\n ----------\n x : str\n String to attempt to convert to an int.\n key : callable\n Single-argument function to apply to *x* if conversion fails.\n\n Returns\n -------\n *str* or *int*" - }, - { - "code": "def pot_ana(r, rho):\n I = 1.0\n sigma = 1.0 / rho\n phi = np.divide(I, (2.0 * np.pi * sigma * r))\n return phi", - "docstring": "Return the analytical potential in distance r over a homogeneous\n half-space" - }, - { - "code": "def to_node(self, exp, schema):\n if len(exp) == 1 and isinstance(exp[0], str):\n node = RelationNode(name=exp[0], schema=schema)\n elif len(exp) == 1 and isinstance(exp[0], list):\n node = self.to_node(exp[0], schema)\n elif isinstance(exp[0], str) and self.grammar.is_unary(exp[0]):\n child = self.to_node(exp[2:], schema)\n node = self.create_unary_node(operator=exp[0], child=child,\n param=exp[1], schema=schema)\n elif exp[1] is self.grammar.syntax.assign_op:\n child = self.to_node(exp[2:], schema)\n node = self.create_unary_node(operator=exp[1], child=child,\n param=exp[0], schema=schema)\n elif self.grammar.is_binary(exp[1]):\n if isinstance(exp[-2], str):\n op_pos = -2\n param = None\n else:\n op_pos = -3\n param = exp[-2]\n operator = exp[op_pos]\n left = self.to_node(exp[:op_pos], schema)\n right = self.to_node(exp[-1], schema)\n node = self.create_binary_node(operator=operator, left=left,\n right=right, param=param)\n else:\n raise ValueError\n return node", - "docstring": "Return a Node that is the root of the parse tree for the the specified\n expression.\n\n :param exp: A list that represents a relational algebra expression.\n Assumes that this list was generated by pyparsing.\n :param schema: A dictionary of relation names to attribute names used\n for verification and generating attributes.\n :return: A Node." 
- }, - { - "code": "def _dbus_get_object(bus_name, object_name):\n try:\n bus = dbus.SessionBus()\n obj = bus.get_object(bus_name, object_name)\n return obj\n except (NameError, dbus.exceptions.DBusException):\n return None", - "docstring": "Fetches DBUS proxy object given the specified parameters.\n\n `bus_name`\n Name of the bus interface.\n `object_name`\n Object path related to the interface.\n\n Returns object or ``None``." - }, - { - "code": "def smart_content_encoding(self):\n encoding = self.content_encoding\n if not encoding:\n base_list = self.basename.split('.')\n while (not encoding) and len(base_list) > 1:\n _, encoding = mimetypes.guess_type('.'.join(base_list))\n base_list.pop()\n return encoding", - "docstring": "Smart content encoding." - }, - { - "code": "def sapm_aoi_loss(aoi, module, upper=None):\n aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],\n module['B1'], module['B0']]\n aoi_loss = np.polyval(aoi_coeff, aoi)\n aoi_loss = np.clip(aoi_loss, 0, upper)\n aoi_lt_0 = np.full_like(aoi, False, dtype='bool')\n np.less(aoi, 0, where=~np.isnan(aoi), out=aoi_lt_0)\n aoi_loss = np.where(aoi_lt_0, 0, aoi_loss)\n if isinstance(aoi, pd.Series):\n aoi_loss = pd.Series(aoi_loss, aoi.index)\n return aoi_loss", - "docstring": "Calculates the SAPM angle of incidence loss coefficient, F2.\n\n Parameters\n ----------\n aoi : numeric\n Angle of incidence in degrees. Negative input angles will return\n zeros.\n\n module : dict-like\n A dict, Series, or DataFrame defining the SAPM performance\n parameters. See the :py:func:`sapm` notes section for more\n details.\n\n upper : None or float, default None\n Upper limit on the results.\n\n Returns\n -------\n F2 : numeric\n The SAPM angle of incidence loss coefficient.\n\n Notes\n -----\n The SAPM traditionally does not define an upper limit on the AOI\n loss function and values slightly exceeding 1 may exist for moderate\n angles of incidence (15-40 degrees). However, users may consider\n imposing an upper limit of 1.\n\n References\n ----------\n [1] King, D. et al, 2004, \"Sandia Photovoltaic Array Performance\n Model\", SAND Report 3535, Sandia National Laboratories, Albuquerque,\n NM.\n\n [2] B.H. King et al, \"Procedure to Determine Coefficients for the\n Sandia Array Performance Model (SAPM),\" SAND2016-5284, Sandia\n National Laboratories (2016).\n\n [3] B.H. King et al, \"Recent Advancements in Outdoor Measurement\n Techniques for Angle of Incidence Effects,\" 42nd IEEE PVSC (2015).\n DOI: 10.1109/PVSC.2015.7355849" - }, - { - "code": "def _resizeColumnToContents(self, header, data, col, limit_ms):\r\n hdr_width = self._sizeHintForColumn(header, col, limit_ms)\r\n data_width = self._sizeHintForColumn(data, col, limit_ms)\r\n if data_width > hdr_width:\r\n width = min(self.max_width, data_width)\r\n elif hdr_width > data_width * 2:\r\n width = max(min(hdr_width, self.min_trunc), min(self.max_width,\r\n data_width))\r\n else:\r\n width = max(min(self.max_width, hdr_width), self.min_trunc)\r\n header.setColumnWidth(col, width)", - "docstring": "Resize a column by its contents." - }, - { - "code": "def is_complete(self):\n return all(p.name in self.values for p in self.parameters if p.required)", - "docstring": "Do all required parameters have values?" 
- }, - { - "code": "def _add_ideal_atomic_weights_(elemental_array):\n for a in elemental_array:\n this_atomic_weight = elements_data[a[\"symbol\"]][\"atomic_weight\"]\n a[\"weight\"] = a[\"occurances\"] * this_atomic_weight\n return elemental_array", - "docstring": "Uses elements.json to find the molar mass of the element in question, and then multiplies that by the occurances of the element.\n Adds the \"weight\" property to each of the dictionaries in elemental_array\n\n :param elemental_array: an array of dictionaries containing information about the elements in the system\n :return: the appended elemental_array" - }, - { - "code": "def processCheckIn(request):\r\n if request.method == 'POST':\r\n event_id = request.POST.get('event_id')\r\n reg_ids = request.POST.getlist('reg_id')\r\n if not event_id:\r\n return HttpResponse(_(\"Error at start.\"))\r\n all_eventreg = list(EventRegistration.objects.filter(event__id=event_id))\r\n for this_reg in all_eventreg:\r\n if str(this_reg.registration.id) in reg_ids and not this_reg.checkedIn:\r\n this_reg.checkedIn = True\r\n this_reg.save()\r\n elif str(this_reg.registration.id) not in reg_ids and this_reg.checkedIn:\r\n this_reg.checkedIn = False\r\n this_reg.save()\r\n return HttpResponse(\"OK.\")", - "docstring": "This function handles the Ajax call made when a user is marked as checked in" - }, - { - "code": "def requires_lock(function):\n def new_lock_requiring_function(self, filename, *args, **kwargs):\n if self.owns_lock(filename):\n return function(self, filename, *args, **kwargs)\n else:\n raise RequiresLockException()\n return new_lock_requiring_function", - "docstring": "Decorator to check if the user owns the required lock.\n The first argument must be the filename." - }, - { - "code": "def _make_dav_request(self, method, path, **kwargs):\n if self._debug:\n print('DAV request: %s %s' % (method, path))\n if kwargs.get('headers'):\n print('Headers: ', kwargs.get('headers'))\n path = self._normalize_path(path)\n res = self._session.request(\n method,\n self._webdav_url + parse.quote(self._encode_string(path)),\n **kwargs\n )\n if self._debug:\n print('DAV status: %i' % res.status_code)\n if res.status_code in [200, 207]:\n return self._parse_dav_response(res)\n if res.status_code in [204, 201]:\n return True\n raise HTTPResponseError(res)", - "docstring": "Makes a WebDAV request\n\n :param method: HTTP method\n :param path: remote path of the targetted file\n :param \\*\\*kwargs: optional arguments that ``requests.Request.request`` accepts\n :returns array of :class:`FileInfo` if the response\n contains it, or True if the operation succeded, False\n if it didn't" - }, - { - "code": "def parse(self, stream):\n (result, _) = (self << eof).parse_partial(stream)\n return result", - "docstring": "Parse a string or list of tokens and return the result or raise a ParseError." - }, - { - "code": "def _download_args(options):\n return dict(\n version=options.version,\n download_base=options.download_base,\n downloader_factory=options.downloader_factory,\n to_dir=options.to_dir,\n )", - "docstring": "Return args for download_setuptools function from cmdline args." 
- }, - { - "code": "def build_trading_timeline(start, end):\n EMPTY_DATES = pd.date_range('2000/01/01', periods=0, tz=pytz.utc)\n now = dt.datetime.now(tz=pytz.utc)\n if not start:\n if not end:\n bt_dates = EMPTY_DATES\n live_dates = pd.date_range(\n start=now,\n end=normalize_date_format('23h59'))\n else:\n end = normalize_date_format(end)\n if end < now:\n bt_dates = pd.date_range(\n start=end - 360 * pd.datetools.day,\n end=end)\n live_dates = EMPTY_DATES\n elif end > now:\n bt_dates = EMPTY_DATES\n live_dates = pd.date_range(start=now, end=end)\n else:\n start = normalize_date_format(start)\n if start < now:\n if not end:\n end = start + 360 * pd.datetools.day\n if end > now:\n end = now - pd.datetools.day\n live_dates = EMPTY_DATES\n bt_dates = pd.date_range(\n start=start, end=end)\n else:\n end = normalize_date_format(end)\n if end < now:\n live_dates = EMPTY_DATES\n bt_dates = pd.date_range(start=start, end=end)\n elif end > now:\n bt_dates = pd.date_range(\n start=start, end=now - pd.datetools.day)\n live_dates = pd.date_range(start=now, end=end)\n elif start > now:\n if not end:\n bt_dates = EMPTY_DATES\n live_dates = pd.date_range(\n start=start,\n end=normalize_date_format('23h59'))\n else:\n end = normalize_date_format(end)\n bt_dates = EMPTY_DATES\n live_dates = pd.date_range(start=start, end=end)\n return bt_dates + live_dates", - "docstring": "Build the daily-based index we will trade on" - }, - { - "code": "def encrypt_key(key, password):\n public_key = load_pem_public_key(key.encode(), default_backend())\n encrypted_password = public_key.encrypt(password, PKCS1v15())\n return base64.b64encode(encrypted_password).decode('ascii')", - "docstring": "Encrypt the password with the public key and return an ASCII representation.\n\n The public key retrieved from the Travis API is loaded as an RSAPublicKey\n object using Cryptography's default backend. Then the given password\n is encrypted with the encrypt() method of RSAPublicKey. The encrypted\n password is then encoded to base64 and decoded into ASCII in order to\n convert the bytes object into a string object.\n\n Parameters\n ----------\n key: str\n Travis CI public RSA key that requires deserialization\n password: str\n the password to be encrypted\n\n Returns\n -------\n encrypted_password: str\n the base64 encoded encrypted password decoded as ASCII\n\n Notes\n -----\n Travis CI uses the PKCS1v15 padding scheme. 
While PKCS1v15 is secure,\n it is outdated and should be replaced with OAEP.\n\n Example:\n OAEP(mgf=MGF1(algorithm=SHA256()), algorithm=SHA256(), label=None))" - }, - { - "code": "def bigquery_data_type(self):\n BQ_DATA_TYPE_DIC = OrderedDict()\n BQ_DATA_TYPE_DIC[\"STRING\"] = {None: [re.compile(r\"(CHAR|TEXT|CLOB|JSON|UUID)\")]}\n BQ_DATA_TYPE_DIC[\"INTEGER\"] = {None: [re.compile(r\"INT|SERIAL|YEAR\")]}\n BQ_DATA_TYPE_DIC[\"FLOAT\"] = {None: [re.compile(r\"(FLOAT|DOUBLE)\"), \"REAL\", \"MONEY\"]}\n BQ_DATA_TYPE_DIC[\"DATETIME\"] = {\n None: [\"DATETIME\", \"TIMESTAMP\", \"TIMESTAMP WITHOUT TIME ZONE\"],\n self.DATABASE.oracle: [\"DATE\"]\n }\n BQ_DATA_TYPE_DIC[\"TIMESTAMP\"] = {None: [\"TIMESTAMPTZ\", \"TIMESTAMP WITH TIME ZONE\"]}\n BQ_DATA_TYPE_DIC[\"DATE\"] = {None: [\"DATE\"]}\n BQ_DATA_TYPE_DIC[\"TIME\"] = {None: [\"TIME\"]}\n BQ_DATA_TYPE_DIC[\"BOOLEAN\"] = {None: [re.compile(r\"BOOL\")]}\n for bq_type, conditions in BQ_DATA_TYPE_DIC.items():\n for source_db, source_datatypes in conditions.items():\n for source_datatype in source_datatypes:\n if isinstance(source_datatype, str):\n if self._data_type == source_datatype \\\n and ( self._source_database == source_db\n or (self._source_database is not None and source_db is None)):\n return bq_type\n elif re.search(source_datatype, self._data_type) \\\n and ( self._source_database == source_db\n or (self._source_database is not None and source_db is None)):\n return bq_type\n if self._data_type in [\"NUMERIC\", \"NUMBER\", \"DECIMAL\"]:\n if self._scale is not None:\n return \"FLOAT\"\n if self._data_type == \"NUMBER\" \\\n and self._source_database == self.DATABASE.oracle \\\n and self._length is None:\n return \"FLOAT\"\n return \"INTEGER\"\n raise ValueError(\"Unknown data type : '{}'\".format(self._data_type))", - "docstring": "Get BigQuery Legacy SQL data type" - }, - { - "code": "def renew(self):\n if self.token is not None:\n try:\n self.client.test_and_set(self.key, self.token, self.token, ttl=self.ttl)\n return True\n except ValueError, e:\n self.token = None\n return False", - "docstring": "Renew the lock if acquired." - }, - { - "code": "def set_iam_policy(self, policy, client=None):\n client = self._require_client(client)\n query_params = {}\n if self.user_project is not None:\n query_params[\"userProject\"] = self.user_project\n resource = policy.to_api_repr()\n resource[\"resourceId\"] = self.path\n info = client._connection.api_request(\n method=\"PUT\",\n path=\"%s/iam\" % (self.path,),\n query_params=query_params,\n data=resource,\n _target_object=None,\n )\n return Policy.from_api_repr(info)", - "docstring": "Update the IAM policy for the bucket.\n\n See\n https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy\n\n If :attr:`user_project` is set, bills the API request to that project.\n\n :type policy: :class:`google.api_core.iam.Policy`\n :param policy: policy instance used to update bucket's IAM policy.\n\n :type client: :class:`~google.cloud.storage.client.Client` or\n ``NoneType``\n :param client: Optional. The client to use. If not passed, falls back\n to the ``client`` stored on the current bucket.\n\n :rtype: :class:`google.api_core.iam.Policy`\n :returns: the policy instance, based on the resource returned from\n the ``setIamPolicy`` API request." 
- }, - { - "code": "def _parse_target(target):\n if len(target) != 8:\n raise ArgumentError(\"Invalid targeting data length\", expected=8, length=len(target))\n slot, match_op = struct.unpack(\" 0 else {}\n return assoc", - "docstring": "Fetch an association object by ID" - }, - { - "code": "def fmt_ria(ria, verbose=True, mip=False):\n if verbose:\n mechanism = 'Mechanism: {}\\n'.format(\n fmt_mechanism(ria.mechanism, ria.node_labels))\n direction = '\\nDirection: {}'.format(ria.direction)\n else:\n mechanism = ''\n direction = ''\n if config.REPR_VERBOSITY is HIGH:\n partition = '\\n{}:\\n{}'.format(\n ('MIP' if mip else 'Partition'),\n indent(fmt_partition(ria.partition)))\n repertoire = '\\nRepertoire:\\n{}'.format(\n indent(fmt_repertoire(ria.repertoire)))\n partitioned_repertoire = '\\nPartitioned repertoire:\\n{}'.format(\n indent(fmt_repertoire(ria.partitioned_repertoire)))\n else:\n partition = ''\n repertoire = ''\n partitioned_repertoire = ''\n return (\n '{SMALL_PHI} = {phi}\\n'\n '{mechanism}'\n 'Purview = {purview}'\n '{direction}'\n '{partition}'\n '{repertoire}'\n '{partitioned_repertoire}').format(\n SMALL_PHI=SMALL_PHI,\n mechanism=mechanism,\n purview=fmt_mechanism(ria.purview, ria.node_labels),\n direction=direction,\n phi=fmt_number(ria.phi),\n partition=partition,\n repertoire=repertoire,\n partitioned_repertoire=partitioned_repertoire)", - "docstring": "Format a |RepertoireIrreducibilityAnalysis|." - }, - { - "code": "def validate(filename, verbose=False):\n is_remote = filename.startswith(\"http://\") or filename.startswith(\n \"https://\")\n with tempfile.TemporaryFile() if is_remote else open(\n filename, \"rb\") as f:\n if is_remote:\n r = requests.get(filename, verify=False)\n f.write(r.content)\n f.seek(0)\n r = requests.post(\n HTML_VALIDATOR_URL,\n files={\"file\": (filename, f, \"text/html\")},\n data={\n \"out\": \"json\",\n \"showsource\": \"yes\",\n },\n verify=False)\n return r.json()", - "docstring": "Validate file and return JSON result as dictionary.\n\n \"filename\" can be a file name or an HTTP URL.\n Return \"\" if the validator does not return valid JSON.\n Raise OSError if curl command returns an error status." - }, - { - "code": "def sendmail_proxy(subject, email, template, **context):\n sendmail.delay(subject.value, email, template, **context)", - "docstring": "Cast the lazy_gettext'ed subject to string before passing to Celery" - }, - { - "code": "def find_by_id(self, story, params={}, **options): \n path = \"/stories/%s\" % (story)\n return self.client.get(path, params, **options)", - "docstring": "Returns the full record for a single story.\n\n Parameters\n ----------\n story : {Id} Globally unique identifier for the story.\n [params] : {Object} Parameters for the request" - }, - { - "code": "def l1_error(true, pred):\n return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred))", - "docstring": "L1 distance between tensors true and pred." 
- }, - { - "code": "def extract_along_line(self, pid, xy0, xy1, N=10):\n assert N >= 2\n xy0 = np.array(xy0).squeeze()\n xy1 = np.array(xy1).squeeze()\n assert xy0.size == 2\n assert xy1.size == 2\n points = [(x, y) for x, y in zip(\n np.linspace(xy0[0], xy1[0], N), np.linspace(xy0[1], xy1[1], N)\n )]\n result = self.extract_points(pid, points)\n results_xyv = np.hstack((\n points,\n result[:, np.newaxis]\n ))\n return results_xyv", - "docstring": "Extract parameter values along a given line.\n\n Parameters\n ----------\n pid: int\n The parameter id to extract values from\n xy0: tuple\n A tupe with (x,y) start point coordinates\n xy1: tuple\n A tupe with (x,y) end point coordinates\n N: integer, optional\n The number of values to extract along the line (including start and\n end point)\n\n Returns\n -------\n values: numpy.ndarray (n x 1)\n data values for extracted data points" - }, - { - "code": "def get_all(cls, include_disabled=True):\n if cls == BaseAccount:\n raise InquisitorError('get_all on BaseAccount is not supported')\n account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id\n qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)\n if not include_disabled:\n qry = qry.filter(Account.enabled == 1)\n accounts = qry.find(Account.account_type_id == account_type_id)\n return {res.account_id: cls(res) for res in accounts}", - "docstring": "Returns a list of all accounts of a given type\n\n Args:\n include_disabled (`bool`): Include disabled accounts. Default: `True`\n\n Returns:\n list of account objects" - }, - { - "code": "def Expand(self, rdf_artifact, requested):\n source_type = rdf_artifacts.ArtifactSource.SourceType\n expanded_artifact = rdf_artifacts.ExpandedArtifact(\n name=rdf_artifact.name,\n provides=rdf_artifact.provides,\n requested_by_user=requested)\n for source in rdf_artifact.sources:\n if MeetsConditions(self._knowledge_base, source):\n type_name = source.type\n if type_name == source_type.ARTIFACT_GROUP:\n for subartifact in self._ExpandArtifactGroupSource(source, requested):\n yield subartifact\n continue\n elif type_name == source_type.ARTIFACT_FILES:\n expanded_sources = self._ExpandArtifactFilesSource(source, requested)\n else:\n expanded_sources = self._ExpandBasicSource(source)\n expanded_artifact.sources.Extend(expanded_sources)\n self.processed_artifacts.add(rdf_artifact.name)\n if expanded_artifact.sources:\n yield expanded_artifact", - "docstring": "Expand artifact by extending its sources.\n\n This method takes as input an rdf artifact object and returns a rdf expanded\n artifact. It iterates through the list of sources processing them by type.\n Each source of the original artifact can lead to one or more (in case of\n artifact groups and files where the sub artifacts are expanded recursively)\n sources in the expanded artifact. The list of sources of the expanded\n artifact is extended at the end of each iteration.\n\n The parameter `requested` is passed down at the recursive calls. So, if an\n artifact group is requested by the user, every artifact/source belonging to\n this group will be treated as requested by the user. 
The same applies to\n artifact files.\n\n Args:\n rdf_artifact: artifact object to expand (obtained from the registry)\n requested: Whether the artifact is requested by the user or scheduled for\n collection as a KnowledgeBase dependency.\n\n Yields:\n rdf value representation of expanded artifact containing the name of the\n artifact and the expanded sources" - }, - { - "code": "def set_log_level(self, level: str) -> None:\n if level == 'info':\n to_set = logging.INFO\n if level == 'debug':\n to_set = logging.DEBUG\n if level == 'error':\n to_set = logging.ERROR\n self.log.setLevel(to_set)", - "docstring": "Override the default log level of the class." - }, - { - "code": "def run(self) -> None:\n self.main_task = self.loop.create_task(self.main())\n try:\n self.loop.run_until_complete(self.main_task)\n except asyncio.CancelledError:\n pass\n finally:\n self.loop.run_until_complete(self.close())", - "docstring": "Sync function to run the worker, finally closes worker connections." - }, - { - "code": "def splitstring(string, splitcharacter=' ', part=None):\n if part in [None, '']:\n return str(string).split(splitcharacter)\n return str(string).split(splitcharacter)[part]", - "docstring": "Split a string based on a character and get the parts as a list.\n\n :type string: string\n :param string: The string to split.\n\n :type splitcharacter: string\n :param splitcharacter: The character to split for the string.\n\n :type part: integer\n :param part: Get a specific part of the list.\n\n :return: The split string or a specific part of it\n :rtype: list or string\n\n >>> splitstring('hello world !')\n ['hello', 'world', '!']\n\n >>> splitstring('hello world !', ' ', None)\n ['hello', 'world', '!']\n\n >>> splitstring('hello world !', ' ', None)\n ['hello', 'world', '!']\n\n >>> splitstring('hello world !', ' ', 0)\n 'hello'" - }, - { - "code": "def observed(self, date_observed=None):\n if self.name != 'Indicator':\n self.tcex.log.warning(u'Observed endpoint only available for \"indicator\" endpoint.')\n else:\n self._request_uri = '{}/observed'.format(self._request_uri)\n if date_observed is not None:\n self._request.add_payload('dateObserved', date_observed)", - "docstring": "Retrieve indicator observations count for top 10" - }, - { - "code": "def build_menu(self, display_type_menu, document_controller, display_panel):\n dynamic_live_actions = list()\n def switch_to_display_content(display_panel_type):\n self.switch_to_display_content(document_controller, display_panel, display_panel_type, display_panel.display_item)\n empty_action = display_type_menu.add_menu_item(_(\"Clear Display Panel\"), functools.partial(switch_to_display_content, \"empty-display-panel\"))\n display_type_menu.add_separator()\n data_item_display_action = display_type_menu.add_menu_item(_(\"Display Item\"), functools.partial(switch_to_display_content, \"data-display-panel\"))\n thumbnail_browser_action = display_type_menu.add_menu_item(_(\"Thumbnail Browser\"), functools.partial(switch_to_display_content, \"thumbnail-browser-display-panel\"))\n grid_browser_action = display_type_menu.add_menu_item(_(\"Grid Browser\"), functools.partial(switch_to_display_content, \"browser-display-panel\"))\n display_type_menu.add_separator()\n display_panel_type = display_panel.display_panel_type\n empty_action.checked = display_panel_type == \"empty\" and display_panel.display_panel_controller is None\n data_item_display_action.checked = display_panel_type == \"data_item\"\n thumbnail_browser_action.checked = display_panel_type == 
\"horizontal\"\n grid_browser_action.checked = display_panel_type == \"grid\"\n dynamic_live_actions.append(empty_action)\n dynamic_live_actions.append(data_item_display_action)\n dynamic_live_actions.append(thumbnail_browser_action)\n dynamic_live_actions.append(grid_browser_action)\n for factory in self.__display_controller_factories.values():\n dynamic_live_actions.extend(factory.build_menu(display_type_menu, display_panel))\n return dynamic_live_actions", - "docstring": "Build the dynamic menu for the selected display panel.\n\n The user accesses this menu by right-clicking on the display panel.\n\n The basic menu items are to an empty display panel or a browser display panel.\n\n After that, each display controller factory is given a chance to add to the menu. The display\n controllers (for instance, a scan acquisition controller), may add its own menu items." - }, - { - "code": "def process_block(self, current_block, previous_block, text):\n prev_fold_level = TextBlockHelper.get_fold_lvl(previous_block)\n if text.strip() == '':\n fold_level = prev_fold_level\n else:\n fold_level = self.detect_fold_level(\n previous_block, current_block)\n if fold_level > self.limit:\n fold_level = self.limit\n prev_fold_level = TextBlockHelper.get_fold_lvl(previous_block)\n if fold_level > prev_fold_level:\n block = current_block.previous()\n while block.isValid() and block.text().strip() == '':\n TextBlockHelper.set_fold_lvl(block, fold_level)\n block = block.previous()\n TextBlockHelper.set_fold_trigger(\n block, True)\n if text.strip():\n TextBlockHelper.set_fold_trigger(\n previous_block, fold_level > prev_fold_level)\n TextBlockHelper.set_fold_lvl(current_block, fold_level)\n prev = current_block.previous()\n if (prev and prev.isValid() and prev.text().strip() == '' and\n TextBlockHelper.is_fold_trigger(prev)):\n TextBlockHelper.set_collapsed(\n current_block, TextBlockHelper.is_collapsed(\n prev))\n TextBlockHelper.set_fold_trigger(prev, False)\n TextBlockHelper.set_collapsed(prev, False)", - "docstring": "Processes a block and setup its folding info.\n\n This method call ``detect_fold_level`` and handles most of the tricky\n corner cases so that all you have to do is focus on getting the proper\n fold level foreach meaningful block, skipping the blank ones.\n\n :param current_block: current block to process\n :param previous_block: previous block\n :param text: current block text" - }, - { - "code": "def _root_amplitude_brentq(counts, bkg, model, root_fn=_f_cash_root):\n amplitude_min, amplitude_max = _amplitude_bounds(counts, bkg, model)\n if not np.sum(counts) > 0:\n return amplitude_min, 0\n args = (counts, bkg, model)\n if root_fn(0.0, *args) < 0:\n return 0.0, 1\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n try:\n result = brentq(root_fn, amplitude_min, amplitude_max, args=args,\n maxiter=MAX_NITER, full_output=True, rtol=1E-4)\n return result[0], result[1].iterations\n except (RuntimeError, ValueError):\n return np.nan, MAX_NITER", - "docstring": "Fit amplitude by finding roots using Brent algorithm.\n\n See Appendix A Stewart (2009).\n\n Parameters\n ----------\n counts : `~numpy.ndarray`\n Slice of count map.\n bkg : `~numpy.ndarray`\n Slice of background map.\n model : `~numpy.ndarray`\n Model template to fit.\n\n Returns\n -------\n amplitude : float\n Fitted flux amplitude.\n niter : int\n Number of function evaluations needed for the fit." 
- }, - { - "code": "def _apply(self, func, name, window=None, center=None,\n check_minp=None, **kwargs):\n def f(x, name=name, *args):\n x = self._shallow_copy(x)\n if isinstance(name, str):\n return getattr(x, name)(*args, **kwargs)\n return x.apply(name, *args, **kwargs)\n return self._groupby.apply(f)", - "docstring": "Dispatch to apply; we are stripping all of the _apply kwargs and\n performing the original function call on the grouped object." - }, - { - "code": "def i2c_read_data(self, address):\n task = asyncio.ensure_future(self.core.i2c_read_data(address))\n value = self.loop.run_until_complete(task)\n return value", - "docstring": "Retrieve result of last data read from i2c device.\n i2c_read_request should be called before trying to retrieve data.\n It is intended for use by a polling application.\n\n :param address: i2c\n\n :returns: last data read or None if no data is present." - }, - { - "code": "def _read_file_header(self):\n header = self.fileobj.readline()\n payload1 = self.fileobj.readline()\n payload2 = self.fileobj.readline()\n version, reserved, organisation = payload1.split(None, 2)\n self.fileobj.readline()\n self.header_read = True\n if self.version and int(self.version) != version:\n raise IOError(\"Version mismatch. Requested version was '%s' but version in file was '%s'\"%(self.version, version))\n if version == '1':\n url, ip_address, date, content_type, length = header.split()\n self.file_headers = {\"ip_address\" : ip_address,\n \"date\" : datetime.datetime.strptime(date, \"%Y%m%d%H%M%S\"),\n \"org\" : organisation}\n self.version = 1\n elif version == '2':\n url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()\n self.file_headers = {\"ip_address\" : ip_address,\n \"date\" : datetime.datetime.strptime(date, \"%Y%m%d%H%M%S\"),\n \"org\" : organisation}\n self.version = 2\n else:\n raise IOError(\"Unknown ARC version '%s'\"%version)", - "docstring": "Reads out the file header for the arc file. If version was\n not provided, this will autopopulate it." - }, - { - "code": "def nextSolarReturn(date, lon):\n jd = eph.nextSolarReturn(date.jd, lon)\n return Datetime.fromJD(jd, date.utcoffset)", - "docstring": "Returns the next date when sun is at longitude 'lon'." - }, - { - "code": "def list_tags(self):\n from highton.models.tag import Tag\n return fields.ListField(\n name=self.ENDPOINT,\n init_class=Tag\n ).decode(\n self.element_from_string(\n self._get_request(\n endpoint=self.ENDPOINT + '/' + str(self.id) + '/' + Tag.ENDPOINT,\n ).text\n )\n )", - "docstring": "Get the tags of current object\n\n :return: the tags\n :rtype: list" - }, - { - "code": "def run(wrapped):\n @wraps(wrapped)\n def _run(self, query, bindings=None, *args, **kwargs):\n self._reconnect_if_missing_connection()\n start = time.time()\n try:\n result = wrapped(self, query, bindings, *args, **kwargs)\n except Exception as e:\n result = self._try_again_if_caused_by_lost_connection(\n e, query, bindings, wrapped\n )\n t = self._get_elapsed_time(start)\n self.log_query(query, bindings, t)\n return result\n return _run", - "docstring": "Special decorator encapsulating query method." - }, - { - "code": "def deployAll(self):\n targets = [Target.getTarget(iid) for iid, n, p in self.db.listTargets()]\n for target in targets:\n target.deploy()\n verbose('Deploy all complete')", - "docstring": "Deploys all the items from the vault. 
Useful after a format" - }, - { - "code": "def sample(program: Union[circuits.Circuit, schedules.Schedule],\n *,\n noise: devices.NoiseModel = devices.NO_NOISE,\n param_resolver: Optional[study.ParamResolver] = None,\n repetitions: int = 1,\n dtype: Type[np.number] = np.complex64) -> study.TrialResult:\n if noise == devices.NO_NOISE and protocols.has_unitary(program):\n return sparse_simulator.Simulator(dtype=dtype).run(\n program=program,\n param_resolver=param_resolver,\n repetitions=repetitions)\n return density_matrix_simulator.DensityMatrixSimulator(\n dtype=dtype, noise=noise).run(program=program,\n param_resolver=param_resolver,\n repetitions=repetitions)", - "docstring": "Simulates sampling from the given circuit or schedule.\n\n Args:\n program: The circuit or schedule to sample from.\n noise: Noise model to use while running the simulation.\n param_resolver: Parameters to run with the program.\n repetitions: The number of samples to take.\n dtype: The `numpy.dtype` used by the simulation. Typically one of\n `numpy.complex64` or `numpy.complex128`.\n Favors speed over precision by default, i.e. uses `numpy.complex64`." - }, - { - "code": "def get_url(self, url):\n _r = self.br.open(url)\n if self.br.geturl().startswith(self.AUTH_URL):\n raise AuthRequiredException\n elif self.br.geturl().startswith(self.ERROR_URL):\n raise RequestErrorException\n else:\n return _r.read()", - "docstring": "Internally used to retrieve the contents of a URL" - }, - { - "code": "def _from_io(self, source: IO):\n read = source.read\n if unpack('>I', source.read(4))[0] != ClassFile.MAGIC:\n raise ValueError('invalid magic number')\n self.version = unpack('>HH', source.read(4))[::-1]\n self._constants.unpack(source)\n self.access_flags.unpack(read(2))\n self._this, self._super, interfaces_count = unpack('>HHH', read(6))\n self._interfaces = unpack(\n f'>{interfaces_count}H',\n read(2 * interfaces_count)\n )\n self.fields.unpack(source)\n self.methods.unpack(source)\n self.attributes.unpack(source)", - "docstring": "Loads an existing JVM ClassFile from any file-like object." - }, - { - "code": "def references_date(year=None):\n \"Handle year value parsing for some edge cases\"\n date = None\n discriminator = None\n in_press = None\n if year and \"in press\" in year.lower().strip():\n in_press = True\n elif year and re.match(\"^[0-9]+$\", year):\n date = year\n elif year:\n discriminator_match = re.match(\"^([0-9]+?)([a-z]+?)$\", year)\n if discriminator_match:\n date = discriminator_match.group(1)\n discriminator = discriminator_match.group(2)\n else:\n date = year\n return (date, discriminator, in_press)", - "docstring": "Handle year value parsing for some edge cases" - }, - { - "code": "def check_for_duplicate_assignments(participant):\n participants = models.Participant.query.filter_by(\n assignment_id=participant.assignment_id\n ).all()\n duplicates = [\n p for p in participants if (p.id != participant.id and p.status == \"working\")\n ]\n for d in duplicates:\n q.enqueue(worker_function, \"AssignmentAbandoned\", None, d.id)", - "docstring": "Check that the assignment_id of the participant is unique.\n\n If it isnt the older participants will be failed." 
- }, - { - "code": "def map(self, func, *columns):\n if not columns:\n return map(func, self.rows)\n else:\n values = (self.values(column) for column in columns)\n result = [map(func, v) for v in values]\n if len(columns) == 1:\n return result[0]\n else:\n return result", - "docstring": "Map a function to rows, or to given columns" - }, - { - "code": "def link(self, req, ino, newparent, newname):\n self.reply_err(req, errno.EROFS)", - "docstring": "Create a hard link\n\n Valid replies:\n reply_entry\n reply_err" - }, - { - "code": "def add_node(self, node):\n if self.controllers.get(node.controller_type, None):\n raise RuntimeError(\"Cannot add node {} to the node group. A node for {} group is already assigned\".format(\n node,\n node.controller_type\n ))\n self.nodes.append(node)\n if node.controller:\n self.controllers[node.controller_type] = node.controller\n setattr(self, node.controller_type, node.controller)", - "docstring": "A a Node object to the group. Only one node per cgroup is supported" - }, - { - "code": "def parse_insert_size(self, f):\n s_name = self.get_s_name(f)\n d = dict()\n zero_insertsize = 0\n for l in f['f']:\n if l.startswith('\n continue\n insertsize, count = l.split(None, 1)\n insertsize = int(round(float(insertsize)))\n count = float(count) / 1000000\n if(insertsize == 0):\n zero_insertsize = count\n else:\n d[insertsize] = count\n num_counts = sum(d.values())\n cum_counts = 0\n median_insert_size = None\n for thisins, thiscount in d.items():\n cum_counts += thiscount\n if cum_counts >= num_counts/2:\n median_insert_size = thisins\n break\n self.general_stats_data[s_name]['median_insert_size'] = median_insert_size\n if s_name in self.qualimap_bamqc_insert_size_hist:\n log.debug(\"Duplicate insert size histogram sample name found! 
Overwriting: {}\".format(s_name))\n self.qualimap_bamqc_insert_size_hist[s_name] = d\n self.add_data_source(f, s_name=s_name, section='insert_size_histogram')", - "docstring": "Parse the contents of the Qualimap BamQC Insert Size Histogram file" - }, - { - "code": "def like(self, id, reblog_key):\n url = \"/v2/user/like\"\n params = {'id': id, 'reblog_key': reblog_key}\n return self.send_api_request(\"post\", url, params, ['id', 'reblog_key'])", - "docstring": "Like the post of the given blog\n\n :param id: an int, the id of the post you want to like\n :param reblog_key: a string, the reblog key of the post\n\n :returns: a dict created from the JSON response" - }, - { - "code": "def get_fixed_long_line(target, previous_line, original,\n indent_word=' ', max_line_length=79,\n aggressive=False, experimental=False, verbose=False):\n indent = _get_indentation(target)\n source = target[len(indent):]\n assert source.lstrip() == source\n assert not target.lstrip().startswith('\n tokens = list(generate_tokens(source))\n candidates = shorten_line(\n tokens, source, indent,\n indent_word,\n max_line_length,\n aggressive=aggressive,\n experimental=experimental,\n previous_line=previous_line)\n candidates = sorted(\n sorted(set(candidates).union([target, original])),\n key=lambda x: line_shortening_rank(\n x,\n indent_word,\n max_line_length,\n experimental=experimental))\n if verbose >= 4:\n print(('-' * 79 + '\\n').join([''] + candidates + ['']),\n file=wrap_output(sys.stderr, 'utf-8'))\n if candidates:\n best_candidate = candidates[0]\n if longest_line_length(best_candidate) > longest_line_length(original):\n return None\n return best_candidate", - "docstring": "Break up long line and return result.\n\n Do this by generating multiple reformatted candidates and then\n ranking the candidates to heuristically select the best option." - }, - { - "code": "def raw_partlist(input, timeout=20, showgui=False):\n output = tempfile.NamedTemporaryFile(\n prefix='eagexp_', suffix='.partlist', delete=0).name\n export_partlist_to_file(\n input=input, output=output, timeout=timeout, showgui=showgui)\n s = Path(output).text(encoding='latin1')\n os.remove(output)\n return s", - "docstring": "export partlist by eagle, then return it\n\n :param input: .sch or .brd file name\n :param timeout: int\n :param showgui: Bool, True -> do not hide eagle GUI\n :rtype: string" - }, - { - "code": "def _param32(ins):\n output = _32bit_oper(ins.quad[1])\n output.append('push de')\n output.append('push hl')\n return output", - "docstring": "Pushes 32bit param into the stack" - }, - { - "code": "def blue_hour(self, direction=SUN_RISING, date=None, local=True, use_elevation=True):\n if local and self.timezone is None:\n raise ValueError(\"Local time requested but Location has no timezone set.\")\n if self.astral is None:\n self.astral = Astral()\n if date is None:\n date = datetime.date.today()\n elevation = self.elevation if use_elevation else 0\n start, end = self.astral.blue_hour_utc(\n direction, date, self.latitude, self.longitude, elevation\n )\n if local:\n start = start.astimezone(self.tz)\n end = end.astimezone(self.tz)\n return start, end", - "docstring": "Returns the start and end times of the Blue Hour when the sun is traversing\n in the specified direction.\n\n This method uses the definition from PhotoPills i.e. 
the\n blue hour is when the sun is between 6 and 4 degrees below the horizon.\n\n :param direction: Determines whether the time is for the sun rising or setting.\n Use ``astral.SUN_RISING`` or ``astral.SUN_SETTING``. Default is rising.\n :type direction: int\n\n :param date: The date for which to calculate the times.\n If no date is specified then the current date will be used.\n :type date: :class:`~datetime.date`\n\n :param local: True = Times to be returned in location's time zone;\n False = Times to be returned in UTC.\n If not specified then the time will be returned in local time\n :type local: bool\n\n :param use_elevation: True = Return times that allow for the location's elevation;\n False = Return times that don't use elevation.\n If not specified then times will take elevation into account.\n :type use_elevation: bool\n\n :return: A tuple of the date and time at which the Blue Hour starts and ends.\n :rtype: (:class:`~datetime.datetime`, :class:`~datetime.datetime`)" - }, - { - "code": "def _arrayize_vectorized_indexer(indexer, shape):\n slices = [v for v in indexer.tuple if isinstance(v, slice)]\n if len(slices) == 0:\n return indexer\n arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]\n n_dim = arrays[0].ndim if len(arrays) > 0 else 0\n i_dim = 0\n new_key = []\n for v, size in zip(indexer.tuple, shape):\n if isinstance(v, np.ndarray):\n new_key.append(np.reshape(v, v.shape + (1, ) * len(slices)))\n else:\n shape = ((1,) * (n_dim + i_dim) + (-1,) +\n (1,) * (len(slices) - i_dim - 1))\n new_key.append(np.arange(*v.indices(size)).reshape(shape))\n i_dim += 1\n return VectorizedIndexer(tuple(new_key))", - "docstring": "Return an identical vindex but slices are replaced by arrays" - }, - { - "code": "def getfieldindex(data, commdct, objkey, fname):\n objindex = data.dtls.index(objkey)\n objcomm = commdct[objindex]\n for i_index, item in enumerate(objcomm):\n try:\n if item['field'] == [fname]:\n break\n except KeyError as err:\n pass\n return i_index", - "docstring": "given objkey and fieldname, return its index" - }, - { - "code": "def do_failures(self, arg):\n usable, filename, append = self._redirect_split(arg)\n a = self.tests[self.active]\n args = self.curargs\n splitargs = usable.split()\n if len(splitargs) > 0:\n tfilter = splitargs[0]\n else:\n tfilter = \"*\"\n outfiles = None\n if len(splitargs) > 1:\n outfiles = splitargs[1:len(splitargs)]\n result = a.failures(outfiles, args[\"threshold\"], tfilter)\n self._redirect_output(result, filename, append, msg.info)", - "docstring": "Prints a list of test cases that failed for the current unit test and analysis\n group settings. To only check failure on specific output files, set the list of\n files to check as arguments." - }, - { - "code": "def implode_multi_values(self, name, data):\n mkeys = [k for k in data.keys() if k.startswith(name + '.')]\n mvls = [data.pop(k)[0] for k in mkeys]\n if mvls:\n data.setlist(name, mvls)", - "docstring": "Due to the way Angular organizes it model, when Form data is sent via a POST request,\n then for this kind of widget, the posted data must to be converted into a format suitable\n for Django's Form validation." - }, - { - "code": "def cancel(self):\n if self.__process.is_alive():\n self.__process.terminate()\n _raise_exception(self.__timeout_exception, self.__exception_message)", - "docstring": "Terminate any possible execution of the embedded function." 
- }, - { - "code": "def compliance(self, value):\n if (self.api_version < '2.0'):\n self.profile = value\n else:\n try:\n self.profile[0] = value\n except AttributeError:\n self.profile = [value]", - "docstring": "Set the compliance profile URI." - }, - { - "code": "def hub(self, port):\r\n self._ip = \"localhost\"\r\n self._port = port \r\n self.command = [self._conf[\"java_path\"], \"-jar\", self._conf[\"jar_path\"], \"-port\", str(port), \"-role\", \"hub\"] \r\n return self", - "docstring": "java -jar selenium-server.jar -role hub -port 4444\r\n @param port: listen port of selenium hub" - }, - { - "code": "def get_module_classes(module):\n clslst = get_module_members(module, type=inspect.isclass)\n return list(filter(lambda cls: not issubclass(cls, Exception),\n clslst))", - "docstring": "Get a list of module member classes.\n\n Parameters\n ----------\n module : string or module object\n Module for which member list is to be generated\n\n Returns\n -------\n mbrlst : list\n List of module functions" - }, - { - "code": "def _ConvertDataType(self, dataType):\n if dataType:\n vmodlName = dataType.name\n wsdlName = dataType.wsdlName\n version = dataType.version\n parent = dataType.base[0]\n props = self._Filter(self._ConvertDataPropertyType, dataType.property)\n doType = (vmodlName, wsdlName, parent, version, props)\n else:\n doType = None\n return doType", - "docstring": "Convert vmodl.reflect.DynamicTypeManager.DataTypeInfo to pyVmomi data\n type definition" - }, - { - "code": "def gen_undef():\n empty_reg = ReilEmptyOperand()\n return ReilBuilder.build(ReilMnemonic.UNDEF, empty_reg, empty_reg, empty_reg)", - "docstring": "Return an UNDEF instruction." - }, - { - "code": "def delete(self, *args, **kwargs):\n cache_key = self.make_key(args, kwargs)\n with self._cache_lock:\n try:\n del self._cache[cache_key]\n except KeyError:\n pass", - "docstring": "Delete an item from the cache for this combination of args and\n kwargs." - }, - { - "code": "def _GetFileNames(self):\n if self._zip:\n return self._zip.namelist()\n else:\n return os.listdir(self._path)", - "docstring": "Returns a list of file names in the feed." - }, - { - "code": "async def new_client_from_config(config_file=None, context=None, persist_config=True):\n client_config = type.__call__(Configuration)\n await load_kube_config(config_file=config_file, context=context,\n client_configuration=client_config,\n persist_config=persist_config)\n return ApiClient(configuration=client_config)", - "docstring": "Loads configuration the same as load_kube_config but returns an ApiClient\n to be used with any API object. This will allow the caller to concurrently\n talk with multiple clusters." - }, - { - "code": "def split_timesteps(data, consistent_abmn=False):\n if has_multiple_timesteps(data):\n grouped = data.groupby(\"timestep\")\n return [group[1] for group in grouped]\n else:\n return data", - "docstring": "Split data into multiple timesteps." - }, - { - "code": "def wheel_metadata(self):\n if not self.is_wheel:\n raise TypeError(\"Requirement is not a wheel distribution!\")\n for distribution in find_distributions(self.source_directory):\n return distribution\n msg = \"pkg_resources didn't find a wheel distribution in %s!\"\n raise Exception(msg % self.source_directory)", - "docstring": "Get the distribution metadata of an unpacked wheel distribution." 
- }, - { - "code": "def all_synsets(self):\n for synset_dict in self._mongo_db.synsets.find():\n yield Synset(self, synset_dict)", - "docstring": "A generator over all the synsets in the GermaNet database." - }, - { - "code": "def block_header_to_hex( block_data, prev_hash ):\n header_info = {\n \"version\": block_data['version'],\n \"prevhash\": prev_hash,\n \"merkle_root\": block_data['merkleroot'],\n \"timestamp\": block_data['time'],\n \"bits\": int(block_data['bits'], 16),\n \"nonce\": block_data['nonce'],\n \"hash\": block_data['hash']\n }\n return block_header_serialize(header_info)", - "docstring": "Calculate the hex form of a block's header, given its getblock information from bitcoind." - }, - { - "code": "def list_commands(self, ctx):\n commands_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'commands')\n command_list = [name for __, name, ispkg in pkgutil.iter_modules([commands_path]) if ispkg]\n command_list.sort()\n return command_list", - "docstring": "List CLI commands\n @type ctx: Context\n @rtype: list" - }, - { - "code": "def exp_transform(params):\n weights = np.exp(np.asarray(params) - np.mean(params))\n return (len(weights) / weights.sum()) * weights", - "docstring": "Transform parameters into exp-scale weights." - }, - { - "code": "def execute_update(self, update, safe=False):\n\t\tassert len(update.update_data) > 0\n\t\tself.queue.append(UpdateOp(self.transaction_id, self, update.query.type, safe, update))\n\t\tif self.autoflush:\n\t\t\treturn self.flush()", - "docstring": "Execute an update expression. Should generally only be called implicitly." - }, - { - "code": "def back_dfs(self, start, end=None):\n return list(self.iterdfs(start, end, forward=False))", - "docstring": "Returns a list of nodes in some backward DFS order.\n\n Starting from the start node the depth first search proceeds along\n incoming edges." - }, - { - "code": "def get_data_for_sensors(macs=[], search_duratio_sec=5, bt_device=''):\n log.info('Get latest data for sensors. Stop with Ctrl+C.')\n log.info('Stops automatically in %ss', search_duratio_sec)\n log.info('MACs: %s', macs)\n datas = dict()\n for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, search_duratio_sec, bt_device=bt_device):\n datas[new_data[0]] = new_data[1]\n return datas", - "docstring": "Get lates data for sensors in the MAC's list.\n\n Args:\n macs (array): MAC addresses\n search_duratio_sec (int): Search duration in seconds. Default 5\n bt_device (string): Bluetooth device id\n Returns:\n dict: MAC and state of found sensors" - }, - { - "code": "def to_array(self):\n array = super(Game, self).to_array()\n array['title'] = u(self.title)\n array['description'] = u(self.description)\n array['photo'] = self._as_array(self.photo)\n if self.text is not None:\n array['text'] = u(self.text)\n if self.text_entities is not None:\n array['text_entities'] = self._as_array(self.text_entities)\n if self.animation is not None:\n array['animation'] = self.animation.to_array()\n return array", - "docstring": "Serializes this Game to a dictionary.\n\n :return: dictionary representation of this object.\n :rtype: dict" - }, - { - "code": "def set_schema_to_public(self):\n self.tenant = FakeTenant(schema_name=get_public_schema_name())\n self.schema_name = get_public_schema_name()\n self.set_settings_schema(self.schema_name)\n self.search_path_set = False", - "docstring": "Instructs to stay in the common 'public' schema." 
- }, - { - "code": "def get_queryset(self):\n self.queryset = super(CustomFieldsMixin, self).get_queryset()\n serializer_class = self.get_serializer_class()\n if hasattr(serializer_class.Meta, 'nested_fields'):\n nested_fields = serializer_class.Meta.nested_fields\n fields = serializer_class.Meta.fields\n self._expand_queryset(fields, nested_fields, self.queryset.model)\n return self.queryset", - "docstring": "For reducing the query count the queryset is expanded with `prefetch_related` and `select_related` depending on the\n specified fields and nested fields" - }, - { - "code": "def remove_infinite_values(self):\n if util.is_shape(self.faces, (-1, 3)):\n face_mask = np.isfinite(self.faces).all(axis=1)\n self.update_faces(face_mask)\n if util.is_shape(self.vertices, (-1, 3)):\n vertex_mask = np.isfinite(self.vertices).all(axis=1)\n self.update_vertices(vertex_mask)", - "docstring": "Ensure that every vertex and face consists of finite numbers.\n\n This will remove vertices or faces containing np.nan and np.inf\n\n Alters\n ----------\n self.faces : masked to remove np.inf/np.nan\n self.vertices : masked to remove np.inf/np.nan" - }, - { - "code": "def score(self, X, design, scan_onsets=None):\n assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \\\n 'The shape of X is not consistent with the shape of data '\\\n 'used in the fitting step. They should have the same number '\\\n 'of voxels'\n assert scan_onsets is None or (scan_onsets.ndim == 1 and\n 0 in scan_onsets), \\\n 'scan_onsets should either be None or an array of indices '\\\n 'If it is given, it should include at least 0'\n if scan_onsets is None:\n scan_onsets = np.array([0], dtype=int)\n else:\n scan_onsets = np.int32(scan_onsets)\n ll = self._score(Y=X, design=design, beta=self.beta_,\n scan_onsets=scan_onsets, beta0=self.beta0_,\n rho_e=self.rho_, sigma_e=self.sigma_,\n rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)\n ll_null = self._score(Y=X, design=None, beta=None,\n scan_onsets=scan_onsets, beta0=self.beta0_,\n rho_e=self.rho_, sigma_e=self.sigma_,\n rho_X0=self._rho_X0_,\n sigma2_X0=self._sigma2_X0_)\n return ll, ll_null", - "docstring": "Use the model and parameters estimated by fit function\n from some data of a participant to evaluate the log\n likelihood of some new data of the same participant.\n Design matrix of the same set of experimental\n conditions in the testing data should be provided, with each\n column corresponding to the same condition as that column\n in the design matrix of the training data.\n Unknown nuisance time series will be marginalized, assuming\n they follow the same spatial pattern as in the training\n data. The hypothetical response captured by the design matrix\n will be subtracted from data before the marginalization\n when evaluating the log likelihood. For null model,\n nothing will be subtracted before marginalization.\n\n There is a difference between the form of likelihood function\n used in fit() and score(). In fit(), the response amplitude\n beta to design matrix X and the modulation beta0 by nuisance\n regressor X0 are both marginalized, with X provided and X0\n estimated from data. In score(), posterior estimation of\n beta and beta0 from the fitting step are assumed unchanged\n to testing data and X0 is marginalized.\n The logic underlying score() is to transfer\n as much as what we can learn from training data when\n calculating a likelihood score for testing data.\n\n If you z-scored your data during fit step, you should\n z-score them for score function as well. 
If you did not\n z-score in fitting, you should not z-score here either.\n\n Parameters\n ----------\n X : numpy arrays, shape=[time_points, voxels]\n fMRI data of new data of the same subject. The voxels should\n match those used in the fit() function. If data are z-scored\n (recommended) when fitting the model, data should be z-scored\n as well when calling transform()\n design : numpy array, shape=[time_points, conditions]\n Design matrix expressing the hypothetical response of\n the task conditions in data X.\n scan_onsets : numpy array, shape=[number of runs].\n A list of indices corresponding to the onsets of\n scans in the data X. If not provided, data will be assumed\n to be acquired in a continuous scan.\n Returns\n -------\n ll: float.\n The log likelihood of the new data based on the model and its\n parameters fit to the training data.\n ll_null: float.\n The log likelihood of the new data based on a null model\n which assumes the same as the full model for everything\n except for that there is no response to any of the\n task conditions." - }, - { - "code": "def _to_user_defined(pif_obj):\n res = {}\n rv = ReadView(pif_obj)\n for k in rv.keys():\n name, value = _extract_key_value(rv[k].raw)\n if name and value is not None:\n res[name] = value\n pif = pif_obj.as_dictionary()\n elements = {}\n if pif.get(\"composition\"):\n for comp in pif[\"composition\"]:\n if comp.get(\"actualAtomicPercent\"):\n elements[comp[\"element\"]] = float(comp[\"actualAtomicPercent\"][\"value\"])\n elif comp.get(\"actualWeightPercent\"):\n elements[comp[\"element\"]] = float(comp[\"actualWeightPercent\"][\"value\"])\n if elements:\n res[\"elemental_percent\"] = elements\n elif pif.get(\"chemicalFormula\"):\n symbol = \"\"\n num = \"\"\n for char in pif[\"chemicalFormula\"]:\n if char.isupper():\n if symbol:\n try:\n elements[symbol] = int(num)\n except ValueError:\n elements[symbol] = float(num) if num else 1\n symbol = \"\"\n num = \"\"\n symbol += char\n elif char.islower():\n symbol += char\n elif char.isdigit():\n num += char\n elif char == \".\":\n num += char\n if elements:\n res[\"elemental_proportion\"] = elements\n return res", - "docstring": "Read the systems in the PIF to populate the user-defined portion" - }, - { - "code": "def getparent(d, pth):\n c = d\n for key in pth[:-1]:\n if not isinstance(c, dict):\n raise InvalidValueError(c)\n elif key not in c:\n raise UnknownKeyError(pth)\n else:\n c = c.__getitem__(key)\n return c", - "docstring": "Get the parent node of a subdict as specified by the key path in\n `pth`.\n\n Parameters\n ----------\n d : dict\n Dict tree in which access is required\n pth : str or tuple of str\n Dict key" - }, - { - "code": "def get(cls, dname):\n Domain = cls\n dname = dname.hostname if hasattr(dname, 'hostname') else dname.lower()\n return Session.query(Domain).filter(Domain.name == dname).first()", - "docstring": "Get the requested domain\n @param dname: Domain name\n @type dname: str\n @rtype: Domain or None" - }, - { - "code": "def hamming_calc(TP, POP):\n try:\n length = POP\n return (1 / length) * (length - sum(TP.values()))\n except Exception:\n return \"None\"", - "docstring": "Calculate hamming loss.\n\n :param TP: true positive\n :type TP : dict\n :param POP: population\n :type POP : int\n :return: hamming loss as float" - }, - { - "code": "def hash_coloured_escapes(text):\n ansi_code = int(sha256(text.encode('utf-8')).hexdigest(), 16) % 230\n prefix, suffix = colored('SPLIT', ansi_code=ansi_code).split('SPLIT')\n return prefix, suffix", - 
"docstring": "Return the ANSI hash colour prefix and suffix for a given text" - }, - { - "code": "def _volume_command(ramp, volume):\n if volume is not None:\n ramp.set_volume(float(volume))\n else:\n print ramp.volume", - "docstring": "Set the value if a volume level is provided, else print the current\n volume level." - }, - { - "code": "def loads(string, *transformers, **kwargs):\n ignore_remaining_data = kwargs.get(\"ignore_remaining_data\", False)\n return load(\n BytesIO(string), *transformers, ignore_remaining_data=ignore_remaining_data\n )", - "docstring": "Deserializes Java objects and primitive data serialized using\n ObjectOutputStream from a string.\n\n :param string: A Java data string\n :param transformers: Custom transformers to use\n :param ignore_remaining_data: If True, don't log an error when unused\n trailing bytes are remaining\n :return: The deserialized object" - }, - { - "code": "def export(self, path, variables_saver=None):\n proto = saved_model_pb2.SavedModel()\n proto.CopyFrom(self._proto)\n assets_map = _make_assets_key_collection(proto, path)\n self._save_all_assets(path, assets_map)\n self._save_variables(path, variables_saver)\n self._save_proto(path, proto)", - "docstring": "Exports to SavedModel directory.\n\n Args:\n path: path where to export the SavedModel to.\n variables_saver: lambda that receives a directory path where to\n export checkpoints of variables." - }, - { - "code": "def byteify(input_object):\n if isinstance(input_object, dict):\n return {byteify(key): byteify(value)\n for key, value in list(input_object.items())}\n elif isinstance(input_object, list):\n return [byteify(element) for element in input_object]\n elif isinstance(input_object, str):\n return input_object.encode('utf-8')\n else:\n return input_object", - "docstring": "Recursive function to transform an object to byte.\n\n :param input_object: A python object such as unicode, dictionary or list.\n :type: unicode, list, dict\n\n :return: The object with byte only." - }, - { - "code": "def translate_argv(raw_args):\n kwargs = {}\n def get_parameter(param_str):\n for i, a in enumerate(raw_args):\n if a == param_str:\n assert len(raw_args) == i+2 and raw_args[i+1][0] != '-', \\\n 'All arguments must have a value, e.g. `-testing true`'\n return raw_args[i+1]\n return None\n value = get_parameter('-testing')\n if value is not None and value.lower() in ('true', 't', 'yes'):\n kwargs['testing'] = True\n value = get_parameter('-connect')\n if value is not None:\n colon = value.find(':')\n if colon > -1:\n kwargs['host'] = value[0:colon]\n kwargs['port'] = int(value[colon+1:])\n else:\n kwargs['host'] = value\n value = get_parameter('-name')\n if value is not None:\n kwargs['name'] = value\n value = get_parameter('-group')\n if value is not None:\n kwargs['group_name'] = value\n value = get_parameter('-scan')\n if value in ('true', 't', 'yes'):\n kwargs['scan_for_port'] = True\n value = get_parameter('-debug')\n if value in ('true', 't', 'yes'):\n kwargs['debug'] = True\n return kwargs", - "docstring": "Enables conversion from system arguments.\n\n Parameters\n ----------\n raw_args : list\n Arguments taken raw from the system input.\n\n Returns\n -------\n kwargs : dict\n The input arguments formatted as a kwargs dict.\n To use as input, simply use `KQMLModule(**kwargs)`." 
- }, - { - "code": "def _mock_input(self, target, content):\n content = helper.to_str(content)\n for w in content:\n target.send_keys(w)\n rand_block(0.01, 0.01)", - "docstring": "mock human input\n\n :param target: the element to input to\n :param content: the content\n :return:" - }, - { - "code": "def get_tip_label_coords(self):\n ns = self.ttree.ntips\n tip_xpos = self.coords.verts[:ns, 0]\n tip_ypos = self.coords.verts[:ns, 1]\n align_edges = None\n align_verts = None\n if self.style.orient in (0, 'down'):\n if self.style.tip_labels_align:\n tip_yend = np.zeros(ns)\n align_edges = np.array([\n (i + len(tip_ypos), i) for i in range(len(tip_ypos))\n ])\n align_verts = np.array(\n list(zip(tip_xpos, tip_ypos)) + \\\n list(zip(tip_xpos, tip_yend))\n )\n tip_ypos = tip_yend\n else:\n if self.style.tip_labels_align:\n tip_xend = np.zeros(ns)\n align_edges = np.array([\n (i + len(tip_xpos), i) for i in range(len(tip_xpos))\n ])\n align_verts = np.array(\n list(zip(tip_xpos, tip_ypos)) + \\\n list(zip(tip_xend, tip_ypos))\n )\n tip_xpos = tip_xend\n return tip_xpos, tip_ypos, align_edges, align_verts", - "docstring": "Get starting position of tip labels text based on locations of the \n leaf nodes on the tree and style offset and align options. Node\n positions are found using the .verts attribute of coords and is \n already oriented for the tree face direction." - }, - { - "code": "def cartesian_square_centred_on_point(self, point, distance, **kwargs):\n point_surface = Point(point.longitude, point.latitude, 0.)\n north_point = point_surface.point_at(distance, 0., 0.)\n east_point = point_surface.point_at(distance, 0., 90.)\n south_point = point_surface.point_at(distance, 0., 180.)\n west_point = point_surface.point_at(distance, 0., 270.)\n is_long = np.logical_and(\n self.catalogue.data['longitude'] >= west_point.longitude,\n self.catalogue.data['longitude'] < east_point.longitude)\n is_surface = np.logical_and(\n is_long,\n self.catalogue.data['latitude'] >= south_point.latitude,\n self.catalogue.data['latitude'] < north_point.latitude)\n upper_depth, lower_depth = _check_depth_limits(kwargs)\n is_valid = np.logical_and(\n is_surface,\n self.catalogue.data['depth'] >= upper_depth,\n self.catalogue.data['depth'] < lower_depth)\n return self.select_catalogue(is_valid)", - "docstring": "Select earthquakes from within a square centered on a point\n\n :param point:\n Centre point as instance of nhlib.geo.point.Point class\n\n :param distance:\n Distance (km)\n\n :returns:\n Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`\n class containing only selected events" - }, - { - "code": "def resolve(container, expression):\n itemgetter = getattr(container, 'get_item', container.get)\n tokens = []\n expression = expression.strip()\n for sel_delim, _id, _range in selection_re.findall(expression):\n tokens.append(delimiters.get(sel_delim, ''))\n item = itemgetter(_id)\n if item is None:\n raise XigtStructureError(\n 'Referred Item (id: {}) from reference \"{}\" does not '\n 'exist in the given container.'\n .format(_id, expression)\n )\n value = item.value() or ''\n if _range:\n for spn_delim, start, end in span_re.findall(_range):\n start = int(start) if start else None\n end = int(end) if end else None\n tokens.extend([\n delimiters.get(spn_delim, ''),\n value[start:end]\n ])\n else:\n tokens.append(value)\n return ''.join(tokens)", - "docstring": "Return the string that is the resolution of the alignment expression\n `expression`, which selects ids from `container`." 
- }, - { - "code": "def get_data(self, start=None, length=None):\n PointerToRawData_adj = self.pe.adjust_FileAlignment( self.PointerToRawData,\n self.pe.OPTIONAL_HEADER.FileAlignment )\n VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress,\n self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment )\n if start is None:\n offset = PointerToRawData_adj\n else:\n offset = ( start - VirtualAddress_adj ) + PointerToRawData_adj\n if length is not None:\n end = offset + length\n else:\n end = offset + self.SizeOfRawData\n if end > self.PointerToRawData + self.SizeOfRawData:\n end = self.PointerToRawData + self.SizeOfRawData\n return self.pe.__data__[offset:end]", - "docstring": "Get data chunk from a section.\n\n Allows to query data from the section by passing the\n addresses where the PE file would be loaded by default.\n It is then possible to retrieve code and data by their real\n addresses as they would be if loaded.\n\n Returns bytes() under Python 3.x and set() under Python 2.7" - }, - { - "code": "def string_matches_sans_whitespace(self, str1, str2_fuzzy_whitespace):\n str2_fuzzy_whitespace = re.sub('\\s+', '\\s*', str2_fuzzy_whitespace)\n return re.search(str2_fuzzy_whitespace, str1) is not None", - "docstring": "Check if two strings match, modulo their whitespace." - }, - { - "code": "def endSubscription(self, subscriber):\n self._reqId2Contract.pop(subscriber.reqId, None)\n self.reqId2Subscriber.pop(subscriber.reqId, None)", - "docstring": "Unregister a live subscription." - }, - { - "code": "def claimInterface(self, interface):\n r\n if isinstance(interface, Interface):\n interface = interface.interfaceNumber\n util.claim_interface(self.dev, interface)\n self.__claimed_interface = interface", - "docstring": "r\"\"\"Claims the interface with the Operating System.\n\n Arguments:\n interface: interface number or an Interface object." - }, - { - "code": "def tdController(self):\n cid = c_int()\n ctype = c_int()\n name = create_string_buffer(255)\n available = c_int()\n self._lib.tdController(byref(cid), byref(ctype), name, sizeof(name),\n byref(available))\n return {'id': cid.value, 'type': ctype.value,\n 'name': self._to_str(name), 'available': available.value}", - "docstring": "Get the next controller while iterating.\n\n :return: a dict with the keys: id, type, name, available." - }, - { - "code": "def com_google_fonts_check_name_line_breaks(ttFont):\n failed = False\n for name in ttFont[\"name\"].names:\n string = name.string.decode(name.getEncoding())\n if \"\\n\" in string:\n failed = True\n yield FAIL, (\"Name entry {} on platform {} contains\"\n \" a line-break.\").format(NameID(name.nameID).name,\n PlatformID(name.platformID).name)\n if not failed:\n yield PASS, (\"Name table entries are all single-line\"\n \" (no line-breaks found).\")", - "docstring": "Name table entries should not contain line-breaks." - }, - { - "code": "def delete(self):\n if 'delete' in self._URL:\n extra = {'resource': self.__class__.__name__, 'query': {\n 'id': self.id}}\n logger.info(\"Deleting {} resource.\".format(self), extra=extra)\n self._api.delete(url=self._URL['delete'].format(id=self.id))\n else:\n raise SbgError('Resource can not be deleted!')", - "docstring": "Deletes the resource on the server." 
- }, - { - "code": "def getAuthenticatedRole(store):\n def tx():\n def addToEveryone(newAuthenticatedRole):\n newAuthenticatedRole.becomeMemberOf(getEveryoneRole(store))\n return newAuthenticatedRole\n return store.findOrCreate(Role, addToEveryone, externalID=u'Authenticated')\n return store.transact(tx)", - "docstring": "Get the base 'Authenticated' role for this store, which is the role that is\n given to every user who is explicitly identified by a non-anonymous\n username." - }, - { - "code": "def _get_range_from_filters(cls, filters, model_class):\n if not filters:\n return None, None, None\n range_property = None\n start_val = None\n end_val = None\n start_filter = None\n end_filter = None\n for f in filters:\n prop, op, val = f\n if op in [\">\", \">=\", \"<\", \"<=\"]:\n if range_property and range_property != prop:\n raise errors.BadReaderParamsError(\n \"Range on only one property is supported.\")\n range_property = prop\n if val is None:\n raise errors.BadReaderParamsError(\n \"Range can't be None in filter %s\", f)\n if op in [\">\", \">=\"]:\n if start_val is not None:\n raise errors.BadReaderParamsError(\n \"Operation %s is specified more than once.\", op)\n start_val = val\n start_filter = f\n else:\n if end_val is not None:\n raise errors.BadReaderParamsError(\n \"Operation %s is specified more than once.\", op)\n end_val = val\n end_filter = f\n elif op != \"=\":\n raise errors.BadReaderParamsError(\n \"Only < <= > >= = are supported as operation. Got %s\", op)\n if not range_property:\n return None, None, None\n if start_val is None or end_val is None:\n raise errors.BadReaderParamsError(\n \"Filter should contains a complete range on property %s\",\n range_property)\n if issubclass(model_class, db.Model):\n property_obj = model_class.properties()[range_property]\n else:\n property_obj = (\n model_class._properties[\n range_property])\n supported_properties = (\n _DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() +\n _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys())\n if not isinstance(property_obj, tuple(supported_properties)):\n raise errors.BadReaderParamsError(\n \"Filtered property %s is not supported by sharding.\", range_property)\n if not start_val < end_val:\n raise errors.BadReaderParamsError(\n \"Start value %s should be smaller than end value %s\",\n start_val, end_val)\n return property_obj, start_filter, end_filter", - "docstring": "Get property range from filters user provided.\n\n This method also validates there is one and only one closed range on a\n single property.\n\n Args:\n filters: user supplied filters. Each filter should be a list or tuple of\n format (, ,\n ). Value type should satisfy the property's type.\n model_class: the model class for the entity type to apply filters on.\n\n Returns:\n a tuple of (property, start_filter, end_filter). property is the model's\n field that the range is about. start_filter and end_filter define the\n start and the end of the range. (None, None, None) if no range is found.\n\n Raises:\n BadReaderParamsError: if any filter is invalid in any way." - }, - { - "code": "def change_nick(self, nick):\n old_nick = self.nick\n self.nick = IRCstr(nick)\n for c in self.channels:\n c.users.remove(old_nick)\n c.users.add(self.nick)", - "docstring": "Update this user's nick in all joined channels." 
- }, - { - "code": "def scan(self, search_path=None):\n if search_path is None:\n search_path = sys.path\n for item in search_path:\n for dist in find_distributions(item):\n self.add(dist)", - "docstring": "Scan `search_path` for distributions usable in this environment\n\n Any distributions found are added to the environment.\n `search_path` should be a sequence of ``sys.path`` items. If not\n supplied, ``sys.path`` is used. Only distributions conforming to\n the platform/python version defined at initialization are added." - }, - { - "code": "def standard_bytes_header(self, title, addr, length):\n self.save_header(self.HEADER_TYPE_CODE, title, length, param1=addr, param2=32768)", - "docstring": "Generates a standard header block of CODE type" - }, - { - "code": "def listar_por_equipamento(self, id_equipamento):\n if not is_valid_int_param(id_equipamento):\n raise InvalidParameterError(\n u'Equipment id is invalid or was not informed.')\n url = 'interface/equipamento/' + str(id_equipamento) + '/'\n code, map = self.submit(None, 'GET', url)\n key = 'interface'\n return get_list_map(self.response(code, map, [key]), key)", - "docstring": "List all interfaces of an equipment.\n\n :param id_equipamento: Equipment identifier.\n\n :return: Dictionary with the following:\n\n ::\n\n {'interface':\n [{'protegida': < protegida >,\n 'nome': < nome >,\n 'id_ligacao_front': < id_ligacao_front >,\n 'id_equipamento': < id_equipamento >,\n 'id': < id >,\n 'descricao': < descricao >,\n 'id_ligacao_back': < id_ligacao_back >}, ... other interfaces ...]}\n\n :raise InvalidParameterError: Equipment identifier is invalid or none.\n :raise DataBaseError: Networkapi failed to access the database.\n :raise XMLError: Networkapi failed to generate the XML response." - }, - { - "code": "def _fill_result_cache(self):\n idx = 0\n try:\n while True:\n idx += 1000\n self._fill_result_cache_to_idx(idx)\n except StopIteration:\n pass\n self._count = len(self._result_cache)", - "docstring": "Fill the result cache with all results." - }, - { - "code": "def approve(self, peer_jid):\n self.roster.approve(aioxmpp.JID.fromstr(peer_jid).bare())", - "docstring": "Approve a subscription request from jid\n\n Args:\n peer_jid (str): the JID to approve" - }, - { - "code": "def auto_find_instance_path(self) -> Path:\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return package_path / \"instance\"\n return prefix / \"var\" / f\"{self.name}-instance\"", - "docstring": "Locates the instace_path if it was not provided" - }, - { - "code": "def SetParserProp(self, prop, value):\n ret = libxml2mod.xmlTextReaderSetParserProp(self._o, prop, value)\n return ret", - "docstring": "Change the parser processing behaviour by changing some of\n its internal properties. Note that some properties can only\n be changed before any read has been done." - }, - { - "code": "def standardize_shapes(features, batch_size=None):\n for fname in [\"inputs\", \"targets\"]:\n if fname not in features:\n continue\n f = features[fname]\n while len(f.get_shape()) < 4:\n f = tf.expand_dims(f, axis=-1)\n features[fname] = f\n if batch_size:\n for _, t in six.iteritems(features):\n shape = t.get_shape().as_list()\n shape[0] = batch_size\n t.set_shape(t.get_shape().merge_with(shape))\n t.get_shape().assert_is_fully_defined()\n return features", - "docstring": "Set the right shapes for the features." 
- }, - { - "code": "def _reads_per_position(bam_in, loci_file, out_dir):\n data = Counter()\n a = pybedtools.BedTool(bam_in)\n b = pybedtools.BedTool(loci_file)\n c = a.intersect(b, s=True, bed=True, wo=True)\n for line in c:\n end = int(line[1]) + 1 + int(line[2]) if line[5] == \"+\" else int(line[1]) + 1\n start = int(line[1]) + 1 if line[5] == \"+\" else int(line[1]) + 1 + int(line[2])\n side5 = \"%s\\t5p\\t%s\" % (line[15], start)\n side3 = \"%s\\t3p\\t%s\" % (line[15], end)\n data[side5] += 1\n data[side3] += 1\n counts_reads = op.join(out_dir, 'locus_readpos.counts')\n with open(counts_reads, 'w') as out_handle:\n for k in data:\n print(k, file=out_handle, end=\"\")\n return counts_reads", - "docstring": "Create input for compute entropy" - }, - { - "code": "def save_current(self, path=None):\n try:\n if not path and not self._current.file.path:\n path, filter = QtWidgets.QFileDialog.getSaveFileName(\n self, _('Choose destination path'))\n if not path:\n return False\n old_path = self._current.file.path\n code_edit = self._current\n self._save_editor(code_edit, path)\n path = code_edit.file.path\n if path and old_path != path:\n self._ensure_unique_name(code_edit, code_edit.file.name)\n self.setTabText(self.currentIndex(), code_edit._tab_name)\n ext = os.path.splitext(path)[1]\n old_ext = os.path.splitext(old_path)[1]\n if ext != old_ext or not old_path:\n icon = QtWidgets.QFileIconProvider().icon(\n QtCore.QFileInfo(code_edit.file.path))\n self.setTabIcon(self.currentIndex(), icon)\n return True\n except AttributeError:\n pass\n return False", - "docstring": "Save current editor content. Leave file to None to erase the previous\n file content. If the current editor's file_path is None and path\n is None, the function will call\n ``QtWidgets.QFileDialog.getSaveFileName`` to get a valid save filename.\n\n :param path: path of the file to save, leave it None to overwrite\n existing file." - }, - { - "code": "def handle_key(self, key):\n try:\n func = None\n if key.is_sequence:\n try:\n func = self.config.keymap[key.name]\n except:\n try:\n func = self.config.keymap[key.code]\n except:\n func = self.config.keymap[str(key)]\n else:\n func = self.config.keymap[str(key)]\n if func in self.valid_key_funcs:\n getattr(self, func)()\n except:\n raise", - "docstring": "Handle a keypress. Concrete subclasses can implement this method if\n custom keypresses need to be handled other than for exit and scrolling." 
- }, - { - "code": "def _pquery(scheduler, data, ndata, ndim, leafsize,\n x, nx, d, i, k, eps, p, dub, ierr):\n try:\n _data = shmem_as_nparray(data).reshape((ndata, ndim))\n _x = shmem_as_nparray(x).reshape((nx, ndim))\n _d = shmem_as_nparray(d).reshape((nx, k))\n _i = shmem_as_nparray(i).reshape((nx, k))\n kdtree = cKDTree(_data, leafsize=leafsize)\n for s in scheduler:\n d_out, i_out = kdtree.query(_x[s, :], k=k, eps=eps, p=p, distance_upper_bound=dub)\n m_d = d_out.shape[0]\n m_i = i_out.shape[0]\n _d[s, :], _i[s, :] = d_out.reshape(m_d, 1), i_out.reshape(m_i, 1)\n except:\n ierr.value += 1", - "docstring": "Function that parallelly queries the K-D tree based on chunks of data returned by the scheduler" - }, - { - "code": "def process_data(self):\n \"read and process input from self.socket\"\n try:\n reader = getattr(self.socket, 'read', self.socket.recv)\n new_data = reader(2 ** 14)\n except socket.error:\n self.disconnect(\"Connection reset by peer\")\n return\n if not new_data:\n self.disconnect(\"Connection reset by peer\")\n return\n self.buffer.feed(new_data)\n for line in self.buffer:\n log.debug(\"FROM SERVER: %s\", line)\n if not line:\n continue\n self._process_line(line)", - "docstring": "read and process input from self.socket" - }, - { - "code": "def Popen(*args, **kwargs):\n read_line = None\n if 'read_line' in kwargs:\n read_line = kwargs['read_line']\n del kwargs['read_line']\n p = subprocess.Popen(*args, **kwargs)\n wait_stdout = None\n wait_stderr = None\n if p.stdout:\n wait_stdout = sys.stdout.attach(p.stdout, read_line=read_line)\n if p.stderr:\n wait_stderr = sys.stderr.attach(p.stderr)\n original_wait = p.wait\n def wait():\n original_wait()\n if wait_stdout:\n wait_stdout()\n if wait_stderr:\n wait_stderr()\n p.wait = wait\n return p", - "docstring": "Executes a command using subprocess.Popen and redirects output to AETROS and stdout.\n Parses stdout as well for stdout API calls.\n\n Use read_line argument to read stdout of command's stdout line by line.\n Use returned process stdin to communicate with the command.\n\n :return: subprocess.Popen" - }, - { - "code": "def create_manifest(self):\n self.manifest = {}\n self.manifest['applications'] = [{'name': self.app_name}]\n self.manifest['services'] = []\n self.manifest['env'] = {\n 'PREDIXPY_VERSION': str(predix.version),\n }\n self.write_manifest()", - "docstring": "Create a new manifest and write it to\n disk." - }, - { - "code": "def _login(session):\n session.get(LOGIN_REFERER)\n resp = session.post(LOGIN_URL, {\n 'user': session.auth.username,\n 'pwd': session.auth.password\n }, headers={\n 'Referer': LOGIN_REFERER,\n 'X-Requested-With': 'XMLHttpRequest'\n })\n if resp.status_code != 200:\n raise FedexError('could not login')\n data = resp.json()\n if not data['successful']:\n raise FedexError(data['errorList'][0]['error']['message'])\n _save_cookies(session.cookies, session.auth.cookie_path)", - "docstring": "Login to Fedex Delivery Manager." 
- }, - { - "code": "def unblockUser(self, user_id):\n data = {\"fbid\": user_id}\n r = self._post(self.req_url.UNBLOCK_USER, data)\n return r.ok", - "docstring": "Unblocks messages from a blocked user\n\n :param user_id: The ID of the user that you want to unblock\n :return: Whether the request was successful\n :raises: FBchatException if request failed" - }, - { - "code": "def paint_agent_trail(self, y, x, val):\n for j in range(1,self.cell_height-1):\n for i in range(1,self.cell_width-1):\n self.img.put(self.agent_color(val), (x*self.cell_width+i, y*self.cell_height+j))", - "docstring": "paint an agent trail as ONE pixel to allow for multiple agent\n trails to be seen in the same cell" - }, - { - "code": "def update(self, reconfigure=False):\n try:\n self.phase = PHASE.UPDATE\n self.logger.info(\"Updating environment %s...\" % self.namespace)\n self.install_sandboxes()\n self.instantiate_features()\n if reconfigure:\n self.grab_inputs(reconfigure=True)\n else:\n self._copy_source_to_target()\n self._specialize(reconfigure=reconfigure)\n for feature in self.features.run_order:\n self.run_action(feature, 'sync')\n self.inject_environment_config()\n self._finalize()\n except Exception:\n self.logger.debug(\"\", exc_info=sys.exc_info())\n et, ei, tb = sys.exc_info()\n reraise(et, ei, tb)", - "docstring": "update the environment" - }, - { - "code": "def from_response(self, response_data):\n return HSAccessTokenAuth(\n response_data['access_token'],\n response_data['token_type'], \n response_data['refresh_token'],\n response_data['expires_in'], \n response_data.get('state')\n )", - "docstring": "Builds a new HSAccessTokenAuth straight from response data \n\n Args:\n response_data (dict): Response data to use\n\n Returns:\n A HSAccessTokenAuth objet" - }, - { - "code": "def search(self, search_content, search_type, limit=9):\n url = 'http://music.163.com/weapi/cloudsearch/get/web?csrf_token='\n params = {'s': search_content, 'type': search_type, 'offset': 0,\n 'sub': 'false', 'limit': limit}\n result = self.post_request(url, params)\n return result", - "docstring": "Search entrance.\n\n :params search_content: search content.\n :params search_type: search type.\n :params limit: result count returned by weapi.\n :return: a dict." - }, - { - "code": "def is_fasta_valid(fasta_file):\n try:\n f = pysam.FastaFile(fasta_file)\n except IOError:\n raise\n else:\n f.close()\n return True", - "docstring": "Check if fasta file is valid. Raises a ValueError if pysam cannot read the file.\n\n #TODO: pysam does not differentiate between BAM and SAM" - }, - { - "code": "def published(self, request=None):\n language = getattr(request, 'LANGUAGE_CODE', get_language())\n if not language:\n return self.model.objects.none()\n qs = self.get_queryset()\n qs = qs.filter(\n translations__is_published=True,\n translations__language_code=language,\n )\n qs = qs.filter(\n models.Q(category__isnull=True) |\n models.Q(category__is_published=True))\n return qs", - "docstring": "Returns the published documents in the current language.\n\n :param request: A Request instance." 
- }, - { - "code": "def index_buses(self, buses=None, start=0):\n bs = self.connected_buses if buses is None else buses\n for i, b in enumerate(bs):\n b._i = start + i", - "docstring": "Updates the indices of all buses.\n\n @param start: Starting index, typically 0 or 1.\n @type start: int" - }, - { - "code": "def get_all(self, request, notifications, mark_as_read=False):\n return self.list(request, notifications)", - "docstring": "return all notifications with pagination" - }, - { - "code": "def as_datetime(self):\n year = int(self.year)\n month = int(self.month) if self.month else 1\n day = int(self.day) if self.day else 1\n hour = int(self.hour) if self.hour else 0\n minute = int(self.minute) if self.minute else 0\n second = int(self.second) if self.second else 0\n microsecond = int(self.microsecond) if self.microsecond else 0\n return datetime.datetime(year, month, day, hour, minute, second, microsecond)", - "docstring": "Get as python datetime.datetime.\n\n Require year to be a valid datetime year. Default month and day to 1 if\n do not exist.\n\n @return: datetime.datetime object." - }, - { - "code": "def do_walk(data_path):\n data_files = {}\n def cond(File, prefix):\n file_path = path.join(prefix, 'data_files', File)\n return (not File.startswith('!') and\n not File.endswith('~') and\n not File.endswith('\n not File.endswith('.pyc') and\n not File.startswith('.') and\n path.exists(path.join(prefix, File)))\n for (dir_path, dirs, files) in os.walk(data_path):\n data_files[dir_path] = [f for f in files if cond(f, dir_path)]\n if not dirs:\n continue\n else:\n for Dir in dirs:\n do_walk(os.path.join(dir_path, Dir))\n return data_files", - "docstring": "Walk through data_files and list all in dict format" - }, - { - "code": "def create(self, virtual_host):\n virtual_host = quote(virtual_host, '')\n return self.http_client.put(API_VIRTUAL_HOST % virtual_host)", - "docstring": "Create a Virtual Host.\n\n :param str virtual_host: Virtual host name\n\n :raises ApiError: Raises if the remote server encountered an error.\n :raises ApiConnectionError: Raises if there was a connectivity issue.\n\n :rtype: dict" - }, - { - "code": "def cancelMktData(self, contract: Contract):\n ticker = self.ticker(contract)\n reqId = self.wrapper.endTicker(ticker, 'mktData')\n if reqId:\n self.client.cancelMktData(reqId)\n else:\n self._logger.error(\n 'cancelMktData: ' f'No reqId found for contract {contract}')", - "docstring": "Unsubscribe from realtime streaming tick data.\n\n Args:\n contract: The exact contract object that was used to\n subscribe with." 
- }, - { - "code": "def keep_alive(self, conn):\n while self.__up:\n msg = conn.recv(len(AUTH_KEEP_ALIVE))\n if msg != AUTH_KEEP_ALIVE:\n log.error('Received something other than %s', AUTH_KEEP_ALIVE)\n conn.close()\n return\n try:\n conn.send(AUTH_KEEP_ALIVE_ACK)\n except (IOError, socket.error) as err:\n log.error('Unable to send auth keep alive: %s', err)\n conn.close()\n return", - "docstring": "Maintains auth sessions" - }, - { - "code": "def pulse(time, start, duration):\n t = time()\n return 1 if start <= t < start + duration else 0", - "docstring": "Implements vensim's PULSE function\n\n In range [-inf, start) returns 0\n In range [start, start + duration) returns 1\n In range [start + duration, +inf] returns 0" - }, - { - "code": "def time_to_jump( self ):\n k_tot = rate_prefactor * np.sum( self.p )\n return -( 1.0 / k_tot ) * math.log( random.random() )", - "docstring": "The timestep until the next jump.\n\n Args:\n None\n\n Returns:\n (Float): The timestep until the next jump." - }, - { - "code": "def all_query(expression):\n def _all(index, expression=expression):\n ev = expression() if callable(expression) else expression\n try:\n iter(ev)\n except TypeError:\n raise AttributeError('$all argument must be an iterable!')\n hashed_ev = [index.get_hash_for(v) for v in ev]\n store_keys = set()\n if len(hashed_ev) == 0:\n return []\n store_keys = set(index.get_keys_for(hashed_ev[0]))\n for value in hashed_ev[1:]:\n store_keys &= set(index.get_keys_for(value))\n return list(store_keys)\n return _all", - "docstring": "Match arrays that contain all elements in the query." - }, - { - "code": "def guess_currency_from_address(address):\n if is_py2:\n fixer = lambda x: int(x.encode('hex'), 16)\n else:\n fixer = lambda x: x\n first_byte = fixer(b58decode_check(address)[0])\n double_first_byte = fixer(b58decode_check(address)[:2])\n hits = []\n for currency, data in crypto_data.items():\n if hasattr(data, 'get'):\n version = data.get('address_version_byte', None)\n if version is not None and version in [double_first_byte, first_byte]:\n hits.append([currency, data['name']])\n if hits:\n return hits\n raise ValueError(\"Unknown Currency with first byte: %s\" % first_byte)", - "docstring": "Given a crypto address, find which currency it likely belongs to.\n Raises an exception if it can't find a match. Raises exception if address\n is invalid." - }, - { - "code": "def neighbors_iter(self):\n for n, adj in self.graph.adj.items():\n yield n, {n: attr[\"bond\"] for n, attr in adj.items()}", - "docstring": "Iterate over atoms and return its neighbors." - }, - { - "code": "def create_api_pool(self):\n return ApiPool(\n self.networkapi_url,\n self.user,\n self.password,\n self.user_ldap)", - "docstring": "Get an instance of Api Pool services facade." - }, - { - "code": "def all_high_level_calls(self):\n if self._all_high_level_calls is None:\n self._all_high_level_calls = self._explore_functions(lambda x: x.high_level_calls)\n return self._all_high_level_calls", - "docstring": "recursive version of high_level calls" - }, - { - "code": "def _get_field_by_name(table: LdapObjectClass, name: str) -> tldap.fields.Field:\n fields = table.get_fields()\n return fields[name]", - "docstring": "Lookup a field by its name." 
- }, - { - "code": "def dataset(ctx, client, revision, datadir, format):\n ctx.meta['renku.datasets.datadir'] = datadir\n if ctx.invoked_subcommand is not None:\n return\n if revision is None:\n datasets = client.datasets.values()\n else:\n datasets = client.datasets_from_commit(client.repo.commit(revision))\n DATASETS_FORMATS[format](client, datasets)", - "docstring": "Handle datasets." - }, - { - "code": "def run_example(example_name, environ):\n mod = EXAMPLE_MODULES[example_name]\n register_calendar(\"YAHOO\", get_calendar(\"NYSE\"), force=True)\n return run_algorithm(\n initialize=getattr(mod, 'initialize', None),\n handle_data=getattr(mod, 'handle_data', None),\n before_trading_start=getattr(mod, 'before_trading_start', None),\n analyze=getattr(mod, 'analyze', None),\n bundle='test',\n environ=environ,\n **merge({'capital_base': 1e7}, mod._test_args())\n )", - "docstring": "Run an example module from zipline.examples." - }, - { - "code": "def getCandScoresMap(self, profile):\n elecType = profile.getElecType()\n if elecType != \"soc\" and elecType != \"toc\":\n print(\"ERROR: unsupported election type\")\n exit()\n candScoresMap = dict()\n for cand in profile.candMap.keys():\n candScoresMap[cand] = 0.0\n rankMaps = profile.getRankMaps()\n rankMapCounts = profile.getPreferenceCounts()\n scoringVector = self.getScoringVector(profile)\n for i in range(0, len(rankMaps)):\n rankMap = rankMaps[i]\n rankMapCount = rankMapCounts[i]\n for cand in rankMap.keys():\n candScoresMap[cand] += scoringVector[rankMap[cand] - 1] * rankMapCount\n return candScoresMap", - "docstring": "Returns a dictonary that associates the integer representation of each candidate with the\n score they recieved in the profile.\n\n :ivar Profile profile: A Profile object that represents an election profile." - }, - { - "code": "def _add_kwarg_datasets(datasets, kwargs):\n for test_method_suffix, dataset in six.iteritems(kwargs):\n datasets[test_method_suffix] = dataset", - "docstring": "Add data sets of the given kwargs.\n\n :param datasets:\n The dict where to accumulate data sets.\n :type datasets:\n `dict`\n :param kwargs:\n Dict of pre-named data sets.\n :type kwargs:\n `dict` of `unicode` to varies" - }, - { - "code": "def _validate(self, writing=False):\n if self.brand not in ['jp2 ', 'jpx ']:\n msg = (\"The file type brand was '{brand}'. \"\n \"It should be either 'jp2 ' or 'jpx '.\")\n msg = msg.format(brand=self.brand)\n if writing:\n raise IOError(msg)\n else:\n warnings.warn(msg, UserWarning)\n for item in self.compatibility_list:\n if item not in self._valid_cls:\n msg = (\"The file type compatibility list {items} is \"\n \"not valid. All items should be members of \"\n \"{valid_entries}.\")\n msg = msg.format(items=self.compatibility_list,\n valid_entries=self._valid_cls)\n if writing:\n raise IOError(msg)\n else:\n warnings.warn(msg, UserWarning)", - "docstring": "Validate the box before writing to file." 
- }, - { - "code": "def wirevector_list(names, bitwidth=None, wvtype=WireVector):\n if isinstance(names, str):\n names = names.replace(',', ' ').split()\n if any('/' in name for name in names) and bitwidth is not None:\n raise PyrtlError('only one of optional \"/\" or bitwidth parameter allowed')\n if bitwidth is None:\n bitwidth = 1\n if isinstance(bitwidth, numbers.Integral):\n bitwidth = [bitwidth]*len(names)\n if len(bitwidth) != len(names):\n raise ValueError('number of names ' + str(len(names))\n + ' should match number of bitwidths ' + str(len(bitwidth)))\n wirelist = []\n for fullname, bw in zip(names, bitwidth):\n try:\n name, bw = fullname.split('/')\n except ValueError:\n name, bw = fullname, bw\n wirelist.append(wvtype(bitwidth=int(bw), name=name))\n return wirelist", - "docstring": "Allocate and return a list of WireVectors.\n\n :param names: Names for the WireVectors. Can be a list or single comma/space-separated string\n :param bitwidth: The desired bitwidth for the resulting WireVectors.\n :param WireVector wvtype: Which WireVector type to create.\n :return: List of WireVectors.\n\n Additionally, the ``names`` string can also contain an additional bitwidth specification\n separated by a ``/`` in the name. This cannot be used in combination with a ``bitwidth``\n value other than ``1``.\n\n Examples: ::\n\n wirevector_list(['name1', 'name2', 'name3'])\n wirevector_list('name1, name2, name3')\n wirevector_list('input1 input2 input3', bitwidth=8, wvtype=pyrtl.wire.Input)\n wirevector_list('output1, output2 output3', bitwidth=3, wvtype=pyrtl.wire.Output)\n wirevector_list('two_bits/2, four_bits/4, eight_bits/8')\n wirevector_list(['name1', 'name2', 'name3'], bitwidth=[2, 4, 8])" - }, - { - "code": "def get_bound(pts):\n (x0, y0, x1, y1) = (INF, INF, -INF, -INF)\n for (x, y) in pts:\n x0 = min(x0, x)\n y0 = min(y0, y)\n x1 = max(x1, x)\n y1 = max(y1, y)\n return (x0, y0, x1, y1)", - "docstring": "Compute a minimal rectangle that covers all the points." - }, - { - "code": "def earth_accel(RAW_IMU,ATTITUDE):\n r = rotation(ATTITUDE)\n accel = Vector3(RAW_IMU.xacc, RAW_IMU.yacc, RAW_IMU.zacc) * 9.81 * 0.001\n return r * accel", - "docstring": "return earth frame acceleration vector" - }, - { - "code": "def sort_key(val):\n return numpy.sum((max(val)+1)**numpy.arange(len(val)-1, -1, -1)*val)", - "docstring": "Sort key for sorting keys in grevlex order." - }, - { - "code": "def populate_menv(menv, agent_cls_name, log_folder):\n gs = menv.gs\n n_agents = gs[0] * gs[1]\n n_slaves = len(menv.addrs)\n logger.info(\"Populating {} with {} agents\".format(HOST, n_agents*n_slaves))\n run(menv.populate(agent_cls_name, n_agents, log_folder=log_folder))\n logger.info(\"Populating complete.\")", - "docstring": "Populate given multiprocessing grid environment with agents.\n\n :param menv: Instance of :py:class:`GridMultiEnvironment`\n :param str agent_cls_name: Name of the agent class, e.g. 'grip_mp:GridAgent'\n :param str log_folder: Root logging folder for the agents." - }, - { - "code": "def run(self, resources):\n if not resources['connection']._port.startswith('jlink'):\n raise ArgumentError(\"FlashBoardStep is currently only possible through jlink\", invalid_port=args['port'])\n hwman = resources['connection']\n debug = hwman.hwman.debug(self._debug_string)\n debug.flash(self._file)", - "docstring": "Runs the flash step\n\n Args:\n resources (dict): A dictionary containing the required resources that\n we needed access to in order to perform this step." 
- }, - { - "code": "def rels(self):\n r = []\n for i in self.metadata:\n r = r + i[REL]\n return []", - "docstring": "Returns a LIST of all the metadata relations" - }, - { - "code": "def fetch(self):\n if self._newflg:\n self._newflg = False\n temp_dtgram = copy.deepcopy(self._dtgram)\n for (bufid, buffer) in self._buffer.items():\n temp_dtgram += self.submit(buffer, bufid=bufid)\n return tuple(temp_dtgram)\n return tuple(self._dtgram)", - "docstring": "Fetch datagram." - }, - { - "code": "def eknelt(selidx, row):\n selidx = ctypes.c_int(selidx)\n row = ctypes.c_int(row)\n return libspice.eknelt_c(selidx, row)", - "docstring": "Return the number of elements in a specified column entry in\n the current row.\n\n http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eknelt_c.html\n\n :param selidx: Index of parent column in SELECT clause.\n :type selidx: int\n :param row: Row containing element.\n :type row: int\n :return: The number of elements in entry in current row.\n :rtype: int" - }, - { - "code": "def _parse_resource_reference(cls, ref_value):\n no_result = (None, None)\n if not isinstance(ref_value, string_types):\n return no_result\n splits = ref_value.split(cls._resource_ref_separator, 1)\n if len(splits) != 2 or not all(splits):\n return no_result\n return splits[0], splits[1]", - "docstring": "Splits a resource reference of structure \"LogicalId.Property\" and returns the \"LogicalId\" and \"Property\"\n separately.\n\n :param string ref_value: Input reference value which *may* contain the structure \"LogicalId.Property\"\n :return string, string: Returns two values - logical_id, property. If the input does not contain the structure,\n then both `logical_id` and property will be None" - }, - { - "code": "def rotate(self, g):\n temp_matrix = Matrix3()\n a = self.a\n b = self.b\n c = self.c\n temp_matrix.a.x = a.y * g.z - a.z * g.y\n temp_matrix.a.y = a.z * g.x - a.x * g.z\n temp_matrix.a.z = a.x * g.y - a.y * g.x\n temp_matrix.b.x = b.y * g.z - b.z * g.y\n temp_matrix.b.y = b.z * g.x - b.x * g.z\n temp_matrix.b.z = b.x * g.y - b.y * g.x\n temp_matrix.c.x = c.y * g.z - c.z * g.y\n temp_matrix.c.y = c.z * g.x - c.x * g.z\n temp_matrix.c.z = c.x * g.y - c.y * g.x\n self.a += temp_matrix.a\n self.b += temp_matrix.b\n self.c += temp_matrix.c", - "docstring": "rotate the matrix by a given amount on 3 axes" - }, - { - "code": "def _find_metric_value(session_or_group, metric_name):\n for metric_value in session_or_group.metric_values:\n if (metric_value.name.tag == metric_name.tag and\n metric_value.name.group == metric_name.group):\n return metric_value", - "docstring": "Returns the metric_value for a given metric in a session or session group.\n\n Args:\n session_or_group: A Session protobuffer or SessionGroup protobuffer.\n metric_name: A MetricName protobuffer. The metric to search for.\n Returns:\n A MetricValue protobuffer representing the value of the given metric or\n None if no such metric was found in session_or_group." 
- }, - { - "code": "def download(\n feature_type,\n output_base_path,\n extent,\n progress_dialog=None,\n server_url=None):\n if not server_url:\n server_url = PRODUCTION_SERVER\n min_longitude = extent[0]\n min_latitude = extent[1]\n max_longitude = extent[2]\n max_latitude = extent[3]\n box = (\n '{min_longitude},{min_latitude},{max_longitude},'\n '{max_latitude}').format(\n min_longitude=min_longitude,\n min_latitude=min_latitude,\n max_longitude=max_longitude,\n max_latitude=max_latitude\n )\n url = (\n '{url_osm_prefix}'\n '{feature_type}'\n '{url_osm_suffix}?'\n 'bbox={box}&'\n 'qgis_version={qgis}&'\n 'lang={lang}&'\n 'inasafe_version={inasafe_version}'.format(\n url_osm_prefix=server_url,\n feature_type=feature_type,\n url_osm_suffix=URL_OSM_SUFFIX,\n box=box,\n qgis=qgis_version(),\n lang=locale(),\n inasafe_version=get_version()))\n path = tempfile.mktemp('.shp.zip')\n fetch_zip(url, path, feature_type, progress_dialog)\n extract_zip(path, output_base_path)\n if progress_dialog:\n progress_dialog.done(QDialog.Accepted)", - "docstring": "Download shapefiles from Kartoza server.\n\n .. versionadded:: 3.2\n\n :param feature_type: What kind of features should be downloaded.\n Currently 'buildings', 'building-points' or 'roads' are supported.\n :type feature_type: str\n\n :param output_base_path: The base path of the shape file.\n :type output_base_path: str\n\n :param extent: A list in the form [xmin, ymin, xmax, ymax] where all\n coordinates provided are in Geographic / EPSG:4326.\n :type extent: list\n\n :param progress_dialog: A progress dialog.\n :type progress_dialog: QProgressDialog\n\n :param server_url: The server URL to use.\n :type: basestring\n\n :raises: ImportDialogError, CanceledImportDialogError" - }, - { - "code": "def parse_equality(cls, equality_string):\n cls.register()\n assert '=' in equality_string, \"There must be an '=' sign in the equality\"\n [left_side, right_side] = equality_string.split('=', 1)\n left_side_value = yaml.safe_load(left_side.strip())\n right_side_value = yaml.safe_load(right_side.strip())\n assert isinstance(left_side_value, str), \"Left side of equality must be a string\"\n return left_side_value, right_side_value", - "docstring": "Parse some simple equality statements" - }, - { - "code": "def handle(self, *args, **options):\n\t\tfor subscriber in get_subscriber_model().objects.filter(djstripe_customers=None):\n\t\t\tCustomer.get_or_create(subscriber=subscriber)\n\t\t\tprint(\"Created subscriber for {0}\".format(subscriber.email))", - "docstring": "Create Customer objects for Subscribers without Customer objects associated." - }, - { - "code": "def generate(env):\n path, _f77, _shf77, version = get_xlf77(env)\n if path:\n _f77 = os.path.join(path, _f77)\n _shf77 = os.path.join(path, _shf77)\n f77.generate(env)\n env['F77'] = _f77\n env['SHF77'] = _shf77", - "docstring": "Add Builders and construction variables for the Visual Age FORTRAN\n compiler to an Environment." - }, - { - "code": "def parse_slab_stats(slab_stats):\n stats_dict = {'slabs': defaultdict(lambda: {})}\n for line in slab_stats.splitlines():\n if line == 'END':\n break\n cmd, key, value = line.split(' ')\n if cmd != 'STAT':\n continue\n if \":\" not in key:\n stats_dict[key] = int(value)\n continue\n slab, key = key.split(':')\n stats_dict['slabs'][int(slab)][key] = int(value)\n return stats_dict", - "docstring": "Convert output from memcached's `stats slabs` into a Python dict.\n\n Newlines are returned by memcached along with carriage returns\n (i.e. 
'\\r\\n').\n\n >>> parse_slab_stats(\n \"STAT 1:chunk_size 96\\r\\nSTAT 1:chunks_per_page 10922\\r\\nSTAT \"\n \"active_slabs 1\\r\\nSTAT total_malloced 1048512\\r\\nEND\\r\\n\")\n {\n 'slabs': {\n 1: {\n 'chunk_size': 96,\n 'chunks_per_page': 10922,\n # ...\n },\n },\n 'active_slabs': 1,\n 'total_malloced': 1048512,\n }" - }, - { - "code": "def get_shape(self, prune=False, hs_dims=None):\n if not prune:\n return self.as_array(include_transforms_for_dims=hs_dims).shape\n shape = compress_pruned(\n self.as_array(prune=True, include_transforms_for_dims=hs_dims)\n ).shape\n return tuple(n for n in shape if n > 1)", - "docstring": "Tuple of array dimensions' lengths.\n\n It returns a tuple of ints, each representing the length of a cube\n dimension, in the order those dimensions appear in the cube.\n Pruning is supported. Dimensions that get reduced to a single element\n (e.g. due to pruning) are removed from the returning shape, thus\n allowing for the differentiation between true 2D cubes (over which\n statistical testing can be performed) and essentially\n 1D cubes (over which it can't).\n\n Usage:\n\n >>> shape = get_shape()\n >>> pruned_shape = get_shape(prune=True)" - }, - { - "code": "def members(self):\n result = list()\n for member in self.run_command(command=\"replSetGetStatus\", is_eval=False)['members']:\n result.append({\n \"_id\": member['_id'],\n \"host\": member[\"name\"],\n \"server_id\": self._servers.host_to_server_id(member[\"name\"]),\n \"state\": member['state']\n })\n return result", - "docstring": "return list of members information" - }, - { - "code": "def ready(self, count):\n self.ready_count = count\n self.send(nsq.ready(count))", - "docstring": "Indicate you are ready to receive ``count`` messages." - }, - { - "code": "def check_errors(self, is_global=False):\n errors = self.global_errors if is_global else self.errors\n if errors:\n print('dfTimewolf encountered one or more errors:')\n for error, critical in errors:\n print('{0:s} {1:s}'.format('CRITICAL: ' if critical else '', error))\n if critical:\n print('Critical error found. Aborting.')\n sys.exit(-1)", - "docstring": "Checks for errors and exits if any of them are critical.\n\n Args:\n is_global: If True, check the global_errors attribute. If false, check the\n error attribute." - }, - { - "code": "def generate_configuration(directory):\n conf = osp.join(get_module_source_path('spyder.plugins.help.utils'),\n 'conf.py')\n layout = osp.join(osp.join(CONFDIR_PATH, 'templates'), 'layout.html')\n os.makedirs(osp.join(directory, 'templates'))\n os.makedirs(osp.join(directory, 'static'))\n shutil.copy(conf, directory)\n shutil.copy(layout, osp.join(directory, 'templates'))\n open(osp.join(directory, '__init__.py'), 'w').write('')\n open(osp.join(directory, 'static', 'empty'), 'w').write('')", - "docstring": "Generates a Sphinx configuration in `directory`.\n\n Parameters\n ----------\n directory : str\n Base directory to use" - }, - { - "code": "def _right_zero_blocks(self, r):\n if not self._include_off_diagonal:\n return self._block_rows - r - 1\n elif self._upper:\n return 0\n elif self._include_diagonal:\n return self._block_rows - r - 1\n else:\n return self._block_rows - r", - "docstring": "Number of blocks with zeros from the right in block row `r`." - }, - { - "code": "def formatTime(self, record, datefmt=None):\n formatted = super(PalletFormatter, self).formatTime(\n record, datefmt=datefmt)\n return formatted + '.%03dZ' % record.msecs", - "docstring": "Format time, including milliseconds." 
- }, - { - "code": "def get_object_closure(subject, object_category=None, **kwargs):\n results = search_associations(subject=subject,\n object_category=object_category,\n select_fields=[],\n facet_fields=[M.OBJECT_CLOSURE],\n facet_limit=-1,\n rows=0,\n **kwargs)\n return set(results['facet_counts'][M.OBJECT_CLOSURE].keys())", - "docstring": "Find all terms used to annotate subject plus ancestors" - }, - { - "code": "def html_parts(input_string, source_path=None, destination_path=None,\n input_encoding='unicode', doctitle=1, initial_header_level=1):\n overrides = {\n 'input_encoding': input_encoding,\n 'doctitle_xform': doctitle,\n 'initial_header_level': initial_header_level,\n 'report_level': 5\n }\n parts = core.publish_parts(\n source=input_string, source_path=source_path,\n destination_path=destination_path,\n writer_name='html', settings_overrides=overrides)\n return parts", - "docstring": "Given an input string, returns a dictionary of HTML document parts.\n\n Dictionary keys are the names of parts, and values are Unicode strings;\n encoding is up to the client.\n\n Parameters:\n\n - `input_string`: A multi-line text string; required.\n - `source_path`: Path to the source file or object. Optional, but useful\n for diagnostic output (system messages).\n - `destination_path`: Path to the file or object which will receive the\n output; optional. Used for determining relative paths (stylesheets,\n source links, etc.).\n - `input_encoding`: The encoding of `input_string`. If it is an encoded\n 8-bit string, provide the correct encoding. If it is a Unicode string,\n use \"unicode\", the default.\n - `doctitle`: Disable the promotion of a lone top-level section title to\n document title (and subsequent section title to document subtitle\n promotion); enabled by default.\n - `initial_header_level`: The initial level for header elements (e.g. 1\n for \"
<h1>
\")." - }, - { - "code": "def setup_venv(self):\n venv = self.opts.venv\n if not venv:\n venv = os.environ.get('CRONY_VENV')\n if not venv and self.config['crony']:\n venv = self.config['crony'].get('venv')\n if venv:\n if not venv.endswith('activate'):\n add_path = os.path.join('bin', 'activate')\n self.logger.debug(f'Venv directory given, adding {add_path}')\n venv = os.path.join(venv, add_path)\n self.logger.debug(f'Adding sourcing virtualenv {venv}')\n self.cmd = f'. {venv} && {self.cmd}'", - "docstring": "Setup virtualenv if necessary." - }, - { - "code": "def Grashof(L, beta, T1, T2=0, rho=None, mu=None, nu=None, g=g):\n r\n if rho and mu:\n nu = mu/rho\n elif not nu:\n raise Exception('Either density and viscosity, or dynamic viscosity, \\\n is needed')\n return g*beta*abs(T2-T1)*L**3/nu**2", - "docstring": "r'''Calculates Grashof number or `Gr` for a fluid with the given\n properties, temperature difference, and characteristic length.\n\n .. math::\n Gr = \\frac{g\\beta (T_s-T_\\infty)L^3}{\\nu^2}\n = \\frac{g\\beta (T_s-T_\\infty)L^3\\rho^2}{\\mu^2}\n\n Inputs either of any of the following sets:\n\n * L, beta, T1 and T2, and density `rho` and kinematic viscosity `mu`\n * L, beta, T1 and T2, and dynamic viscosity `nu`\n\n Parameters\n ----------\n L : float\n Characteristic length [m]\n beta : float\n Volumetric thermal expansion coefficient [1/K]\n T1 : float\n Temperature 1, usually a film temperature [K]\n T2 : float, optional\n Temperature 2, usually a bulk temperature (or 0 if only a difference\n is provided to the function) [K]\n rho : float, optional\n Density, [kg/m^3]\n mu : float, optional\n Dynamic viscosity, [Pa*s]\n nu : float, optional\n Kinematic viscosity, [m^2/s]\n g : float, optional\n Acceleration due to gravity, [m/s^2]\n\n Returns\n -------\n Gr : float\n Grashof number []\n\n Notes\n -----\n .. math::\n Gr = \\frac{\\text{Buoyancy forces}}{\\text{Viscous forces}}\n\n An error is raised if none of the required input sets are provided.\n Used in free convection problems only.\n\n Examples\n --------\n Example 4 of [1]_, p. 1-21 (matches):\n\n >>> Grashof(L=0.9144, beta=0.000933, T1=178.2, rho=1.1613, mu=1.9E-5)\n 4656936556.178915\n >>> Grashof(L=0.9144, beta=0.000933, T1=378.2, T2=200, nu=1.636e-05)\n 4657491516.530312\n\n References\n ----------\n .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,\n Eighth Edition. McGraw-Hill Professional, 2007.\n .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and\n Applications. Boston: McGraw Hill Higher Education, 2006." 
- }, - { - "code": "def invalidate(self, callback=True):\n with self._lock:\n if self.state != StoredInstance.VALID:\n return False\n self.state = StoredInstance.INVALID\n self.__safe_handlers_callback(\"pre_invalidate\")\n if callback:\n self.__safe_validation_callback(\n constants.IPOPO_CALLBACK_INVALIDATE\n )\n self._ipopo_service._fire_ipopo_event(\n constants.IPopoEvent.INVALIDATED,\n self.factory_name,\n self.name,\n )\n self.__safe_handlers_callback(\"post_invalidate\")\n return True", - "docstring": "Applies the component invalidation.\n\n :param callback: If True, call back the component before the\n invalidation\n :return: False if the component wasn't valid" - }, - { - "code": "def wb020(self, value=None):\n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `wb020`'.format(value))\n self._wb020 = value", - "docstring": "Corresponds to IDD Field `wb020`\n Wet-bulb temperature corresponding to 02.0% annual cumulative frequency of occurrence\n\n Args:\n value (float): value for IDD Field `wb020`\n Unit: C\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\n Raises:\n ValueError: if `value` is not a valid value" - }, - { - "code": "def to_td(frame, name, con, if_exists='fail', time_col=None, time_index=None, index=True, index_label=None, chunksize=10000, date_format=None):\n database, table = name.split('.')\n uploader = StreamingUploader(con.client, database, table, show_progress=True, clear_progress=True)\n uploader.message('Streaming import into: {0}.{1}'.format(database, table))\n if if_exists == 'fail':\n try:\n con.client.table(database, table)\n except tdclient.api.NotFoundError:\n uploader.message('creating new table...')\n con.client.create_log_table(database, table)\n else:\n raise RuntimeError('table \"%s\" already exists' % name)\n elif if_exists == 'replace':\n try:\n con.client.table(database, table)\n except tdclient.api.NotFoundError:\n pass\n else:\n uploader.message('deleting old table...')\n con.client.delete_table(database, table)\n uploader.message('creating new table...')\n con.client.create_log_table(database, table)\n elif if_exists == 'append':\n try:\n con.client.table(database, table)\n except tdclient.api.NotFoundError:\n uploader.message('creating new table...')\n con.client.create_log_table(database, table)\n else:\n raise ValueError('invalid value for if_exists: %s' % if_exists)\n if time_index:\n index = None\n frame = frame.copy()\n frame = _convert_time_column(frame, time_col, time_index)\n frame = _convert_index_column(frame, index, index_label)\n frame = _convert_date_format(frame, date_format)\n uploader.upload_frame(frame, chunksize)\n uploader.wait_for_import(len(frame))", - "docstring": "Write a DataFrame to a Treasure Data table.\n\n This method converts the dataframe into a series of key-value pairs\n and send them using the Treasure Data streaming API. The data is divided\n into chunks of rows (default 10,000) and uploaded separately. If upload\n failed, the client retries the process for a certain amount of time\n (max_cumul_retry_delay; default 600 secs). This method may fail and\n raise an exception when retries did not success, in which case the data\n may be partially inserted. 
Use the bulk import utility if you cannot\n accept partial inserts.\n\n Parameters\n ----------\n frame : DataFrame\n DataFrame to be written.\n name : string\n Name of table to be written, in the form 'database.table'.\n con : Connection\n Connection to a Treasure Data account.\n if_exists: {'fail', 'replace', 'append'}, default 'fail'\n - fail: If table exists, do nothing.\n - replace: If table exists, drop it, recreate it, and insert data.\n - append: If table exists, insert data. Create if does not exist.\n time_col : string, optional\n Column name to use as \"time\" column for the table. Column type must be\n integer (unixtime), datetime, or string. If None is given (default),\n then the current time is used as time values.\n time_index : int, optional\n Level of index to use as \"time\" column for the table. Set 0 for a single index.\n This parameter implies index=False.\n index : boolean, default True\n Write DataFrame index as a column.\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and index is True,\n then the index names are used. A sequence should be given if the DataFrame uses\n MultiIndex.\n chunksize : int, default 10,000\n Number of rows to be inserted in each chunk from the dataframe.\n date_format : string, default None\n Format string for datetime objects" - }, - { - "code": "def evaluateCommand(self, cmd):\n for msg in self.flunkingIssues:\n if self.counts[msg] != 0:\n return FAILURE\n if self.getProperty('cppcheck-total') != 0:\n return WARNINGS\n return SUCCESS", - "docstring": "cppcheck always return 0, unless a special parameter is given" - }, - { - "code": "def shell_django(session: DjangoSession, backend: ShellBackend):\n namespace = {\n 'session': session\n }\n namespace.update(backend.get_namespace())\n embed(user_ns=namespace, header=backend.header)", - "docstring": "This command includes Django DB Session" - }, - { - "code": "def get_user_presence(self, userid):\n response, status_code = self.__pod__.Presence.get_v2_user_uid_presence(\n sessionToken=self.__session__,\n uid=userid\n ).result()\n self.logger.debug('%s: %s' % (status_code, response))\n return status_code, response", - "docstring": "check on presence of a user" - }, - { - "code": "def Uninitialize(\n self,\n Channel):\n try:\n res = self.__m_dllBasic.CAN_Uninitialize(Channel)\n return TPCANStatus(res)\n except:\n logger.error(\"Exception on PCANBasic.Uninitialize\")\n raise", - "docstring": "Uninitializes one or all PCAN Channels initialized by CAN_Initialize\n\n Remarks:\n Giving the TPCANHandle value \"PCAN_NONEBUS\", uninitialize all initialized channels\n\n Parameters:\n Channel : A TPCANHandle representing a PCAN Channel\n\n Returns:\n A TPCANStatus error code" - }, - { - "code": "def add_relation(app_f, app_t, weight=1):\n recs = TabRel.select().where(\n (TabRel.post_f_id == app_f) & (TabRel.post_t_id == app_t)\n )\n if recs.count() > 1:\n for record in recs:\n MRelation.delete(record.uid)\n if recs.count() == 0:\n uid = tools.get_uuid()\n entry = TabRel.create(\n uid=uid,\n post_f_id=app_f,\n post_t_id=app_t,\n count=1,\n )\n return entry.uid\n elif recs.count() == 1:\n MRelation.update_relation(app_f, app_t, weight)\n else:\n return False", - "docstring": "Adding relation between two posts." 
- }, - { - "code": "def analyze(data, normalize=None, reduce=None, ndims=None, align=None, internal=False):\n return aligner(reducer(normalizer(data, normalize=normalize, internal=internal),\n reduce=reduce, ndims=ndims, internal=internal), align=align)", - "docstring": "Wrapper function for normalize -> reduce -> align transformations.\n\n Parameters\n ----------\n data : numpy array, pandas df, or list of arrays/dfs\n The data to analyze\n\n normalize : str or False or None\n If set to 'across', the columns of the input data will be z-scored\n across lists (default). That is, the z-scores will be computed with\n with respect to column n across all arrays passed in the list. If set\n to 'within', the columns will be z-scored within each list that is\n passed. If set to 'row', each row of the input data will be z-scored.\n If set to False, the input data will be returned with no z-scoring.\n\n reduce : str or dict\n Decomposition/manifold learning model to use. Models supported: PCA,\n IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,\n FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,\n TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be\n passed as a string, but for finer control of the model parameters, pass\n as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.\n See scikit-learn specific model docs for details on parameters supported\n for each model.\n\n ndims : int\n Number of dimensions to reduce\n\n align : str or dict\n If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be\n hyperalignment. If 'SRM', alignment algorithm will be shared response\n model. You can also pass a dictionary for finer control, where the 'model'\n key is a string that specifies the model and the params key is a dictionary\n of parameter values (default : 'hyper').\n\n Returns\n ----------\n analyzed_data : list of numpy arrays\n The processed data" - }, - { - "code": "def init_token(self):\n app_filename = appid_file()\n token_filename = token_file()\n approle_filename = approle_file()\n token = None\n if 'VAULT_ROLE_ID' in os.environ and \\\n 'VAULT_SECRET_ID' in os.environ and \\\n os.environ['VAULT_ROLE_ID'] and os.environ['VAULT_SECRET_ID']:\n token = approle_token(self,\n os.environ['VAULT_ROLE_ID'],\n os.environ['VAULT_SECRET_ID'])\n LOG.debug(\"Token derived from VAULT_ROLE_ID and VAULT_SECRET_ID\")\n elif 'VAULT_TOKEN' in os.environ and os.environ['VAULT_TOKEN']:\n LOG.debug('Token derived from VAULT_TOKEN environment variable')\n token = os.environ['VAULT_TOKEN'].strip()\n elif 'VAULT_USER_ID' in os.environ and \\\n 'VAULT_APP_ID' in os.environ and \\\n os.environ['VAULT_USER_ID'] and os.environ['VAULT_APP_ID']:\n LOG.debug(\"Token derived from VAULT_APP_ID and VAULT_USER_ID\")\n token = app_token(self,\n os.environ['VAULT_APP_ID'].strip(),\n os.environ['VAULT_USER_ID'].strip())\n elif approle_filename:\n creds = yaml.safe_load(open(approle_filename).read().strip())\n if 'role_id' in creds and 'secret_id' in creds:\n LOG.debug(\"Token derived from approle file\")\n token = approle_token(self,\n creds['role_id'],\n creds['secret_id'])\n elif token_filename:\n LOG.debug(\"Token derived from %s\", token_filename)\n try:\n token = open(token_filename, 'r').read().strip()\n except IOError as os_exception:\n if os_exception.errno == 21:\n raise aomi.exceptions.AomiFile('Bad Vault token file')\n raise\n elif app_filename:\n token = yaml.safe_load(open(app_filename).read().strip())\n if 'app_id' 
in token and 'user_id' in token:\n LOG.debug(\"Token derived from %s\", app_filename)\n token = app_token(self,\n token['app_id'],\n token['user_id'])\n else:\n raise aomi.exceptions.AomiCredentials('unknown method')\n return token", - "docstring": "Generate our first token based on workstation configuration" - }, - { - "code": "def _context_names():\n import inspect\n from renku.models import provenance\n from renku.models._jsonld import JSONLDMixin\n for name in dir(provenance):\n cls = getattr(provenance, name)\n if inspect.isclass(cls) and issubclass(cls, JSONLDMixin):\n yield name", - "docstring": "Return list of valid context names." - }, - { - "code": "def update_grid(self):\n info_map = self.ms_game.get_info_map()\n for i in xrange(self.ms_game.board_height):\n for j in xrange(self.ms_game.board_width):\n self.grid_wgs[(i, j)].info_label(info_map[i, j])\n self.ctrl_wg.move_counter.display(self.ms_game.num_moves)\n if self.ms_game.game_status == 2:\n self.ctrl_wg.reset_button.setIcon(QtGui.QIcon(CONTINUE_PATH))\n elif self.ms_game.game_status == 1:\n self.ctrl_wg.reset_button.setIcon(QtGui.QIcon(WIN_PATH))\n self.timer.stop()\n elif self.ms_game.game_status == 0:\n self.ctrl_wg.reset_button.setIcon(QtGui.QIcon(LOSE_PATH))\n self.timer.stop()", - "docstring": "Update grid according to info map." - }, - { - "code": "def write_additional(self, productversion, channel):\n self.fileobj.seek(self.additional_offset)\n extras = extras_header.build(dict(\n count=1,\n sections=[dict(\n channel=six.u(channel),\n productversion=six.u(productversion),\n size=len(channel) + len(productversion) + 2 + 8,\n padding=b'',\n )],\n ))\n self.fileobj.write(extras)\n self.last_offset = self.fileobj.tell()", - "docstring": "Write the additional information to the MAR header.\n\n Args:\n productversion (str): product and version string\n channel (str): channel string" - }, - { - "code": "def subscribe(self, tag, fun, description=None):\n self.methods[tag] = fun\n self.descriptions[tag] = description\n self.socket.set_string_option(nanomsg.SUB, nanomsg.SUB_SUBSCRIBE, tag)", - "docstring": "Subscribe to something and register a function" - }, - { - "code": "def copy_without_prompts(self):\r\n text = self.get_selected_text()\r\n lines = text.split(os.linesep)\r\n for index, line in enumerate(lines):\r\n if line.startswith('>>> ') or line.startswith('... 
'):\r\n lines[index] = line[4:]\r\n text = os.linesep.join(lines)\r\n QApplication.clipboard().setText(text)", - "docstring": "Copy text to clipboard without prompts" - }, - { - "code": "def _get_base(**kwargs):\n profile = get_container_profile(copy.deepcopy(kwargs.get('profile')))\n kw_overrides = copy.deepcopy(kwargs)\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, _marker)\n profile_match = profile.pop(key, default)\n if kw_overrides_match is _marker:\n return profile_match\n return kw_overrides_match\n template = select('template')\n image = select('image')\n vgname = select('vgname')\n path = kwargs.get('path', None)\n for param in ('path', 'image', 'vgname', 'template'):\n kwargs.pop(param, None)\n if image:\n proto = _urlparse(image).scheme\n img_tar = __salt__['cp.cache_file'](image)\n img_name = os.path.basename(img_tar)\n hash_ = salt.utils.hashutils.get_hash(\n img_tar,\n __salt__['config.get']('hash_type'))\n name = '__base_{0}_{1}_{2}'.format(proto, img_name, hash_)\n if not exists(name, path=path):\n create(name, template=template, image=image,\n path=path, vgname=vgname, **kwargs)\n if vgname:\n rootfs = os.path.join('/dev', vgname, name)\n edit_conf(info(name, path=path)['config'],\n out_format='commented', **{'lxc.rootfs': rootfs})\n return name\n elif template:\n name = '__base_{0}'.format(template)\n if not exists(name, path=path):\n create(name, template=template, image=image, path=path,\n vgname=vgname, **kwargs)\n if vgname:\n rootfs = os.path.join('/dev', vgname, name)\n edit_conf(info(name, path=path)['config'],\n out_format='commented', **{'lxc.rootfs': rootfs})\n return name\n return ''", - "docstring": "If the needed base does not exist, then create it, if it does exist\n create nothing and return the name of the base lxc container so\n it can be cloned." - }, - { - "code": "def find_all_paths(G, start, end, path=[]):\r\n path = path + [start]\r\n if start == end:\r\n return [path]\r\n if start not in G.vertices:\r\n raise GraphInsertError(\"Vertex %s doesn't exist.\" % (start,))\r\n if end not in G.vertices:\r\n raise GraphInsertError(\"Vertex %s doesn't exist.\" % (end,))\r\n paths = []\r\n for vertex in G.vertices[start]:\r\n if vertex not in path:\r\n newpaths = find_all_paths(G, vertex, end, path)\r\n for newpath in newpaths:\r\n paths.append(newpath)\r\n return paths", - "docstring": "Find all paths between vertices start and end in graph." 
- }, - { - "code": "def __listeners_for_thread(self):\n thread = get_thread_ident()\n with self.lock:\n return [l for tid, l in self.listeners.items() if tid == thread]", - "docstring": "All Listeners for the current thread" - }, - { - "code": "def _get_tag(repo, name):\n try:\n return [x for x in _all_tags(repo) if x[0] == name][0]\n except IndexError:\n return False", - "docstring": "Find the requested tag in the specified repo" - }, - { - "code": "def DbGetDevicePropertyHist(self, argin):\n self._log.debug(\"In DbGetDevicePropertyHist()\")\n device_name = argin[0]\n prop_name = argin[1]\n return self.db.get_device_property_hist(device_name, prop_name)", - "docstring": "Retrieve device property history\n\n :param argin: Str[0] = Device name\n Str[1] = Property name\n :type: tango.DevVarStringArray\n :return: Str[0] = Property name\n Str[1] = date\n Str[2] = Property value number (array case)\n Str[3] = Property value 1\n Str[n] = Property value n\n :rtype: tango.DevVarStringArray" - }, - { - "code": "def write(filename, groupname, items, times, features, properties=None,\n dformat='dense', chunk_size='auto', sparsity=0.1, mode='a'):\n sparsity = sparsity if dformat == 'sparse' else None\n data = Data(items, times, features, properties=properties,\n sparsity=sparsity, check=True)\n Writer(filename, chunk_size=chunk_size).write(data, groupname, append=True)", - "docstring": "Write h5features data in a HDF5 file.\n\n This function is a wrapper to the Writer class. It has three purposes:\n\n * Check parameters for errors (see details below),\n * Create Items, Times and Features objects\n * Send them to the Writer.\n\n :param str filename: HDF5 file to be writted, potentially serving\n as a container for many small files. If the file does not\n exist, it is created. If the file is already a valid HDF5\n file, try to append the data in it.\n\n :param str groupname: Name of the group to write the data in, or\n to append the data to if the group already exists in the file.\n\n :param items: List of files from which the features where\n extracted. Items must not contain duplicates.\n :type items: list of str\n\n :param times: Time value for the features array. Elements of\n a 1D array are considered as the center of the time window\n associated with the features. A 2D array must have 2 columns\n corresponding to the begin and end timestamps of the features\n time window.\n :type times: list of 1D or 2D numpy arrays\n\n :param features: Features should have\n time along the lines and features along the columns\n (accomodating row-major storage in hdf5 files).\n :type features: list of 2D numpy arrays\n\n :param properties: Optional. Properties associated with each\n item. Properties describe the features associated with each\n item in a dictionnary. It can store parameters or fields\n recorded by the user.\n :type properties: list of dictionnaries\n\n :param str dformat: Optional. Which format to store the features\n into (sparse or dense). Default is dense.\n\n :param float chunk_size: Optional. In Mo, tuning parameter\n corresponding to the size of a chunk in the h5file. By default\n the chunk size is guessed automatically. Tis parameter is\n ignored if the file already exists.\n\n :param float sparsity: Optional. Tuning parameter corresponding to\n the expected proportion (in [0, 1]) of non-zeros elements on\n average in a single frame.\n\n :param char mode: Optional. 
The mode for overwriting an existing\n file, 'a' to append data to the file, 'w' to overwrite it\n\n :raise IOError: if the filename is not valid or parameters are\n inconsistent.\n\n :raise NotImplementedError: if dformat == 'sparse'" - }, - { - "code": "def set_value(self, value: ScalarType) -> None:\n if isinstance(value, bool):\n value_str = 'true' if value else 'false'\n else:\n value_str = str(value)\n start_mark = self.yaml_node.start_mark\n end_mark = self.yaml_node.end_mark\n tag = self.yaml_node.tag\n if tag.startswith('tag:yaml.org,2002:'):\n tag = scalar_type_to_tag[type(value)]\n new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark)\n self.yaml_node = new_node", - "docstring": "Sets the value of the node to a scalar value.\n\n After this, is_scalar(type(value)) will return true.\n\n Args:\n value: The value to set this node to, a str, int, float, \\\n bool, or None." - }, - { - "code": "def metarate(self, func, name='values'):\n setattr(func, name, self.values)\n return func", - "docstring": "Set the values object to the function object's namespace" - }, - { - "code": "def get(self, cycle_list, dataitem=None, isotope=None, sparse=1):\n return self.se.get(cycle_list,dataitem,isotope,sparse)", - "docstring": "Simple function that simply calls h5T.py get method. There\n are three ways to call this function.\n\n Parameters\n ----------\n cycle_list : string, list\n If cycle_list is a string, then get interpates the argument\n cycle_list as a dataitem and fetches the dataitem for all\n cycles.\n\n If cycle_list is a list, then get fetches the dataitem for\n the cycles in the list.\n dataitem : string, optional\n fetches the dataitem from the list of cycles. If dataitem\n is None, then cycle_list must be a string and will be used\n as dataitem. If dataitem is an isotope in the form 'H-2',\n it then returns the result of,\n\n >>> self.get(cycle_list,'iso_massf',dataitem)\n\n The default is None.\n isotope : string, optional\n The name of the isotope to fetch, it must be in the form\n 'H-2'. If isotope is None, then cycle_list or dataitem\n must be a string. The default is None.\n sparse : integer, optional\n Implements a sparsity factor on the fetched data. The\n default is 1.\n\n Notes\n -----\n Calling the get method directly in the form,\n\n >>> self.get(cycle_list,'iso_massf',dataitem)\n\n is depricated, and only included for compatibility." - }, - { - "code": "def description(self):\n return (\n self.name,\n self.type_code,\n None,\n self.get_column_length(),\n self.get_column_length(),\n self.scale,\n self.flags % 2 == 0)", - "docstring": "Provides a 7-item tuple compatible with the Python PEP249 DB Spec." 
- }, - { - "code": "def associate_devices(self, thing_names, config_file, region=None,\n profile_name=None):\n logging.info(\"associate_devices thing_names:{0}\".format(thing_names))\n config = GroupConfigFile(config_file=config_file)\n if region is None:\n region = self._region\n devices = config['devices']\n if type(thing_names) is str:\n thing_names = [thing_names]\n iot_client = _get_iot_session(region=region, profile_name=profile_name)\n for thing_name in thing_names:\n thing = iot_client.describe_thing(thingName=thing_name)\n logging.info(\"Found existing Thing:{0}\".format(thing))\n p = iot_client.list_thing_principals(thingName=thing_name)\n logging.info(\"Existing Thing has principals:{0}\".format(p))\n devices[thing_name] = {\n 'thing_arn': thing['attributes']['thingArn'],\n 'cert_arn': p['principals'][0],\n 'cert_id': thing['attributes']['certificateId'],\n 'thing_name': thing_name\n }\n logging.info(\"Thing:'{0}' associated with config:'{1}'\".format(\n thing_name, config_file))\n config['devices'] = devices", - "docstring": "Using the `thing_names` values, associate existing Things in AWS IoT\n with the config of another Greengrass Group for use as Greengrass\n Devices.\n\n :param thing_names: the thing name or list of thing names to associate\n as Greengrass Devices\n :param config_file: config file used to track the Greengrass Devices in\n the group\n :param region: the region in which to associate devices.\n [default: us-west-2]\n :param profile_name: the name of the `awscli` profile to use.\n [default: None]" - }, - { - "code": "def setedge(delta, is_multigraph, graph, orig, dest, idx, exists):\n if is_multigraph(graph):\n delta.setdefault(graph, {}).setdefault('edges', {})\\\n .setdefault(orig, {}).setdefault(dest, {})[idx] = bool(exists)\n else:\n delta.setdefault(graph, {}).setdefault('edges', {})\\\n .setdefault(orig, {})[dest] = bool(exists)", - "docstring": "Change a delta to say that an edge was created or deleted" - }, - { - "code": "def _remap_cortex_out(cortex_out, region, out_file):\n def _remap_vcf_line(line, contig, start):\n parts = line.split(\"\\t\")\n if parts[0] == \"\" or parts[1] == \"\":\n return None\n parts[0] = contig\n try:\n parts[1] = str(int(parts[1]) + start)\n except ValueError:\n raise ValueError(\"Problem in {0} with \\n{1}\".format(\n cortex_out, parts))\n return \"\\t\".join(parts)\n def _not_filtered(line):\n parts = line.split(\"\\t\")\n return parts[6] == \"PASS\"\n contig, start, _ = region\n start = int(start)\n with open(cortex_out) as in_handle:\n with open(out_file, \"w\") as out_handle:\n for line in in_handle:\n if line.startswith(\"\n pass\n elif line.startswith(\"\n out_handle.write(line)\n elif _not_filtered(line):\n update_line = _remap_vcf_line(line, contig, start)\n if update_line:\n out_handle.write(update_line)", - "docstring": "Remap coordinates in local cortex variant calls to the original global region." 
- }, - { - "code": "def convert_celeba_aligned_cropped(directory, output_directory,\n output_filename=OUTPUT_FILENAME):\n output_path = os.path.join(output_directory, output_filename)\n h5file = _initialize_conversion(directory, output_path, (218, 178))\n features_dataset = h5file['features']\n image_file_path = os.path.join(directory, IMAGE_FILE)\n with zipfile.ZipFile(image_file_path, 'r') as image_file:\n with progress_bar('images', NUM_EXAMPLES) as bar:\n for i in range(NUM_EXAMPLES):\n image_name = 'img_align_celeba/{:06d}.jpg'.format(i + 1)\n features_dataset[i] = numpy.asarray(\n Image.open(\n image_file.open(image_name, 'r'))).transpose(2, 0, 1)\n bar.update(i + 1)\n h5file.flush()\n h5file.close()\n return (output_path,)", - "docstring": "Converts the aligned and cropped CelebA dataset to HDF5.\n\n Converts the CelebA dataset to an HDF5 dataset compatible with\n :class:`fuel.datasets.CelebA`. The converted dataset is saved as\n 'celeba_aligned_cropped.hdf5'.\n\n It assumes the existence of the following files:\n\n * `img_align_celeba.zip`\n * `list_attr_celeba.txt`\n\n Parameters\n ----------\n directory : str\n Directory in which input files reside.\n output_directory : str\n Directory in which to save the converted dataset.\n output_filename : str, optional\n Name of the saved dataset. Defaults to\n 'celeba_aligned_cropped.hdf5'.\n\n Returns\n -------\n output_paths : tuple of str\n Single-element tuple containing the path to the converted\n dataset." - }, - { - "code": "def take(attributes, properties):\n assert is_iterable_typed(attributes, basestring)\n assert is_iterable_typed(properties, basestring)\n result = []\n for e in properties:\n if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):\n result.append(e)\n return result", - "docstring": "Returns a property set which include all\n properties in 'properties' that have any of 'attributes'." - }, - { - "code": "def render_js_code(self, id_, *args, **kwargs):\n if id_:\n options = self.render_select2_options_code(\n dict(self.get_options()), id_)\n return mark_safe(self.html.format(id=id_, options=options))\n return u''", - "docstring": "Render html container for Select2 widget with options." - }, - { - "code": "def list_vapps(kwargs=None, call=None):\n if call != 'function':\n raise SaltCloudSystemExit(\n 'The list_vapps function must be called with '\n '-f or --function.'\n )\n return {'vApps': salt.utils.vmware.list_vapps(_get_si())}", - "docstring": "List all the vApps for this VMware environment\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt-cloud -f list_vapps my-vmware-config" - }, - { - "code": "def setup_output(self, output_file, force=False, injection_file=None):\n checkpoint_file = output_file + '.checkpoint'\n backup_file = output_file + '.bkup'\n logging.info(\"Looking for checkpoint file\")\n checkpoint_valid = validate_checkpoint_files(checkpoint_file,\n backup_file)\n self.new_checkpoint = False\n if not checkpoint_valid:\n logging.info(\"Checkpoint not found or not valid\")\n create_new_output_file(self, checkpoint_file, force=force,\n injection_file=injection_file)\n self.new_checkpoint = True\n shutil.copy(checkpoint_file, backup_file)\n for fn in [checkpoint_file, backup_file]:\n with self.io(fn, \"a\") as fp:\n fp.write_command_line()\n fp.write_resume_point()\n self.checkpoint_file = checkpoint_file\n self.backup_file = backup_file\n self.checkpoint_valid = checkpoint_valid", - "docstring": "Sets up the sampler's checkpoint and output files.\n\n The checkpoint file has the same name as the output file, but with\n ``.checkpoint`` appended to the name. A backup file will also be\n created.\n\n If the output file already exists, an ``OSError`` will be raised.\n This can be overridden by setting ``force`` to ``True``.\n\n Parameters\n ----------\n sampler : sampler instance\n Sampler\n output_file : str\n Name of the output file.\n force : bool, optional\n If the output file already exists, overwrite it.\n injection_file : str, optional\n If an injection was added to the data, write its information." - }, - { - "code": "def update(self, validate=False):\n unfiltered_rs = self.connection.get_all_volumes([self.id])\n rs = [ x for x in unfiltered_rs if x.id == self.id ]\n if len(rs) > 0:\n self._update(rs[0])\n elif validate:\n raise ValueError('%s is not a valid Volume ID' % self.id)\n return self.status", - "docstring": "Update the data associated with this volume by querying EC2.\n\n :type validate: bool\n :param validate: By default, if EC2 returns no data about the\n volume the update method returns quietly. If\n the validate param is True, however, it will\n raise a ValueError exception if no data is\n returned from EC2." 
- }, - { - "code": "def _read_softgz(filename) -> AnnData:\n filename = str(filename)\n import gzip\n with gzip.open(filename, mode='rt') as file:\n samples_info = {}\n for line in file:\n if line.startswith(\"!dataset_table_begin\"):\n break\n elif line.startswith(\"!subset_description\"):\n subset_description = line.split(\"=\")[1].strip()\n elif line.startswith(\"!subset_sample_id\"):\n subset_ids = line.split(\"=\")[1].split(\",\")\n subset_ids = [x.strip() for x in subset_ids]\n for k in subset_ids:\n samples_info[k] = subset_description\n sample_names = file.readline().strip().split(\"\\t\")\n I = [i for i, x in enumerate(sample_names) if x.startswith(\"GSM\")]\n sample_names = [sample_names[i] for i in I]\n groups = [samples_info[k] for k in sample_names]\n gene_names, X = [], []\n for line in file:\n if line.startswith(\"!dataset_table_end\"):\n break\n V = line.split(\"\\t\")\n x = [float(V[i]) for i in I]\n X.append(x)\n gene_names.append(V[1])\n X = np.array(X).T\n obs = pd.DataFrame({\"groups\": groups}, index=sample_names)\n var = pd.DataFrame(index=gene_names)\n return AnnData(X=X, obs=obs, var=var)", - "docstring": "Read a SOFT format data file.\n\n The SOFT format is documented here\n http://www.ncbi.nlm.nih.gov/geo/info/soft2.html.\n\n Notes\n -----\n The function is based on a script by Kerby Shedden.\n http://dept.stat.lsa.umich.edu/~kshedden/Python-Workshop/gene_expression_comparison.html" - }, - { - "code": "def deletesshkey(self, key_id):\n request = requests.delete(\n '{0}/{1}'.format(self.keys_url, key_id), headers=self.headers,\n verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)\n if request.content == b'null':\n return False\n else:\n return True", - "docstring": "Deletes an sshkey for the current user identified by id\n\n :param key_id: the id of the key\n :return: False if it didn't delete it, True if it was deleted" - }, - { - "code": "def setmaxage(self, time):\n _runshell([brctlexe, 'setmaxage', self.name, str(time)],\n \"Could not set max message age in %s.\" % self.name)", - "docstring": "Set bridge max message age time." - }, - { - "code": "def _process_has_edge_degree_filter_directive(filter_operation_info, location, context, parameters):\n if isinstance(filter_operation_info.field_ast, InlineFragment):\n raise AssertionError(u'Received InlineFragment AST node in \"has_edge_degree\" filter '\n u'handler. This should have been caught earlier: '\n u'{}'.format(filter_operation_info.field_ast))\n filtered_field_name = filter_operation_info.field_name\n if filtered_field_name is None or not is_vertex_field_name(filtered_field_name):\n raise AssertionError(u'Invalid value for \"filtered_field_name\" in \"has_edge_degree\" '\n u'filter: {}'.format(filtered_field_name))\n if not is_vertex_field_type(filter_operation_info.field_type):\n raise AssertionError(u'Invalid value for \"filter_operation_info.field_type\" in '\n u'\"has_edge_degree\" filter: {}'.format(filter_operation_info))\n argument = parameters[0]\n if not is_variable_argument(argument):\n raise GraphQLCompilationError(u'The \"has_edge_degree\" filter only supports runtime '\n u'variable arguments. 
Tagged values are not supported.'\n u'Argument name: {}'.format(argument))\n argument_inferred_type = GraphQLInt\n argument_expression, non_existence_expression = _represent_argument(\n location, context, argument, argument_inferred_type)\n if non_existence_expression is not None:\n raise AssertionError(u'Since we do not support tagged values, non_existence_expression '\n u'should have been None. However, it was: '\n u'{}'.format(non_existence_expression))\n argument_is_zero = expressions.BinaryComposition(\n u'=', argument_expression, expressions.ZeroLiteral)\n edge_field_is_null = expressions.BinaryComposition(\n u'=', expressions.LocalField(filtered_field_name), expressions.NullLiteral)\n edge_degree_is_zero = expressions.BinaryComposition(\n u'&&', argument_is_zero, edge_field_is_null)\n edge_field_is_not_null = expressions.BinaryComposition(\n u'!=', expressions.LocalField(filtered_field_name), expressions.NullLiteral)\n edge_degree = expressions.UnaryTransformation(\n u'size', expressions.LocalField(filtered_field_name))\n edge_degree_matches_argument = expressions.BinaryComposition(\n u'=', edge_degree, argument_expression)\n edge_degree_is_non_zero = expressions.BinaryComposition(\n u'&&', edge_field_is_not_null, edge_degree_matches_argument)\n filter_predicate = expressions.BinaryComposition(\n u'||', edge_degree_is_zero, edge_degree_is_non_zero)\n return blocks.Filter(filter_predicate)", - "docstring": "Return a Filter basic block that checks the degree of the edge to the given vertex field.\n\n Args:\n filter_operation_info: FilterOperationInfo object, containing the directive and field info\n of the field where the filter is to be applied.\n location: Location where this filter is used.\n context: dict, various per-compilation data (e.g. declared tags, whether the current block\n is optional, etc.). May be mutated in-place in this function!\n parameters: list of 1 element, containing the value to check the edge degree against;\n if the parameter is optional and missing, the check will return True\n\n Returns:\n a Filter basic block that performs the check" - }, - { - "code": "def readResources(self, elem):\n try:\n iterator = getattr(elem, 'iter')\n except AttributeError:\n iterator = getattr(elem, 'getiterator')\n for include in iterator(\"include\"):\n loc = include.attrib.get(\"location\")\n if loc and loc.endswith('.qrc'):\n mname = os.path.basename(loc[:-4] + self._resource_suffix)\n if mname not in self.resources:\n self.resources.append(mname)", - "docstring": "Read a \"resources\" tag and add the module to import to the parser's\n list of them." - }, - { - "code": "def dtype_for(t):\n if t in dtype_dict:\n return dtype_dict[t]\n return np.typeDict.get(t, t)", - "docstring": "return my dtype mapping, whether number or name" - }, - { - "code": "def get_int(self, key: str) -> Optional[int]:\n v = self.get(key)\n if v is None:\n return None\n try:\n return int(v)\n except:\n raise ConfigTypeError(self.full_key(key), v, 'int')", - "docstring": "Returns an optional configuration value, as an int, by its key, or None if it doesn't exist.\n If the configuration value isn't a legal int, this function will throw an error.\n\n :param str key: The requested configuration key.\n :return: The configuration key's value, or None if one does not exist.\n :rtype: Optional[int]\n :raises ConfigTypeError: The configuration value existed but couldn't be coerced to int." 
- }, - { - "code": "def fix_tree(cls, destructive=False):\n cls = get_result_class(cls)\n vendor = cls.get_database_vendor('write')\n if destructive:\n dump = cls.dump_bulk(None, True)\n cls.objects.all().delete()\n cls.load_bulk(dump, None, True)\n else:\n cursor = cls._get_database_cursor('write')\n sql = (\n \"UPDATE %s \"\n \"SET depth=\" + sql_length(\"path\", vendor=vendor) + \"/%%s \"\n \"WHERE depth!=\" + sql_length(\"path\", vendor=vendor) + \"/%%s\"\n ) % (connection.ops.quote_name(cls._meta.db_table), )\n vals = [cls.steplen, cls.steplen]\n cursor.execute(sql, vals)\n vals = ['_' * cls.steplen]\n if cls.get_database_vendor('read') == 'mysql':\n sql = (\n \"SELECT tbn1.path, tbn1.numchild, (\"\n \"SELECT COUNT(1) \"\n \"FROM %(table)s AS tbn2 \"\n \"WHERE tbn2.path LIKE \" +\n sql_concat(\"tbn1.path\", \"%%s\", vendor=vendor) + \") AS real_numchild \"\n \"FROM %(table)s AS tbn1 \"\n \"HAVING tbn1.numchild != real_numchild\"\n ) % {'table': connection.ops.quote_name(cls._meta.db_table)}\n else:\n subquery = \"(SELECT COUNT(1) FROM %(table)s AS tbn2\"\\\n \" WHERE tbn2.path LIKE \" + sql_concat(\"tbn1.path\", \"%%s\", vendor=vendor) + \")\"\n sql = (\"SELECT tbn1.path, tbn1.numchild, \" + subquery +\n \" FROM %(table)s AS tbn1 WHERE tbn1.numchild != \" +\n subquery)\n sql = sql % {\n 'table': connection.ops.quote_name(cls._meta.db_table)}\n vals *= 2\n cursor.execute(sql, vals)\n sql = \"UPDATE %(table)s \"\\\n \"SET numchild=%%s \"\\\n \"WHERE path=%%s\" % {\n 'table': connection.ops.quote_name(cls._meta.db_table)}\n for node_data in cursor.fetchall():\n vals = [node_data[2], node_data[0]]\n cursor.execute(sql, vals)", - "docstring": "Solves some problems that can appear when transactions are not used and\n a piece of code breaks, leaving the tree in an inconsistent state.\n\n The problems this method solves are:\n\n 1. Nodes with an incorrect ``depth`` or ``numchild`` values due to\n incorrect code and lack of database transactions.\n 2. \"Holes\" in the tree. This is normal if you move/delete nodes a\n lot. Holes in a tree don't affect performance,\n 3. Incorrect ordering of nodes when ``node_order_by`` is enabled.\n Ordering is enforced on *node insertion*, so if an attribute in\n ``node_order_by`` is modified after the node is inserted, the\n tree ordering will be inconsistent.\n\n :param destructive:\n\n A boolean value. If True, a more agressive fix_tree method will be\n attempted. If False (the default), it will use a safe (and fast!)\n fix approach, but it will only solve the ``depth`` and\n ``numchild`` nodes, it won't fix the tree holes or broken path\n ordering.\n\n .. warning::\n\n Currently what the ``destructive`` method does is:\n\n 1. Backup the tree with :meth:`dump_data`\n 2. Remove all nodes in the tree.\n 3. Restore the tree with :meth:`load_data`\n\n So, even when the primary keys of your nodes will be preserved,\n this method isn't foreign-key friendly. That needs complex\n in-place tree reordering, not available at the moment (hint:\n patches are welcome)." - }, - { - "code": "def index(req, res):\n number = req.session.get('counter', -1)\n req.session['counter'] = int(number) + 1\n print(\" -- Session '{id}' returned {counter} times\".format(**req.session))\n msg = \"Hello!! You've been here [[%s]] times\" % (req.session['counter'])\n res.send_text(msg)\n req.session.save()", - "docstring": "Return root page of website." 
- }, - { - "code": "def is_action(task):\n result = False\n if _extract_from_env_in_payload(task, 'ACTION_CALLBACK'):\n result = True\n if task.get('extra', {}).get('action') is not None:\n result = True\n return result", - "docstring": "Determine if a task is an action task.\n\n Trusted decision and action tasks are important in that they can generate\n other valid tasks. The verification of decision and action tasks is slightly\n different, so we need to be able to tell them apart.\n\n This checks for the following things::\n\n * ``task.payload.env.ACTION_CALLBACK`` exists\n * ``task.extra.action`` exists\n\n Args:\n task (dict): the task definition to check\n\n Returns:\n bool: True if it's an action" - }, - { - "code": "def get_requirements() -> List[str]:\n requirements_path = os.path.join(\n os.path.dirname(__file__), 'requirements.txt'\n )\n with open(requirements_path) as f:\n return f.read().split()", - "docstring": "Return the requirements as a list of string." - }, - { - "code": "def update(self, message):\n if isinstance(message, ExpanderMessage):\n zone = -1\n if message.type == ExpanderMessage.ZONE:\n zone = self.expander_to_zone(message.address, message.channel, self.alarmdecoder_object.mode)\n if zone != -1:\n status = Zone.CLEAR\n if message.value == 1:\n status = Zone.FAULT\n elif message.value == 2:\n status = Zone.CHECK\n try:\n self._update_zone(zone, status=status)\n except IndexError:\n self._add_zone(zone, status=status, expander=True)\n else:\n if message.ready and not message.text.startswith(\"SYSTEM\"):\n for zone in self._zones_faulted:\n self._update_zone(zone, Zone.CLEAR)\n self._last_zone_fault = 0\n elif self.alarmdecoder_object.mode != DSC and (message.check_zone or message.text.startswith(\"FAULT\") or message.text.startswith(\"ALARM\")):\n zone = message.parse_numeric_code()\n if zone == 191:\n zone_regex = re.compile('^CHECK (\\d+).*$')\n match = zone_regex.match(message.text)\n if match is None:\n return\n zone = match.group(1)\n if zone in self._zones_faulted:\n self._update_zone(zone)\n self._clear_zones(zone)\n else:\n status = Zone.FAULT\n if message.check_zone:\n status = Zone.CHECK\n self._add_zone(zone, status=status)\n self._zones_faulted.append(zone)\n self._zones_faulted.sort()\n self._last_zone_fault = zone\n self._clear_expired_zones()", - "docstring": "Update zone statuses based on the current message.\n\n :param message: message to use to update the zone tracking\n :type message: :py:class:`~alarmdecoder.messages.Message` or :py:class:`~alarmdecoder.messages.ExpanderMessage`" - }, - { - "code": "def confirm(prompt):\n while True:\n try:\n answer = input(prompt)\n except KeyboardInterrupt:\n return False\n answer = answer.strip().lower()\n if answer not in ('y', 'n'):\n print('Please enter y or n')\n continue\n return answer == 'y'", - "docstring": "Ask for confirmation, given a ``prompt`` and return a boolean value." - }, - { - "code": "def _backup_compresslevel(self, dirs):\n with ZipFile(self.zip_filename, 'w', compresslevel=self.compress_level) as backup_zip:\n for path in tqdm(dirs, desc='Writing Zip Files', total=len(dirs)):\n backup_zip.write(path, path[len(self.source):len(path)])", - "docstring": "Create a backup file with a compresslevel parameter." - }, - { - "code": "def requires_auth(func):\n @six.wraps(func)\n def wrapper(self, *args, **kwargs):\n if self.token_expired:\n self.authenticate()\n return func(self, *args, **kwargs)\n return wrapper", - "docstring": "Handle authentication checks.\n\n .. 
py:decorator:: requires_auth\n\n Checks if the token has expired and performs authentication if needed." - }, - { - "code": "def get_favorites(self):\n url = self._imgur._base_url + \"/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self._imgur) for thing in resp]", - "docstring": "Return the users favorited images." - }, - { - "code": "def gen_nop():\n empty_reg = ReilEmptyOperand()\n return ReilBuilder.build(ReilMnemonic.NOP, empty_reg, empty_reg, empty_reg)", - "docstring": "Return a NOP instruction." - }, - { - "code": "def ctrl_x(self, x, to=None):\n seq = [Keys.CONTROL, x, Keys.CONTROL]\n if (self.firefox and self.windows) or (self.linux and self.chrome):\n seq.append(Keys.PAUSE)\n if to is None:\n ActionChains(self.driver) \\\n .send_keys(seq) \\\n .perform()\n else:\n self.send_keys(to, seq)", - "docstring": "Sends a character to the currently active element with Ctrl\n pressed. This method takes care of pressing and releasing\n Ctrl." - }, - { - "code": "def discover(self):\n response = {}\n masters = {}\n self.log.info(\"Looking for a server discovery\")\n self._query()\n self._collect_masters_map(response)\n if not response:\n msg = 'No master has been discovered.'\n self.log.info(msg)\n else:\n for addr, descriptions in response.items():\n for data in descriptions:\n msg = salt.utils.stringutils.to_unicode(data)\n if msg.startswith(self.signature):\n msg = msg.split(self.signature)[-1]\n self.log.debug(\n \"Service announcement at '%s:%s'. Response: '%s'\",\n addr[0], addr[1], msg\n )\n if ':E:' in msg:\n err = msg.split(':E:')[-1]\n self.log.error(\n 'Error response from the service publisher at %s: %s',\n addr, err\n )\n if \"timestamp\" in err:\n self.log.error('Publisher sent shifted timestamp from %s', addr)\n else:\n if addr not in masters:\n masters[addr] = []\n masters[addr].append(\n salt.utils.json.loads(msg.split(':@:')[-1], _json_module=_json)\n )\n return masters", - "docstring": "Gather the information of currently declared servers.\n\n :return:" - }, - { - "code": "def is_job_done(job_id, conn=None):\n result = False\n get_done = RBJ.get_all(DONE, index=STATUS_FIELD)\n for item in get_done.filter({ID_FIELD: job_id}).run(conn):\n result = item\n return result", - "docstring": "is_job_done function checks to if Brain.Jobs Status is 'Done'\n\n :param job_id: id for the job\n :param conn: (optional) to run on\n :return: if job is done if" - }, - { - "code": "def key(self, key, strictkey=None):\n return self._select(self._pointer.key(key, strictkey))", - "docstring": "Return a chunk referencing a key in a mapping with the name 'key'." - }, - { - "code": "def _validate_states(states, topology):\n states = states or []\n if isinstance(states, dict):\n for x in states:\n assert x in topology.node\n else:\n assert len(states) <= len(topology)\n return states", - "docstring": "Validate states to avoid ignoring states during initialization" - }, - { - "code": "def complain(error):\n if callable(error):\n if DEVELOP:\n raise error()\n elif DEVELOP:\n raise error\n else:\n logger.warn_err(error)", - "docstring": "Raises in develop; warns in release." - }, - { - "code": "def do_quit(self, arg):\n if self.saved:\n self.save()\n else:\n self.parser.clean()\n return True", - "docstring": "Quits the program." 
- }, - { - "code": "def format_rst(self):\n res = ''\n num_cols = len(self.header)\n col_width = 25\n for _ in range(num_cols):\n res += ''.join(['=' for _ in range(col_width - 1)]) + ' ' \n res += '\\n'\n for c in self.header:\n res += c.ljust(col_width) \n res += '\\n'\n for _ in range(num_cols):\n res += ''.join(['=' for _ in range(col_width - 1)]) + ' ' \n res += '\\n'\n for row in self.arr:\n for c in row:\n res += self.force_to_string(c).ljust(col_width)\n res += '\\n' \n for _ in range(num_cols):\n res += ''.join(['=' for _ in range(col_width - 1)]) + ' ' \n res += '\\n'\n return res", - "docstring": "return table in RST format" - }, - { - "code": "def internet_gateway_absent(name, detach=False, region=None,\n key=None, keyid=None, profile=None):\n ret = {'name': name,\n 'result': True,\n 'comment': '',\n 'changes': {}\n }\n r = __salt__['boto_vpc.get_resource_id']('internet_gateway', name=name,\n region=region, key=key,\n keyid=keyid, profile=profile)\n if 'error' in r:\n ret['result'] = False\n ret['comment'] = 'Failed to delete internet gateway: {0}.'.format(r['error']['message'])\n return ret\n igw_id = r['id']\n if not igw_id:\n ret['comment'] = 'Internet gateway {0} does not exist.'.format(name)\n return ret\n if __opts__['test']:\n ret['comment'] = 'Internet gateway {0} is set to be removed.'.format(name)\n ret['result'] = None\n return ret\n r = __salt__['boto_vpc.delete_internet_gateway'](internet_gateway_name=name,\n detach=detach, region=region,\n key=key, keyid=keyid,\n profile=profile)\n if not r.get('deleted'):\n ret['result'] = False\n ret['comment'] = 'Failed to delete internet gateway: {0}.'.format(r['error']['message'])\n return ret\n ret['changes']['old'] = {'internet_gateway': igw_id}\n ret['changes']['new'] = {'internet_gateway': None}\n ret['comment'] = 'Internet gateway {0} deleted.'.format(name)\n return ret", - "docstring": "Ensure the named internet gateway is absent.\n\n name\n Name of the internet gateway.\n\n detach\n First detach the internet gateway from a VPC, if attached.\n\n region\n Region to connect to.\n\n key\n Secret key to be used.\n\n keyid\n Access key to be used.\n\n profile\n A dict with region, key and keyid, or a pillar key (string) that\n contains a dict with region, key and keyid." 
- }, - { - "code": "def AddFXrefWrite(self, method, classobj, field):\n if field not in self._fields:\n self._fields[field] = FieldClassAnalysis(field)\n self._fields[field].AddXrefWrite(classobj, method)", - "docstring": "Add a Field Write to this class\n\n :param method:\n :param classobj:\n :param field:\n :return:" - }, - { - "code": "def format(self, info_dict, delimiter='/'):\n def dfs(father, path, acc):\n if isinstance(father, list):\n for child in father:\n dfs(child, path, acc)\n elif isinstance(father, collections.Mapping):\n for child in sorted(father.items(), key=itemgetter(0)), :\n dfs(child, path, acc)\n elif isinstance(father, tuple):\n path = copy.copy(path)\n path.append(father[0])\n dfs(father[1], path, acc)\n else:\n path[-1] = '{}: {}'.format(path[-1], str(father))\n acc.append(delimiter.join(path))\n result = []\n dfs(info_dict.get('Prefix') or info_dict, [], result)\n return '\\n'.join(result)", - "docstring": "This formatter will take a data structure that\n represent a tree and will print all the paths\n from the root to the leaves\n\n in our case it will print each value and the keys\n that needed to get to it, for example:\n\n vm0:\n net: lago\n memory: 1024\n\n will be output as:\n\n vm0/net/lago\n vm0/memory/1024\n\n Args:\n info_dict (dict): information to reformat\n delimiter (str): a delimiter for the path components\n Returns:\n str: String representing the formatted info" - }, - { - "code": "def config(name,\n config,\n write=True):\n _build_config_tree(name, config)\n configs = _render_configuration()\n if __opts__.get('test', False):\n comment = 'State syslog_ng will write \\'{0}\\' into {1}'.format(\n configs,\n __SYSLOG_NG_CONFIG_FILE\n )\n return _format_state_result(name, result=None, comment=comment)\n succ = write\n if write:\n succ = _write_config(config=configs)\n return _format_state_result(name, result=succ,\n changes={'new': configs, 'old': ''})", - "docstring": "Builds syslog-ng configuration. This function is intended to be used from\n the state module, users should not use it directly!\n\n name : the id of the Salt document or it is the format of .id\n config : the parsed YAML code\n write : if True, it writes the config into the configuration file,\n otherwise just returns it\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' syslog_ng.config name='s_local' config=\"[{'tcp':[{'ip':'127.0.0.1'},{'port':1233}]}]\"" - }, - { - "code": "def _contains_blinded_text(stats_xml):\n tree = ET.parse(stats_xml)\n root = tree.getroot()\n total_tokens = int(root.find('size/total/tokens').text)\n unique_lemmas = int(root.find('lemmas').get('unique'))\n return (unique_lemmas / total_tokens) < 0.01", - "docstring": "Heuristic to determine whether the treebank has blinded texts or not" - }, - { - "code": "def _default_pad(self, data_type, numElems):\n order = self._convert_option()\n if (data_type == 1) or (data_type == 41):\n pad_value = struct.pack(order+'b', -127)\n elif data_type == 2:\n pad_value = struct.pack(order+'h', -32767)\n elif data_type == 4:\n pad_value = struct.pack(order+'i', -2147483647)\n elif (data_type == 8) or (data_type == 33):\n pad_value = struct.pack(order+'q', -9223372036854775807)\n elif data_type == 11:\n pad_value = struct.pack(order+'B', 254)\n elif data_type == 12:\n pad_value = struct.pack(order+'H', 65534)\n elif data_type == 14:\n pad_value = struct.pack(order+'I', 4294967294)\n elif (data_type == 21) or (data_type == 44):\n pad_value = struct.pack(order+'f', -1.0E30)\n elif (data_type == 22) or (data_type == 45):\n pad_value = struct.pack(order+'d', -1.0E30)\n elif (data_type == 31):\n pad_value = struct.pack(order+'d', 0.0)\n elif (data_type == 32):\n pad_value = struct.pack(order+'2d', *[0.0, 0.0])\n elif (data_type == 51) or (data_type == 52):\n tmpPad = str(' '*numElems).encode()\n form = str(numElems)\n pad_value = struct.pack(form+'b', *tmpPad)\n return pad_value", - "docstring": "Determines the default pad data for a \"data_type\"" - }, - { - "code": "def get_current_session(request, hproPk):\n retour = {}\n base_key = 'plugit_' + str(hproPk) + '_'\n for key, value in request.session.iteritems():\n if key.startswith(base_key):\n retour[key[len(base_key):]] = value\n return retour", - "docstring": "Get the current session value" - }, - { - "code": "def _copy_with_dtype(data, dtype):\n result = np.empty(data.shape, dtype)\n result[...] = data\n return result", - "docstring": "Create a copy of an array with the given dtype.\n\n We use this instead of np.array() to ensure that custom object dtypes end\n up on the resulting array." - }, - { - "code": "def wait_running(self, timeout=None):\n ioloop = getattr(self, 'ioloop', None)\n if not ioloop:\n raise RuntimeError('Call start() before wait_running()')\n return self._running.wait_with_ioloop(ioloop, timeout)", - "docstring": "Wait until the client is running.\n\n Parameters\n ----------\n timeout : float in seconds\n Seconds to wait for the client to start running.\n\n Returns\n -------\n running : bool\n Whether the client is running\n\n Notes\n -----\n Do not call this from the ioloop, use until_running()." - }, - { - "code": "def get_field_mapping(self, using=None, **kwargs):\n return self._get_connection(using).indices.get_field_mapping(index=self._name, **kwargs)", - "docstring": "Retrieve mapping definition of a specific field.\n\n Any additional keyword arguments will be passed to\n ``Elasticsearch.indices.get_field_mapping`` unchanged." - }, - { - "code": "def has_user(self, username):\n return self.model.objects.filter(\n **self._filter_user_by(username)\n ).exists()", - "docstring": "return True if exists user." 
- }, - { - "code": "def excludeSNPs(inPrefix, outPrefix, exclusionFileName):\n plinkCommand = [\"plink\", \"--noweb\", \"--bfile\", inPrefix, \"--exclude\",\n exclusionFileName, \"--make-bed\", \"--out\", outPrefix]\n runCommand(plinkCommand)", - "docstring": "Exclude some SNPs using Plink.\n\n :param inPrefix: the prefix of the input file.\n :param outPrefix: the prefix of the output file.\n :param exclusionFileName: the name of the file containing the markers to be\n excluded.\n\n :type inPrefix: str\n :type outPrefix: str\n :type exclusionFileName: str\n\n Using Plink, exclude a list of markers from ``inPrefix``, and saves the\n results in ``outPrefix``. The list of markers are in ``exclusionFileName``." - }, - { - "code": "def _create_default_tiles(self):\n for value, background, text in self.DEFAULT_TILES:\n self.tiles[value] = self._make_tile(value, background, text)", - "docstring": "Create all default tiles, as defined above." - }, - { - "code": "def count(self, weighted=True):\n return self._measures.weighted_n if weighted else self._measures.unweighted_n", - "docstring": "Return numberic count of rows considered for cube response." - }, - { - "code": "def show_qouts(self, nids=None, stream=sys.stdout):\n lines = []\n for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):\n header = \"=== \" + task.qout_file.path + \"===\"\n lines.append(header)\n if task.qout_file.exists:\n with open(task.qout_file.path, \"rt\") as fh:\n lines += fh.readlines()\n else:\n lines.append(\"File does not exist!\")\n lines.append(\"=\" * len(header) + 2*\"\\n\")\n return stream.writelines(lines)", - "docstring": "Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.\n\n Args:\n nids: optional list of node identifiers used to filter the tasks.\n stream: File-like object. Default: sys.stdout" - }, - { - "code": "def validate_model(path):\n notifications = {\"warnings\": [], \"errors\": []}\n model, sbml_ver = val.load_cobra_model(path, notifications)\n return model, sbml_ver, notifications", - "docstring": "Validate a model structurally and optionally store results as JSON.\n\n Parameters\n ----------\n path :\n Path to model file.\n\n Returns\n -------\n tuple\n cobra.Model\n The metabolic model under investigation.\n tuple\n A tuple reporting on the SBML level, version, and FBC package\n version used (if any) in the SBML document.\n dict\n A simple dictionary containing a list of errors and warnings." - }, - { - "code": "def get_neighbor_names(self, node_name: str, order: int = 1) -> list:\n logger.info(\"In get_neighbor_names()\")\n node = self.graph.vs.find(name=node_name)\n neighbors = self.graph.neighborhood(node, order=order)\n names = self.graph.vs[neighbors][\"name\"]\n names.append(node_name)\n return list(names)", - "docstring": "Get the names of all neighbors of a node, and the node itself.\n\n :param node_name: Node whose neighbor names are requested.\n :return: A list of names of all neighbors of a node, and the node itself." - }, - { - "code": "def _caches_dicts(self):\n qs = (self.get_query_set() if django.VERSION < (1, 6)\n else self.get_queryset())\n variants_dict = self._get_variants_dict(qs)\n cache.set(VARIANTS_DICT_CACHE_KEY, variants_dict)\n replace_dict = self._get_replace_dict(qs)\n cache.set(REPLACE_DICT_CACHE_KEY, replace_dict)\n return variants_dict, replace_dict", - "docstring": "Caches variants_dict and replace_dict in a single database hit." 
- }, - { - "code": "def cell_sides_angstrom(self):\n return np.asarray(\n self.header['cella']['value'], dtype=float) / self.data_shape", - "docstring": "Array of sizes of a unit cell in Angstroms.\n\n The value is determined from the ``'cella'`` entry in `header`." - }, - { - "code": "def _delete(self, tree):\n tablename = tree.table\n table = self.describe(tablename, require=True)\n kwargs = {}\n visitor = Visitor(self.reserved_words)\n if tree.where:\n constraints = ConstraintExpression.from_where(tree.where)\n kwargs[\"condition\"] = constraints.build(visitor)\n kwargs[\"expr_values\"] = visitor.expression_values\n kwargs[\"alias\"] = visitor.attribute_names\n return self._query_and_op(tree, table, \"delete_item\", kwargs)", - "docstring": "Run a DELETE statement" - }, - { - "code": "def alias_authorization(self, authorization_id, alias_id):\n self._alias_id(primary_id=authorization_id, equivalent_id=alias_id)", - "docstring": "Adds an ``Id`` to an ``Authorization`` for the purpose of creating compatibility.\n\n The primary ``Id`` of the ``Authorization`` is determined by the\n provider. The new ``Id`` performs as an alias to the primary\n ``Id``. If the alias is a pointer to another authorization. it\n is reassigned to the given authorization ``Id``.\n\n arg: authorization_id (osid.id.Id): the ``Id`` of an\n ``Authorization``\n arg: alias_id (osid.id.Id): the alias ``Id``\n raise: AlreadyExists - ``alias_id`` is already assigned\n raise: NotFound - ``authorization_id`` not found\n raise: NullArgument - ``authorization_id`` or ``alias_id`` is\n ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def bind_channel(self, channel, callback):\n if channel in self.CHANNELS:\n warning = (\n \"The event name '{}' will soon be deprecated. Use \"\n \"the real event name '{}' instead.\"\n ).format(channel, self.CHANNELS[channel])\n self.handle_error(warning)\n channel = self.CHANNELS[channel]\n if channel == self.EVENT_NAME_ERROR:\n self.error_callback = callback\n elif channel == self.EVENT_NAME_RESULTS:\n self.socketIO.on(channel, partial(self.unpack_results, callback))\n else:\n self.socketIO.on(channel, callback)", - "docstring": "Bind given channel with the given callback" - }, - { - "code": "def rsync_git(local_path, remote_path, exclude=None, extra_opts=None,\n version_file='version.txt'):\n with settings(hide('output', 'running'), warn_only=True):\n print(green('Version On Server: ' + run('cat ' + '{}/{}'.format(\n remote_path, version_file)).strip()))\n print(green('Now Deploying Version ' +\n write_version(join(local_path, version_file))))\n rsync(local_path, remote_path, exclude, extra_opts)", - "docstring": "Rsync deploy a git repo. 
Write and compare version.txt" - }, - { - "code": "def get_requirements(opts):\n if opts.dev:\n name = 'requirements_dev.txt'\n elif opts.doc:\n name = 'requirements_doc.txt'\n else:\n name = 'requirements.txt'\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#')]\n return install_requires", - "docstring": "Get the proper requirements file based on the optional argument" - }, - { - "code": "def register_entity(self, entity_config):\n if not issubclass(entity_config, EntityConfig):\n raise ValueError('Must register entity config class of subclass EntityConfig')\n if entity_config.queryset is None:\n raise ValueError('Entity config must define queryset')\n model = entity_config.queryset.model\n self._entity_registry[model] = entity_config()\n for watching_model, entity_model_getter in entity_config.watching:\n self._entity_watching[watching_model].append((model, entity_model_getter))", - "docstring": "Registers an entity config" - }, - { - "code": "def task_id_str(task_family, params):\n param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)\n param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()\n param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]\n for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))\n param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)\n return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH])", - "docstring": "Returns a canonical string used to identify a particular task\n\n :param task_family: The task family (class name) of the task\n :param params: a dict mapping parameter names to their serialized values\n :return: A unique, shortened identifier corresponding to the family and params" - }, - { - "code": "async def update(self, **kwargs):\n try:\n self.data[self.pk] = self.pk_type(kwargs['pk'])\n updated_obj = await self._meta.object_class().update(self.db, data=self.data)\n if updated_obj is None:\n raise NotFound('Object matching the given {} was not found'.format(self.pk))\n return await updated_obj.serialize()\n except Exception as ex:\n logger.exception(ex)\n raise BadRequest(ex)", - "docstring": "Corresponds to PUT request with a resource identifier, updating a single document in the database" - }, - { - "code": "def connect(self):\n logger.info(\"Connecting to Redis on {host}:{port}...\".format(\n host=self.host, port=self.port))\n super(RedisSubscriber, self).connect()\n logger.info(\"Successfully connected to Redis\")\n self.pubsub = self.client.pubsub()\n self.pubsub.subscribe(self.channel)\n logger.info(\"Subscribed to [{channel}] Redis channel\".format(\n channel=self.channel))\n t = Thread(target=self.listen)\n t.setDaemon(True)\n t.start()", - "docstring": "Connects to Redis" - }, - { - "code": "def get_int(byte_array, signed=True):\n return int.from_bytes(byte_array, byteorder='big', signed=signed)", - "docstring": "Gets the specified integer from its byte array.\n This should be used by this module alone, as it works with big endian.\n\n :param byte_array: the byte array representing the integer.\n :param signed: whether the number is signed or not.\n :return: the integer representing the given byte array." 
- }, - { - "code": "def on_epoch_end(self, **kwargs):\n \"step the rest of the accumulated grads if not perfectly divisible\"\n for p in (self.learn.model.parameters()):\n if p.requires_grad: p.grad.div_(self.acc_samples)\n if not self.drop_last: self.learn.opt.step()\n self.learn.opt.zero_grad()", - "docstring": "step the rest of the accumulated grads if not perfectly divisible" - }, - { - "code": "def choice_default_loader(self, pk):\n try:\n obj = Choice.objects.get(pk=pk)\n except Choice.DoesNotExist:\n return None\n else:\n self.choice_default_add_related_pks(obj)\n return obj", - "docstring": "Load a Choice from the database." - }, - { - "code": "def query_binary_data(self, target, display_mask, attr):\n reply = NVCtrlQueryBinaryDataReplyRequest(display=self.display,\n opcode=self.display.get_extension_major(extname),\n target_id=target.id(),\n target_type=target.type(),\n display_mask=display_mask,\n attr=attr)\n if not reply._data.get('flags'):\n return None\n return reply._data.get('data')", - "docstring": "Return binary data" - }, - { - "code": "def valid_host(host):\n for part in host.split(\".\"):\n if not _valid_host_part.match(part):\n return False\n return True", - "docstring": "check valid hostname" - }, - { - "code": "def migrate(self, host, port, keys, destination_db, timeout,\n copy=False, replace=False, auth=None):\n keys = list_or_args(keys, [])\n if not keys:\n raise DataError('MIGRATE requires at least one key')\n pieces = []\n if copy:\n pieces.append(Token.get_token('COPY'))\n if replace:\n pieces.append(Token.get_token('REPLACE'))\n if auth:\n pieces.append(Token.get_token('AUTH'))\n pieces.append(auth)\n pieces.append(Token.get_token('KEYS'))\n pieces.extend(keys)\n return self.execute_command('MIGRATE', host, port, '', destination_db,\n timeout, *pieces)", - "docstring": "Migrate 1 or more keys from the current Redis server to a different\n server specified by the ``host``, ``port`` and ``destination_db``.\n\n The ``timeout``, specified in milliseconds, indicates the maximum\n time the connection between the two servers can be idle before the\n command is interrupted.\n\n If ``copy`` is True, the specified ``keys`` are NOT deleted from\n the source server.\n\n If ``replace`` is True, this operation will overwrite the keys\n on the destination server if they exist.\n\n If ``auth`` is specified, authenticate to the destination server with\n the password provided." - }, - { - "code": "def navigation_info(request):\n if request.GET.get('wafer_hide_navigation') == \"1\":\n nav_class = \"wafer-invisible\"\n else:\n nav_class = \"wafer-visible\"\n context = {\n 'WAFER_NAVIGATION_VISIBILITY': nav_class,\n }\n return context", - "docstring": "Expose whether to display the navigation header and footer" - }, - { - "code": "def client(addr):\n success = False\n while not success:\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.connect(addr)\n success = True\n except socket.error as err:\n sock.close()\n talk = SocketTalk(sock)\n return talk", - "docstring": "Return a SocketTalk client." 
- }, - { - "code": "def get_all_results(starting_page):\n logging.info('Retrieving all results for {}'.format(starting_page))\n page = starting_page\n results = []\n while True:\n logging.debug('Getting data from: {}'.format(page))\n data = get_page(page)\n logging.debug('JSON data: {}'.format(data))\n results = results + data['results']\n if data['next']:\n page = data['next']\n else:\n break\n return results", - "docstring": "Given starting API query for Open Humans, iterate to get all results.\n\n :param starting page: This field is the first page, starting from which\n results will be obtained." - }, - { - "code": "def register(app_name, modules):\n global INSTALLED_APPS_REGISTER\n mod_list = INSTALLED_APPS_REGISTER.get(app_name, [])\n if isinstance(modules, basestring):\n mod_list.append(modules)\n elif is_iterable(modules):\n mod_list.extend(modules)\n INSTALLED_APPS_REGISTER[app_name] = mod_list", - "docstring": "simple module registering for later usage\n we don't want to import admin.py in models.py" - }, - { - "code": "def p_flatten(self, obj, **kwargs):\n if isinstance(obj, six.string_types):\n return obj\n result = \"\"\n for i in obj:\n result += self.p_flatten(i)\n return result", - "docstring": "Flatten a list of lists of lists... of strings into a string\n\n This is usually used as the action for sequence expressions:\n\n .. code-block::\n\n my_rule <- 'a' . 'c' {p_flatten}\n\n With the input \"abc\" and no action, this rule returns [ 'a', 'b', 'c'].\n { p_flatten } procuces \"abc\".\n\n >>> parser.p_flatten(['a', ['b', 'c']])\n 'abc'" - }, - { - "code": "def outputWord(self):\n import docx\n from docx.enum.text import WD_ALIGN_PARAGRAPH\n doc = docx.Document()\n doc.styles['Normal'].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n doc.add_heading(self.title, level=0)\n if self.addTime:\n from time import localtime, strftime\n doc.add_heading(strftime(\"%Y-%m-%d %H:%M:%S\", localtime()), level=1)\n if self.p:\n doc.add_heading('Introduction',level=1)\n for p in renewliner(self.p).split('\\n'):\n doc.add_paragraph(p)\n c = count(1)\n self.listFigures(tuple())\n self.listTables(tuple())\n for section in self.sections:\n section.sectionsWord((next(c),),doc=doc)\n if self.conclusion:\n doc.add_heading('Conclusion', level=1)\n for p in renewliner(self.conclusion).split('\\n'):\n doc.add_paragraph(p)\n doc.save(self.outfile+'.docx')", - "docstring": "Output report to word docx" - }, - { - "code": "def do_gui_update(self):\n with self.update_lock:\n changed_widget_dict = {}\n self.root.repr(changed_widget_dict)\n for widget in changed_widget_dict.keys():\n html = changed_widget_dict[widget]\n __id = str(widget.identifier)\n self._send_spontaneous_websocket_message(_MSG_UPDATE + __id + ',' + to_websocket(html))\n self._need_update_flag = False", - "docstring": "This method gets called also by Timer, a new thread, and so needs to lock the update" - }, - { - "code": "def files(self):\n if self._rundir['ls'] is UNDETERMINED:\n out_stem = pathlib.Path(self.par['ioin']['output_file_stem'] + '_')\n out_dir = self.path / out_stem.parent\n if out_dir.is_dir():\n self._rundir['ls'] = set(out_dir.iterdir())\n else:\n self._rundir['ls'] = set()\n return self._rundir['ls']", - "docstring": "Set of found binary files output by StagYY." 
- }, - { - "code": "def fixpoint_runner(self):\n q = self.cfg.nodes\n while q != []:\n x_i = constraint_table[q[0]]\n self.analysis.fixpointmethod(q[0])\n y = constraint_table[q[0]]\n if y != x_i:\n for node in self.analysis.dep(q[0]):\n q.append(node)\n constraint_table[q[0]] = y\n q = q[1:]", - "docstring": "Work list algorithm that runs the fixpoint algorithm." - }, - { - "code": "def color(text, color=None, background=None, light=False, enabled=\"auto\"):\n colors = {\"black\": 30, \"red\": 31, \"green\": 32, \"yellow\": 33,\n \"blue\": 34, \"magenta\": 35, \"cyan\": 36, \"white\": 37}\n if enabled == \"auto\":\n enabled = Coloring().enabled()\n if not enabled:\n return text\n if color and color.startswith(\"light\"):\n light = True\n color = color[5:]\n color = color and \";{0}\".format(colors[color]) or \"\"\n background = background and \";{0}\".format(colors[background] + 10) or \"\"\n light = light and 1 or 0\n start = \"\\033[{0}{1}{2}m\".format(light, color, background)\n finish = \"\\033[1;m\"\n return \"\".join([start, text, finish])", - "docstring": "Return text in desired color if coloring enabled\n\n Available colors: black red green yellow blue magenta cyan white.\n Alternatively color can be prefixed with \"light\", e.g. lightgreen." - }, - { - "code": "def CutAtClosestPoint(self, p):\n (closest, i) = self.GetClosestPoint(p)\n tmp = [closest]\n tmp.extend(self._points[i+1:])\n return (Poly(self._points[0:i+1]),\n Poly(tmp))", - "docstring": "Let x be the point on the polyline closest to p. Then\n CutAtClosestPoint returns two new polylines, one representing\n the polyline from the beginning up to x, and one representing\n x onwards to the end of the polyline. x is the first point\n returned in the second polyline." - }, - { - "code": "def speak(self, text):\n if not self.is_valid_string(text):\n raise Exception(\"%s is not ISO-8859-1 compatible.\" % (text))\n if len(text) > 1023:\n lines = self.word_wrap(text, width=1023)\n for line in lines:\n self.queue.put(\"S%s\" % (line))\n else:\n self.queue.put(\"S%s\" % (text))", - "docstring": "The main function to convert text into speech." - }, - { - "code": "def plot_density(population,\n bins=100, new_fig=True, subplot=111, levels=None, plane='xy',\n colorlabel='Nodes per unit area', labelfontsize=16,\n color_map='Reds', no_colorbar=False, threshold=0.01,\n neurite_type=NeuriteType.basal_dendrite, **kwargs):\n fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)\n H1, xedges1, yedges1 = extract_density(population, plane=plane, bins=bins,\n neurite_type=neurite_type)\n mask = H1 < threshold\n H2 = np.ma.masked_array(H1, mask)\n getattr(plt.cm, color_map).set_bad(color='white', alpha=None)\n plots = ax.contourf((xedges1[:-1] + xedges1[1:]) / 2,\n (yedges1[:-1] + yedges1[1:]) / 2,\n np.transpose(H2),\n cmap=getattr(plt.cm, color_map), levels=levels)\n if not no_colorbar:\n cbar = plt.colorbar(plots)\n cbar.ax.set_ylabel(colorlabel, fontsize=labelfontsize)\n kwargs['title'] = kwargs.get('title', '')\n kwargs['xlabel'] = kwargs.get('xlabel', plane[0])\n kwargs['ylabel'] = kwargs.get('ylabel', plane[1])\n return common.plot_style(fig=fig, ax=ax, **kwargs)", - "docstring": "Plots the 2d histogram of the center\n coordinates of segments in the selected plane." 
- }, - { - "code": "def update_device_info(self, device_info):\n if _debug: DeviceInfoCache._debug(\"update_device_info %r\", device_info)\n if not hasattr(device_info, '_ref_count'):\n device_info._ref_count = 0\n cache_id, cache_address = getattr(device_info, '_cache_keys', (None, None))\n if (cache_id is not None) and (device_info.deviceIdentifier != cache_id):\n if _debug: DeviceInfoCache._debug(\" - device identifier updated\")\n del self.cache[cache_id]\n self.cache[device_info.deviceIdentifier] = device_info\n if (cache_address is not None) and (device_info.address != cache_address):\n if _debug: DeviceInfoCache._debug(\" - device address updated\")\n del self.cache[cache_address]\n self.cache[device_info.address] = device_info\n device_info._cache_keys = (device_info.deviceIdentifier, device_info.address)", - "docstring": "The application has updated one or more fields in the device\n information record and the cache needs to be updated to reflect the\n changes. If this is a cached version of a persistent record then this\n is the opportunity to update the database." - }, - { - "code": "def safe_to_exit(self, *args, **kargs):\n if len(self.value) == self.instance_num:\n return True\n return False", - "docstring": "Overrided to prevent user from exiting selection until\n they have selected the right amount of instances" - }, - { - "code": "def from_dict(cls, serialized):\n if serialized is None:\n return None\n macaroon = serialized.get('Macaroon')\n if macaroon is not None:\n macaroon = bakery.Macaroon.from_dict(macaroon)\n path = serialized.get('MacaroonPath')\n cookie_name_suffix = serialized.get('CookieNameSuffix')\n visit_url = serialized.get('VisitURL')\n wait_url = serialized.get('WaitURL')\n interaction_methods = serialized.get('InteractionMethods')\n return ErrorInfo(macaroon=macaroon, macaroon_path=path,\n cookie_name_suffix=cookie_name_suffix,\n visit_url=visit_url, wait_url=wait_url,\n interaction_methods=interaction_methods)", - "docstring": "Create a new ErrorInfo object from a JSON deserialized\n dictionary\n @param serialized The JSON object {dict}\n @return ErrorInfo object" - }, - { - "code": "def strip_inserts(fasta):\n for seq in parse_fasta(fasta):\n seq[1] = ''.join([b for b in seq[1] if b == '-' or b.isupper()])\n yield seq", - "docstring": "remove insertion columns from aligned fasta file" - }, - { - "code": "def execute(self):\n self.log(u\"Executing task...\")\n if self.task.audio_file is None:\n self.log_exc(u\"The task does not seem to have its audio file set\", None, True, ExecuteTaskInputError)\n if (\n (self.task.audio_file.audio_length is None) or\n (self.task.audio_file.audio_length <= 0)\n ):\n self.log_exc(u\"The task seems to have an invalid audio file\", None, True, ExecuteTaskInputError)\n task_max_audio_length = self.rconf[RuntimeConfiguration.TASK_MAX_AUDIO_LENGTH]\n if (\n (task_max_audio_length > 0) and\n (self.task.audio_file.audio_length > task_max_audio_length)\n ):\n self.log_exc(u\"The audio file of the task has length %.3f, more than the maximum allowed (%.3f).\" % (self.task.audio_file.audio_length, task_max_audio_length), None, True, ExecuteTaskInputError)\n if self.task.text_file is None:\n self.log_exc(u\"The task does not seem to have its text file set\", None, True, ExecuteTaskInputError)\n if len(self.task.text_file) == 0:\n self.log_exc(u\"The task text file seems to have no text fragments\", None, True, ExecuteTaskInputError)\n task_max_text_length = self.rconf[RuntimeConfiguration.TASK_MAX_TEXT_LENGTH]\n if (\n 
(task_max_text_length > 0) and\n (len(self.task.text_file) > task_max_text_length)\n ):\n self.log_exc(u\"The text file of the task has %d fragments, more than the maximum allowed (%d).\" % (len(self.task.text_file), task_max_text_length), None, True, ExecuteTaskInputError)\n if self.task.text_file.chars == 0:\n self.log_exc(u\"The task text file seems to have empty text\", None, True, ExecuteTaskInputError)\n self.log(u\"Both audio and text input file are present\")\n self.step_index = 1\n self.step_total = 0.000\n if self.task.text_file.file_format in TextFileFormat.MULTILEVEL_VALUES:\n self._execute_multi_level_task()\n else:\n self._execute_single_level_task()\n self.log(u\"Executing task... done\")", - "docstring": "Execute the task.\n The sync map produced will be stored inside the task object.\n\n :raises: :class:`~aeneas.executetask.ExecuteTaskInputError`: if there is a problem with the input parameters\n :raises: :class:`~aeneas.executetask.ExecuteTaskExecutionError`: if there is a problem during the task execution" - }, - { - "code": "def centerize(src, dst_shape, margin_color=None):\n if src.shape[:2] == dst_shape[:2]:\n return src\n centerized = np.zeros(dst_shape, dtype=src.dtype)\n if margin_color:\n centerized[:, :] = margin_color\n pad_vertical, pad_horizontal = 0, 0\n h, w = src.shape[:2]\n dst_h, dst_w = dst_shape[:2]\n if h < dst_h:\n pad_vertical = (dst_h - h) // 2\n if w < dst_w:\n pad_horizontal = (dst_w - w) // 2\n centerized[pad_vertical:pad_vertical + h,\n pad_horizontal:pad_horizontal + w] = src\n return centerized", - "docstring": "Centerize image for specified image size\n\n @param src: image to centerize\n @param dst_shape: image shape (height, width) or (height, width, channel)" - }, - { - "code": "def print_pole_mean(mean_dictionary):\n print('Plon: ' + str(round(mean_dictionary['dec'], 1)) +\n ' Plat: ' + str(round(mean_dictionary['inc'], 1)))\n print('Number of directions in mean (n): ' + str(mean_dictionary['n']))\n print('Angular radius of 95% confidence (A_95): ' +\n str(round(mean_dictionary['alpha95'], 1)))\n print('Precision parameter (k) estimate: ' +\n str(round(mean_dictionary['k'], 1)))", - "docstring": "Does a pretty job printing a Fisher mean and associated statistics for\n mean paleomagnetic poles.\n\n Parameters\n ----------\n mean_dictionary: output dictionary of pmag.fisher_mean\n\n Examples\n --------\n Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely\n using ``ipmag.print_pole_mean``\n\n >>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])\n >>> ipmag.print_pole_mean(my_mean)\n Plon: 136.3 Plat: 21.3\n Number of directions in mean (n): 4\n Angular radius of 95% confidence (A_95): 7.3\n Precision parameter (k) estimate: 159.7" - }, - { - "code": "def empty(self):\n self._selected_item = None\n self._selected_key = None\n super(ListView, self).empty()", - "docstring": "Removes all children from the list" - }, - { - "code": "def _streaming_request_iterable(self, config, requests):\n yield self.types.StreamingRecognizeRequest(streaming_config=config)\n for request in requests:\n yield request", - "docstring": "A generator that yields the config followed by the requests.\n\n Args:\n config (~.speech_v1.types.StreamingRecognitionConfig): The\n configuration to use for the stream.\n requests (Iterable[~.speech_v1.types.StreamingRecognizeRequest]):\n The input objects.\n\n Returns:\n Iterable[~.speech_v1.types.StreamingRecognizeRequest]): The\n correctly formatted input for\n 
:meth:`~.speech_v1.SpeechClient.streaming_recognize`." - }, - { - "code": "def join_path(self, *path):\n\t\tpath = self.directory_sep().join(path)\n\t\treturn self.normalize_path(path)", - "docstring": "Unite entries to generate a single path\n\n\t\t:param path: path items to unite\n\n\t\t:return: str" - }, - { - "code": "def _parse_cigar(self, cigar):\n ces = [m.groupdict() for m in cigar_re.finditer(cigar)]\n ref_pos = [None] * len(ces)\n tgt_pos = [None] * len(ces)\n cigar_op = [None] * len(ces)\n ref_cur = tgt_cur = 0\n for i, ce in enumerate(ces):\n ref_pos[i] = ref_cur\n tgt_pos[i] = tgt_cur\n cigar_op[i] = ce[\"op\"]\n step = int(ce[\"len\"])\n if ce[\"op\"] in \"=MINX\":\n ref_cur += step\n if ce[\"op\"] in \"=MDX\":\n tgt_cur += step\n ref_pos.append(ref_cur)\n tgt_pos.append(tgt_cur)\n return ref_pos, tgt_pos, cigar_op", - "docstring": "For a given CIGAR string, return the start positions of\n each aligned segment in ref and tgt, and a list of CIGAR operators." - }, - { - "code": "def add_pool(self, pool, match=None):\n if match is None:\n self.default_pool = pool\n else:\n self.pools.append((match, pool))", - "docstring": "Adds a new account pool. If the given match argument is\n None, the pool the default pool. Otherwise, the match argument is\n a callback function that is invoked to decide whether or not the\n given pool should be used for a host.\n\n When Exscript logs into a host, the account is chosen in the following\n order:\n\n # Exscript checks whether an account was attached to the\n :class:`Host` object using :class:`Host.set_account()`), and uses that.\n\n # If the :class:`Host` has no account attached, Exscript walks\n through all pools that were passed to :class:`Queue.add_account_pool()`.\n For each pool, it passes the :class:`Host` to the function in the\n given match argument. If the return value is True, the account\n pool is used to acquire an account.\n (Accounts within each pool are taken in a round-robin\n fashion.)\n\n # If no matching account pool is found, an account is taken\n from the default account pool.\n\n # Finally, if all that fails and the default account pool\n contains no accounts, an error is raised.\n\n Example usage::\n\n def do_nothing(conn):\n conn.autoinit()\n\n def use_this_pool(host):\n return host.get_name().startswith('foo')\n\n default_pool = AccountPool()\n default_pool.add_account(Account('default-user', 'password'))\n\n other_pool = AccountPool()\n other_pool.add_account(Account('user', 'password'))\n\n queue = Queue()\n queue.account_manager.add_pool(default_pool)\n queue.account_manager.add_pool(other_pool, use_this_pool)\n\n host = Host('localhost')\n queue.run(host, do_nothing)\n\n In the example code, the host has no account attached. As a result,\n the queue checks whether use_this_pool() returns True. Because the\n hostname does not start with 'foo', the function returns False, and\n Exscript takes the 'default-user' account from the default pool.\n\n :type pool: AccountPool\n :param pool: The account pool that is added.\n :type match: callable\n :param match: A callback to check if the pool should be used." 
- }, - { - "code": "def metaclass(*metaclasses):\n def _inner(cls):\n metabases = tuple(\n collections.OrderedDict(\n (c, None) for c in (metaclasses + (type(cls),))\n ).keys()\n )\n _Meta = metabases[0]\n for base in metabases[1:]:\n class _Meta(base, _Meta):\n pass\n return six.add_metaclass(_Meta)(cls)\n return _inner", - "docstring": "Create the class using all metaclasses.\n\n Args:\n metaclasses: A tuple of metaclasses that will be used to generate and\n replace a specified class.\n\n Returns:\n A decorator that will recreate the class using the specified\n metaclasses." - }, - { - "code": "def get_wellseries(self, matrix):\n res = OrderedDict()\n for col, cells in matrix.items():\n if col not in res:\n res[col] = OrderedDict()\n for row, cell in cells.items():\n res[col][row] = self.children_by_name[\n ''.join(cell)\n ]\n res[col] = WellSeries(res[col], name=col)\n return WellSeries(res)", - "docstring": "Returns the grid as a WellSeries of WellSeries" - }, - { - "code": "def load(self, filename, offset):\n try:\n self.offset = offset\n except IOError:\n self.logger.error('Unable to load EfiSystem volume')", - "docstring": "Will eventually load information for Apple_Boot volume. \\\n Not yet implemented" - }, - { - "code": "def prompt(self, timeout=-1):\n if timeout == -1:\n timeout = self.timeout\n i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)\n if i==1:\n return False\n return True", - "docstring": "Match the next shell prompt.\n\n This is little more than a short-cut to the :meth:`~pexpect.spawn.expect`\n method. Note that if you called :meth:`login` with\n ``auto_prompt_reset=False``, then before calling :meth:`prompt` you must\n set the :attr:`PROMPT` attribute to a regex that it will use for\n matching the prompt.\n\n Calling :meth:`prompt` will erase the contents of the :attr:`before`\n attribute even if no prompt is ever matched. If timeout is not given or\n it is set to -1 then self.timeout is used.\n\n :return: True if the shell prompt was matched, False if the timeout was\n reached." - }, - { - "code": "def _set_values(self):\n for k, v in self._data.items():\n if isinstance(v, dict):\n try:\n rel_model = self._model\n for attr in k.split('__'):\n rel_model = getattr(rel_model, attr).field.related_model\n except AttributeError:\n pass\n else:\n k = k.replace('__', '_')\n if 'id' in v and v['id'] is None:\n v = None\n else:\n v = rel_model(**v)\n setattr(self, k, v)", - "docstring": "Populate instance with given." - }, - { - "code": "def connect(self, id):\n schema = ConnectionSchema()\n resp = self.service.post(self.base+str(id)+'/connect/')\n return self.service.decode(schema, resp)", - "docstring": "Open proxy connection to a device's management interface.\n\n :param id: Device ID as an int.\n :return: :class:`devices.Connection ` object\n :rtype: devices.Connection" - }, - { - "code": "def rerecord(ctx, rest):\n run('tox -e py27 -- --cassette-mode all --record --credentials {0} -s'\n .format(rest), pty=True)\n run('tox -e py27 -- --resave --scrub --credentials test_credentials {0} -s'\n .format(rest), pty=True)", - "docstring": "Rerecord tests." - }, - { - "code": "def team_abbreviation(self):\n if self._season[self._index].lower() == 'career':\n return None\n return self._team_abbreviation[self._index]", - "docstring": "Returns a ``string`` of the team's abbreviation, such as 'DET' for the\n Detroit Red Wings." 
- }, - { - "code": "def _interpret_lines(self, lines, compare_all=False):\n current = []\n for line in lines + ['']:\n if isinstance(line, str):\n if current and (line.startswith(self.PS1) or not line):\n try:\n if compare_all:\n self._compare(CodeAnswer(), '\\n'.join(current))\n else:\n self.evaluate('\\n'.join(current))\n except ConsoleException:\n return False\n current = []\n if line:\n print(line)\n line = self._strip_prompt(line)\n current.append(line)\n elif isinstance(line, CodeAnswer):\n assert len(current) > 0, 'Answer without a prompt'\n try:\n self._compare(line, '\\n'.join(current))\n except ConsoleException:\n return False\n current = []\n return True", - "docstring": "Interprets the set of lines.\n\n PARAMTERS:\n lines -- list of str; lines of code\n compare_all -- bool; if True, check for no output for lines that are not\n followed by a CodeAnswer\n\n RETURNS:\n bool; True if successful, False otherwise." - }, - { - "code": "def get_sensitivity(self, config_nr):\n if self.assignments['sensitivities'] is None:\n self._check_state()\n if self.can_model:\n self.model(sensitivities=True)\n cids = self.assignments['sensitivities'][config_nr]\n sens_data = [self.parman.parsets[cid] for cid in cids]\n meta_data = [self.parman.metadata[cid] for cid in cids]\n return sens_data, meta_data", - "docstring": "return a sensitivity, as well as corresponding metadata, for a given\n measurement configuration. Indices start at zero." - }, - { - "code": "def create_chunked_body_end(trailers=None):\n chunk = []\n chunk.append('0\\r\\n')\n if trailers:\n for name, value in trailers:\n chunk.append(name)\n chunk.append(': ')\n chunk.append(value)\n chunk.append('\\r\\n')\n chunk.append('\\r\\n')\n return s2b(''.join(chunk))", - "docstring": "Create the ending that terminates a chunked body." - }, - { - "code": "def resolve_cheetah_template(change_type):\n tm = cheetah_template_map()\n for t in change_type.mro():\n tmpl = tm.get(t)\n if tmpl:\n return tmpl\n raise Exception(\"No template for class %s\" % change_type.__name__)", - "docstring": "return the appropriate cheetah template class for the given change\n type, using the method-resolution-order of the change type." - }, - { - "code": "def size_filter(labeled_grid, min_size):\n out_grid = np.zeros(labeled_grid.shape, dtype=int)\n slices = find_objects(labeled_grid)\n j = 1\n for i, s in enumerate(slices):\n box = labeled_grid[s]\n size = np.count_nonzero(box.ravel() == (i + 1))\n if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:\n out_grid[np.where(labeled_grid == i + 1)] = j\n j += 1\n return out_grid", - "docstring": "Remove labeled objects that do not meet size threshold criteria.\n\n Args:\n labeled_grid: 2D output from label method.\n min_size: minimum size of object in pixels.\n\n Returns:\n labeled grid with smaller objects removed." 
- }, - { - "code": "def check(self):\n \"Checks EPUB integrity\"\n config = self.load_config()\n if not check_dependency_epubcheck():\n sys.exit(error('Unavailable command.'))\n epub_file = u\"%s.epub\" % config['fileroot']\n epub_path = join(CWD, 'build', epub_file)\n print success(\"Starting to check %s...\" % epub_file)\n epubcheck = u'epubcheck %s' % epub_path\n epubcheck = shell(epubcheck.encode())\n for line in epubcheck.errors():\n print error(line)\n for line in epubcheck.output():\n print line", - "docstring": "Checks EPUB integrity" - }, - { - "code": "def difficulties_by_voivodeship(voivodeship, dt=datetime.now()):\n session = requests.Session()\n session.headers.update({'User-Agent': USER_AGENT})\n session.headers.update({'X-Requested-With': 'XMLHttpRequest'})\n session.get('{}/Mapa/'.format(HOST))\n url = '{}/Mapa/PodajUtrudnieniaWWojewodztwie?KodWojewodztwa={}&_={}'.format(HOST, str(voivodeship), _datetime_to_asp_date(dt))\n response = session.get(url)\n json_data = response.json() if len(response.text) > 0 else []\n return json_data", - "docstring": "Get difficulties in voivodeship.\n\n :param voivodeship: Voivodeship numeric value.\n :param dt: Datetime for data. Default: datetime.now()\n\n :return: List of difficulties by voivodeship." - }, - { - "code": "def _set_last_aid(func):\n @functools.wraps(func)\n def new_func(self, *args, **kwargs):\n aid = func(self, *args, **kwargs)\n self.last_aid = aid\n return aid\n return new_func", - "docstring": "Decorator for setting last_aid." - }, - { - "code": "def update_note(note, **kwargs):\n note_i = _get_note(note.id)\n if note.ref_key != note_i.ref_key:\n raise HydraError(\"Cannot convert a %s note to a %s note. Please create a new note instead.\"%(note_i.ref_key, note.ref_key))\n note_i.set_ref(note.ref_key, note.ref_id)\n note_i.value = note.value\n db.DBSession.flush()\n return note_i", - "docstring": "Update a note" - }, - { - "code": "def create(self, initial_split_keys=[], column_families={}):\n table_client = self._instance._client.table_admin_client\n instance_name = self._instance.name\n families = {\n id: ColumnFamily(id, self, rule).to_pb()\n for (id, rule) in column_families.items()\n }\n table = admin_messages_v2_pb2.Table(column_families=families)\n split = table_admin_messages_v2_pb2.CreateTableRequest.Split\n splits = [split(key=_to_bytes(key)) for key in initial_split_keys]\n table_client.create_table(\n parent=instance_name,\n table_id=self.table_id,\n table=table,\n initial_splits=splits,\n )", - "docstring": "Creates this table.\n\n For example:\n\n .. literalinclude:: snippets_table.py\n :start-after: [START bigtable_create_table]\n :end-before: [END bigtable_create_table]\n\n .. note::\n\n A create request returns a\n :class:`._generated.table_pb2.Table` but we don't use\n this response.\n\n :type initial_split_keys: list\n :param initial_split_keys: (Optional) list of row keys in bytes that\n will be used to initially split the table\n into several tablets.\n\n :type column_families: dict\n :param column_failies: (Optional) A map columns to create. 
The key is\n the column_id str and the value is a\n :class:`GarbageCollectionRule`" - }, - { - "code": "def include_revision(revision_num, skip_factor=1.1):\n if skip_factor <= 1.0:\n return True\n return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(\n math.log(revision_num + 2.0) / math.log(skip_factor)))", - "docstring": "Decide whether to include a revision.\n\n If the number of revisions is large, we exclude some revisions to avoid\n a quadratic blowup in runtime, since the article is likely also large.\n\n We make the ratio between consecutive included revision numbers\n appproximately equal to \"factor\".\n\n Args:\n revision_num: an integer\n skip_factor: a floating point number >= 1.0\n\n Returns:\n a boolean" - }, - { - "code": "def _encode_sequence(self, inputs, token_types, valid_length=None):\n word_embedding = self.word_embed(inputs)\n type_embedding = self.token_type_embed(token_types)\n embedding = word_embedding + type_embedding\n outputs, additional_outputs = self.encoder(embedding, None, valid_length)\n return outputs, additional_outputs", - "docstring": "Generate the representation given the input sequences.\n\n This is used for pre-training or fine-tuning a BERT model." - }, - { - "code": "def print_info(info_mapping):\n if not info_mapping:\n return\n content_format = \"{:<16} : {:<}\\n\"\n content = \"\\n==================== Output ====================\\n\"\n content += content_format.format(\"Variable\", \"Value\")\n content += content_format.format(\"-\" * 16, \"-\" * 29)\n for key, value in info_mapping.items():\n if isinstance(value, (tuple, collections.deque)):\n continue\n elif isinstance(value, (dict, list)):\n value = json.dumps(value)\n elif value is None:\n value = \"None\"\n if is_py2:\n if isinstance(key, unicode):\n key = key.encode(\"utf-8\")\n if isinstance(value, unicode):\n value = value.encode(\"utf-8\")\n content += content_format.format(key, value)\n content += \"-\" * 48 + \"\\n\"\n logger.log_info(content)", - "docstring": "print info in mapping.\n\n Args:\n info_mapping (dict): input(variables) or output mapping.\n\n Examples:\n >>> info_mapping = {\n \"var_a\": \"hello\",\n \"var_b\": \"world\"\n }\n >>> info_mapping = {\n \"status_code\": 500\n }\n >>> print_info(info_mapping)\n ==================== Output ====================\n Key : Value\n ---------------- : ----------------------------\n var_a : hello\n var_b : world\n ------------------------------------------------" - }, - { - "code": "def standard_output_generation(self, groups, limit, points, out_of, check):\n if points < out_of:\n self.reasoning_routine(groups, check, priority_flag=limit)\n else:\n print(\"All tests passed!\")", - "docstring": "Generates the Terminal Output" - }, - { - "code": "def add(self, type, orig, replace):\n ret = libxml2mod.xmlACatalogAdd(self._o, type, orig, replace)\n return ret", - "docstring": "Add an entry in the catalog, it may overwrite existing but\n different entries." 
- }, - { - "code": "def generate(cls, public_keys, amount):\n threshold = len(public_keys)\n if not isinstance(amount, int):\n raise TypeError('`amount` must be a int')\n if amount < 1:\n raise AmountError('`amount` needs to be greater than zero')\n if not isinstance(public_keys, list):\n raise TypeError('`public_keys` must be an instance of list')\n if len(public_keys) == 0:\n raise ValueError('`public_keys` needs to contain at least one'\n 'owner')\n elif len(public_keys) == 1 and not isinstance(public_keys[0], list):\n if isinstance(public_keys[0], Fulfillment):\n ffill = public_keys[0]\n else:\n ffill = Ed25519Sha256(\n public_key=base58.b58decode(public_keys[0]))\n return cls(ffill, public_keys, amount=amount)\n else:\n initial_cond = ThresholdSha256(threshold=threshold)\n threshold_cond = reduce(cls._gen_condition, public_keys,\n initial_cond)\n return cls(threshold_cond, public_keys, amount=amount)", - "docstring": "Generates a Output from a specifically formed tuple or list.\n\n Note:\n If a ThresholdCondition has to be generated where the threshold\n is always the number of subconditions it is split between, a\n list of the following structure is sufficient:\n\n [(address|condition)*, [(address|condition)*, ...], ...]\n\n Args:\n public_keys (:obj:`list` of :obj:`str`): The public key of\n the users that should be able to fulfill the Condition\n that is being created.\n amount (:obj:`int`): The amount locked by the Output.\n\n Returns:\n An Output that can be used in a Transaction.\n\n Raises:\n TypeError: If `public_keys` is not an instance of `list`.\n ValueError: If `public_keys` is an empty list." - }, - { - "code": "def _read_stc(stc_file):\n hdr = _read_hdr_file(stc_file)\n stc_dtype = dtype([('segment_name', 'a256'),\n ('start_stamp', ', _.ERD (first)\n - , _.ETC (first)\n - , __.ERD (second and subsequent)\n - , __.ETC (second and subsequent)\n\n is formatted with \"%03d\" format specifier and starts at 1 (initial\n value being 0 and omitted for compatibility with the previous versions)." - }, - { - "code": "def priority(self, item):\n return '%.1f' % max(self.cache[item.pk][0] / self.max_entries, 0.1)", - "docstring": "The priority of the item depends of the number of entries published\n in the cache divided by the maximum of entries." 
- }, - { - "code": "def calc_min_cell_interdist(self, x, y, z):\n min_cell_interdist = np.zeros(self.POPULATION_SIZE)\n for i in range(self.POPULATION_SIZE):\n cell_interdist = np.sqrt((x[i] - x)**2\n + (y[i] - y)**2\n + (z[i] - z)**2)\n cell_interdist[i] = np.inf\n min_cell_interdist[i] = cell_interdist.min()\n return min_cell_interdist", - "docstring": "Calculate cell interdistance from input coordinates.\n\n\n Parameters\n ----------\n x, y, z : numpy.ndarray\n xyz-coordinates of each cell-body.\n\n\n Returns\n -------\n min_cell_interdist : np.nparray\n For each cell-body center, the distance to nearest neighboring cell" - }, - { - "code": "def _make_elastic_range(begin, end):\n starting_factor = max(1, (end - begin) // 100)\n factor = _iter_factors(starting_factor)\n left_half, right_half = [], []\n left_val, right_val = begin, end\n right_val = end\n while left_val < right_val:\n left_half.append(left_val)\n right_half.append(right_val)\n next_factor = next(factor)\n left_val = begin + next_factor\n right_val = end - next_factor\n if left_val == right_val:\n left_half.append(left_val)\n right_half.reverse()\n return left_half + right_half", - "docstring": "Generate an S-curved range of pages.\n\n Start from both left and right, adding exponentially growing indexes,\n until the two trends collide." - }, - { - "code": "def readInternalC(self):\n v = self._read32()\n v >>= 4\n internal = v & 0x7FF\n if v & 0x800:\n internal -= 4096\n return internal * 0.0625", - "docstring": "Return internal temperature value in degrees celsius." - }, - { - "code": "def send(self, cumulative_counters=None, gauges=None, counters=None):\n if not gauges and not cumulative_counters and not counters:\n return\n data = {\n 'cumulative_counter': cumulative_counters,\n 'gauge': gauges,\n 'counter': counters,\n }\n _logger.debug('Sending datapoints to SignalFx: %s', data)\n for metric_type, datapoints in data.items():\n if not datapoints:\n continue\n if not isinstance(datapoints, list):\n raise TypeError('Datapoints not of type list %s', datapoints)\n for datapoint in datapoints:\n self._add_extra_dimensions(datapoint)\n self._add_to_queue(metric_type, datapoint)\n self._start_thread()", - "docstring": "Send the given metrics to SignalFx.\n\n Args:\n cumulative_counters (list): a list of dictionaries representing the\n cumulative counters to report.\n gauges (list): a list of dictionaries representing the gauges to\n report.\n counters (list): a list of dictionaries representing the counters\n to report." 
- }, - { - "code": "def to_flattened(self, base_frame=None):\n if base_frame is None:\n base_frame = self.base_frame\n flat = {}\n for node in self.nodes:\n if node == base_frame:\n continue\n transform, geometry = self.get(\n frame_to=node, frame_from=base_frame)\n flat[node] = {\n 'transform': transform.tolist(),\n 'geometry': geometry\n }\n return flat", - "docstring": "Export the current transform graph as a flattened" - }, - { - "code": "def list_commands(self, ctx):\n rv = defaults.list_commands(ctx)\n if self._commands_dir:\n for filename in os.listdir(self._commands_dir):\n if _is_command_file(filename) and filename[:-3] not in rv:\n rv.append(filename[:-3])\n rv.sort()\n return rv", - "docstring": "List commands from the commands dir and default group" - }, - { - "code": "async def start(self):\n self._is_running = True\n while self._is_running:\n try:\n zmq_msg = await self._socket.recv_multipart()\n message = Message()\n message.ParseFromString(zmq_msg[-1])\n await self._msg_router.route_msg(message)\n except DecodeError as e:\n LOGGER.warning('Unable to decode: %s', e)\n except zmq.ZMQError as e:\n LOGGER.warning('Unable to receive: %s', e)\n return\n except asyncio.CancelledError:\n self._is_running = False", - "docstring": "Starts receiving messages on the underlying socket and passes them\n to the message router." - }, - { - "code": "def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):\n if varnames is not None:\n varnames = [s.strip() for s in list_strings(varnames)]\n dlist = collections.defaultdict(list)\n for task in self.select_tasks(nids=nids, wslice=wslice):\n dstruct = task.input.structure.as_dict(fmt=\"abivars\")\n for vname in varnames:\n value = task.input.get(vname, None)\n if value is None:\n value = dstruct.get(vname, None)\n if value is not None:\n dlist[vname].append((task, value))\n for vname in varnames:\n tv_list = dlist[vname]\n if not tv_list:\n stream.write(\"[%s]: Found 0 tasks with this variable\\n\" % vname)\n else:\n stream.write(\"[%s]: Found %s tasks with this variable\\n\" % (vname, len(tv_list)))\n for i, (task, value) in enumerate(tv_list):\n stream.write(\" %s --> %s\\n\" % (str(value), task))\n stream.write(\"\\n\")\n else:\n lines = []\n for task in self.select_tasks(nids=nids, wslice=wslice):\n s = task.make_input(with_header=True)\n if task.deps:\n s += \"\\n\\nDependencies:\\n\" + \"\\n\".join(str(dep) for dep in task.deps)\n else:\n s += \"\\n\\nDependencies: None\"\n lines.append(2*\"\\n\" + 80 * \"=\" + \"\\n\" + s + 2*\"\\n\")\n stream.writelines(lines)", - "docstring": "Print the input of the tasks to the given stream.\n\n Args:\n varnames:\n List of Abinit variables. If not None, only the variable in varnames\n are selected and printed.\n nids:\n List of node identifiers. By defaults all nodes are shown\n wslice:\n Slice object used to select works.\n stream:\n File-like object, Default: sys.stdout" - }, - { - "code": "def fit(self, data):\n _raise_error_if_not_sframe(data, \"data\")\n fitted_state = {}\n feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)\n if not feature_columns:\n raise RuntimeError(\"No valid feature columns specified in transformation.\")\n fitted_state['features'] = feature_columns\n fitted_state['fitted'] = True\n self.__proxy__.update(fitted_state)\n return self", - "docstring": "Fits the transformer using the given data." 
- }, - { - "code": "def visible_fields(self):\n form_visible_fields = self.form.visible_fields()\n if self.render_fields:\n fields = self.render_fields\n else:\n fields = [field.name for field in form_visible_fields]\n filtered_fields = [field for field in fields if field not in self.exclude_fields]\n return [field for field in form_visible_fields if field.name in filtered_fields]", - "docstring": "Returns the reduced set of visible fields to output from the form.\n\n This method respects the provided ``fields`` configuration _and_ exlcudes\n all fields from the ``exclude`` configuration.\n\n If no ``fields`` where provided when configuring this fieldset, all visible\n fields minus the excluded fields will be returned.\n\n :return: List of bound field instances or empty tuple." - }, - { - "code": "def _FlushCache(cls, format_categories):\n if definitions.FORMAT_CATEGORY_ARCHIVE in format_categories:\n cls._archive_remainder_list = None\n cls._archive_scanner = None\n cls._archive_store = None\n if definitions.FORMAT_CATEGORY_COMPRESSED_STREAM in format_categories:\n cls._compressed_stream_remainder_list = None\n cls._compressed_stream_scanner = None\n cls._compressed_stream_store = None\n if definitions.FORMAT_CATEGORY_FILE_SYSTEM in format_categories:\n cls._file_system_remainder_list = None\n cls._file_system_scanner = None\n cls._file_system_store = None\n if definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE in format_categories:\n cls._storage_media_image_remainder_list = None\n cls._storage_media_image_scanner = None\n cls._storage_media_image_store = None\n if definitions.FORMAT_CATEGORY_VOLUME_SYSTEM in format_categories:\n cls._volume_system_remainder_list = None\n cls._volume_system_scanner = None\n cls._volume_system_store = None", - "docstring": "Flushes the cached objects for the specified format categories.\n\n Args:\n format_categories (set[str]): format categories." - }, - { - "code": "def parse_string(self):\n aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())\n if aliased_value is not None:\n return aliased_value\n return String(self.current_token.value)", - "docstring": "Parse a regular unquoted string from the token stream." - }, - { - "code": "def dest_fpath(self, source_fpath: str) -> str:\n relative_fpath = os.path.join(*source_fpath.split(os.sep)[1:])\n relative_dirpath = os.path.dirname(relative_fpath)\n source_fname = relative_fpath.split(os.sep)[-1]\n base_fname = source_fname.split('.')[0]\n dest_fname = f'{base_fname}.json'\n return os.path.join(self.dest_dir, relative_dirpath, dest_fname)", - "docstring": "Calculates full path for end json-api file from source file full\n path." - }, - { - "code": "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", - "docstring": "Useful for merging other files in a custom logic." 
- }, - { - "code": "def set_schedule(self, schedule):\n self.Tmax = schedule['tmax']\n self.Tmin = schedule['tmin']\n self.steps = int(schedule['steps'])\n self.updates = int(schedule['updates'])", - "docstring": "Takes the output from `auto` and sets the attributes" - }, - { - "code": "def handle(self, sock, read_data, path, headers):\n \"Just waits, and checks for other actions to replace us\"\n for i in range(self.timeout // self.check_interval):\n eventlet.sleep(self.check_interval)\n action = self.balancer.resolve_host(self.host)\n if not isinstance(action, Spin):\n return action.handle(sock, read_data, path, headers)\n action = Static(self.balancer, self.host, self.matched_host, type=\"timeout\")\n return action.handle(sock, read_data, path, headers)", - "docstring": "Just waits, and checks for other actions to replace us" - }, - { - "code": "def set(self, param, value):\n self._shouldOwn(param)\n try:\n value = param.typeConverter(value)\n except ValueError as e:\n raise ValueError('Invalid param value given for param \"%s\". %s' % (param.name, e))\n self._paramMap[param] = value", - "docstring": "Sets a parameter in the embedded param map." - }, - { - "code": "async def get_all(self, url, params=None):\n if not params:\n params = {}\n items = []\n next_page_token = None\n while True:\n if next_page_token:\n params['pageToken'] = next_page_token\n response = await self.get_json(url, params=params)\n items.append(response)\n next_page_token = response.get('nextPageToken')\n if not next_page_token:\n break\n return items", - "docstring": "Aggregate data from all pages of an API query.\n\n Args:\n url (str): Google API endpoint URL.\n params (dict): (optional) URL query parameters.\n\n Returns:\n list: Parsed JSON query response results." - }, - { - "code": "def run_git_concurrently(base_dir):\n os.chdir(base_dir)\n git_dirs = get_list_of_git_directories()\n print(\"Processing %d git repos: %s\" % (len(git_dirs), ', '.join(git_dirs)))\n widgets = [Percentage(),\n ' ', Bar(),\n ' ', Counter(),\n ' ', AdaptiveETA()]\n pbar = ProgressBar(widgets=widgets, maxval=len(git_dirs))\n pbar.start()\n threads = {git_dir:GitPuller(git_dir) for git_dir in git_dirs}\n for thread in threads.values():\n thread.start()\n while True:\n pbar.update(len([t for t in threads.values() if not t.is_alive()]))\n if all([not t.is_alive() for t in threads.values()]):\n break\n time.sleep(0.2)\n table = PrettyTable([\"repo\", \"local\", \"pull\"])\n table.align[\"repo\"] = \"l\"\n table.align[\"local\"] = \"l\"\n table.align[\"pull\"] = \"l\"\n for git_dir in sorted(threads):\n thread = threads[git_dir]\n if thread.local_ok:\n if thread.has_uncommitted_changes:\n local_changes_text = colored(\n 'Uncommitted changes', 'green', attrs=['bold'])\n else:\n local_changes_text = colored('OK', 'green')\n else:\n local_changes_text = colored('Problem', 'red')\n if thread.git_pull_ok:\n if thread.is_up_to_date:\n pull_text = colored('OK', 'green')\n else:\n pull_text = colored('Changed', 'green', attrs=['bold'])\n else:\n pull_text = colored('Problem', 'red')\n table.add_row([git_dir, local_changes_text, pull_text])\n print(table)\n for git_dir in sorted(threads):\n if not threads[git_dir].git_pull_ok:\n thread = threads[git_dir]\n print colored('%s: ' % git_dir, 'red')\n print thread.git_pull_output", - "docstring": "Runs the 'git status' and 'git pull' commands in threads and reports\n the results in a pretty table." 
- }, - { - "code": "def update_trial_stats(self, trial, result):\n assert trial in self._live_trials\n assert self._get_result_time(result) >= 0\n delta = self._get_result_time(result) - \\\n self._get_result_time(self._live_trials[trial])\n assert delta >= 0\n self._completed_progress += delta\n self._live_trials[trial] = result", - "docstring": "Update result for trial. Called after trial has finished\n an iteration - will decrement iteration count.\n\n TODO(rliaw): The other alternative is to keep the trials\n in and make sure they're not set as pending later." - }, - { - "code": "def _read_bytes_from_non_framed_body(self, b):\n _LOGGER.debug(\"starting non-framed body read\")\n bytes_to_read = self.body_length\n _LOGGER.debug(\"%d bytes requested; reading %d bytes\", b, bytes_to_read)\n ciphertext = self.source_stream.read(bytes_to_read)\n if len(self.output_buffer) + len(ciphertext) < self.body_length:\n raise SerializationError(\"Total message body contents less than specified in body description\")\n if self.verifier is not None:\n self.verifier.update(ciphertext)\n tag = deserialize_tag(stream=self.source_stream, header=self._header, verifier=self.verifier)\n aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(\n content_type=self._header.content_type, is_final_frame=True\n )\n associated_data = assemble_content_aad(\n message_id=self._header.message_id,\n aad_content_string=aad_content_string,\n seq_num=1,\n length=self.body_length,\n )\n self.decryptor = Decryptor(\n algorithm=self._header.algorithm,\n key=self._derived_data_key,\n associated_data=associated_data,\n iv=self._unframed_body_iv,\n tag=tag,\n )\n plaintext = self.decryptor.update(ciphertext)\n plaintext += self.decryptor.finalize()\n self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier)\n return plaintext", - "docstring": "Reads the requested number of bytes from a streaming non-framed message body.\n\n :param int b: Number of bytes to read\n :returns: Decrypted bytes from source stream\n :rtype: bytes" - }, - { - "code": "def detect(self, obj):\n links = obj.get(LINKS_KEY, {})\n for detector in [LATEST, DRAFT_3]:\n if detector.draft.curies_rel in links:\n return detector.detect(obj)\n return LATEST.detect(obj)", - "docstring": "Identifies the HAL draft level of a given JSON object." - }, - { - "code": "def columnize(items, width=None, file=sys.stdout):\n if not items:\n return\n if width is None:\n width = shutil.get_terminal_size()[0] if file is sys.stdout else 80\n items = [rendering.vtmlrender(x) for x in items]\n maxcol = max(items, key=len)\n colsize = len(maxcol) + 2\n cols = width // colsize\n if cols < 2:\n for x in items:\n print(x, file=file)\n return\n lines = math.ceil(len(items) / cols)\n for i in range(lines):\n row = items[i:None:lines]\n print(*[x.ljust(colsize) for x in row], sep='', file=file)", - "docstring": "Smart display width handling when showing a list of stuff." 
- }, - { - "code": "def search_prefix(self):\n if 'operator' in request.json:\n operator = request.json['operator']\n else:\n operator = 'equals'\n attr = XhrController.extract_prefix_attr(request.json)\n n = 0\n q = {}\n for key, val in attr.items():\n if n == 0:\n q = {\n 'operator': operator,\n 'val1': key,\n 'val2': val\n }\n else:\n q = {\n 'operator': 'and',\n 'val1': {\n 'operator': operator,\n 'val1': key,\n 'val2': val\n },\n 'val2': q\n }\n n += 1\n search_opts = {}\n if 'children_depth' in request.json:\n search_opts['children_depth'] = request.json['children_depth']\n if 'parents_depth' in request.json:\n search_opts['parents_depth'] = request.json['parents_depth']\n if 'include_neighbors' in request.json:\n search_opts['include_neighbors'] = request.json['include_neighbors']\n if 'max_result' in request.json:\n search_opts['max_result'] = request.json['max_result']\n if 'offset' in request.json:\n search_opts['offset'] = request.json['offset']\n try:\n result = Prefix.search(q, search_opts)\n except NipapError, e:\n return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})\n return json.dumps(result, cls=NipapJSONEncoder)", - "docstring": "Search prefixes. Does not yet incorporate all the functions of the\n search_prefix API function due to difficulties with transferring\n a complete 'dict-to-sql' encoded data structure.\n\n Instead, a list of prefix attributes can be given which will be\n matched with the 'equals' operator if notheing else is specified. If\n multiple attributes are given, they will be combined with the 'and'\n operator. Currently, it is not possible to specify different\n operators for different attributes." - }, - { - "code": "def is_relative(modname, from_file):\n if not os.path.isdir(from_file):\n from_file = os.path.dirname(from_file)\n if from_file in sys.path:\n return False\n try:\n stream, _, _ = imp.find_module(modname.split(\".\")[0], [from_file])\n if stream:\n stream.close()\n return True\n except ImportError:\n return False", - "docstring": "return true if the given module name is relative to the given\n file name\n\n :type modname: str\n :param modname: name of the module we are interested in\n\n :type from_file: str\n :param from_file:\n path of the module from which modname has been imported\n\n :rtype: bool\n :return:\n true if the module has been imported relatively to `from_file`" - }, - { - "code": "def log_likelihood(self):\n r\n return _tram.log_likelihood_lower_bound(\n self.log_lagrangian_mult, self.biased_conf_energies,\n self.count_matrices, self.btrajs, self.dtrajs, self.state_counts,\n None, None, None, None, None)", - "docstring": "r\"\"\"\n Returns the value of the log-likelihood of the converged TRAM estimate." 
- }, - { - "code": "def getUsers(context, roles, allow_empty=True):\n mtool = getToolByName(context, 'portal_membership')\n pairs = allow_empty and [['', '']] or []\n users = mtool.searchForMembers(roles=roles)\n for user in users:\n uid = user.getId()\n fullname = user.getProperty('fullname')\n if not fullname:\n fullname = uid\n pairs.append((uid, fullname))\n pairs.sort(lambda x, y: cmp(x[1], y[1]))\n return DisplayList(pairs)", - "docstring": "Present a DisplayList containing users in the specified\n list of roles" - }, - { - "code": "def _process_features(self, limit=None):\n if self.test_mode:\n graph = self.testgraph\n else:\n graph = self.graph\n model = Model(graph)\n LOG.info(\"Processing features\")\n line_counter = 0\n geno = Genotype(graph)\n raw = '/'.join((self.rawdir, self.files['features']['file']))\n with open(raw, 'r', encoding=\"iso-8859-1\") as csvfile:\n filereader = csv.reader(csvfile, delimiter='\\t', quotechar='\\\"')\n for row in filereader:\n line_counter += 1\n (genomic_feature_id, feature_so_id,\n genomic_feature_abbreviation, genomic_feature_name,\n genomic_feature_type, mutagen, mutagee, construct_id,\n construct_name, construct_so_id, talen_crispr_id,\n talen_crispr_nam\n ) = row\n if self.test_mode and (\n genomic_feature_id not in self.test_ids['allele']):\n continue\n genomic_feature_id = 'ZFIN:' + genomic_feature_id.strip()\n model.addIndividualToGraph(\n genomic_feature_id, genomic_feature_name, feature_so_id)\n model.addSynonym(\n genomic_feature_id, genomic_feature_abbreviation)\n if construct_id is not None and construct_id != '':\n construct_id = 'ZFIN:' + construct_id.strip()\n geno.addConstruct(\n construct_id, construct_name, construct_so_id)\n geno.addSequenceDerivesFrom(\n genomic_feature_id, construct_id)\n self.id_label_map[\n genomic_feature_id] = genomic_feature_abbreviation\n self.id_label_map[construct_id] = construct_name\n if not self.test_mode and limit is not None and line_counter > limit:\n break\n LOG.info(\"Done with features\")\n return", - "docstring": "This module provides information for the intrinsic\n and extrinsic genotype features of zebrafish.\n All items here are 'alterations', and are therefore instances.\n\n sequence alteration ID, SO type, abbreviation, and relationship to\n the affected gene, with the gene's ID, symbol,\n and SO type (gene/pseudogene).\n\n Triples created:\n a class:\n :param limit:\n :return:" - }, - { - "code": "def get_change(self, change_id):\n uri = '/%s/change/%s' % (self.Version, change_id)\n response = self.make_request('GET', uri)\n body = response.read()\n boto.log.debug(body)\n if response.status >= 300:\n raise exception.DNSServerError(response.status,\n response.reason,\n body)\n e = boto.jsonresponse.Element()\n h = boto.jsonresponse.XmlHandler(e, None)\n h.parse(body)\n return e", - "docstring": "Get information about a proposed set of changes, as submitted\n by the change_rrsets method.\n Returns a Python data structure with status information about the\n changes.\n\n :type change_id: str\n :param change_id: The unique identifier for the set of changes.\n This ID is returned in the response to the change_rrsets method." 
- }, - { - "code": "def pauseProducing(self):\n if not self._running:\n return\n self._running = False\n for consumer in self._consumers.values():\n yield consumer.channel.basic_cancel(consumer_tag=consumer.tag)\n _legacy_twisted_log.msg(\"Paused retrieval of messages for the server queue\")", - "docstring": "Pause the reception of messages by canceling all existing consumers.\n This does not disconnect from the server.\n\n Message reception can be resumed with :meth:`resumeProducing`.\n\n Returns:\n Deferred: fired when the production is paused." - }, - { - "code": "def chunk_list(l, n):\n return [l[i:i + n] for i in range(0, len(l), n)]", - "docstring": "Return `n` size lists from a given list `l`" - }, - { - "code": "def _get_or_create_s3_bucket(s3, name):\n exists = True\n try:\n s3.meta.client.head_bucket(Bucket=name)\n except botocore.exceptions.ClientError as e:\n error_code = int(e.response[\"Error\"][\"Code\"])\n if error_code == 404:\n exists = False\n else:\n raise\n if not exists:\n s3.create_bucket(Bucket=name)\n return s3.Bucket(name)", - "docstring": "Get an S3 bucket resource after making sure it exists" - }, - { - "code": "def shellcode(executables, use_defaults=True, shell='bash', complete_arguments=None):\n if complete_arguments is None:\n complete_options = '-o nospace -o default' if use_defaults else '-o nospace'\n else:\n complete_options = \" \".join(complete_arguments)\n if shell == 'bash':\n quoted_executables = [quote(i) for i in executables]\n executables_list = \" \".join(quoted_executables)\n code = bashcode % dict(complete_opts=complete_options, executables=executables_list)\n else:\n code = \"\"\n for executable in executables:\n code += tcshcode % dict(executable=executable)\n return code", - "docstring": "Provide the shell code required to register a python executable for use with the argcomplete module.\n\n :param str executables: Executables to be completed (when invoked exactly with this name\n :param bool use_defaults: Whether to fallback to readline's default completion when no matches are generated.\n :param str shell: Name of the shell to output code for (bash or tcsh)\n :param complete_arguments: Arguments to call complete with\n :type complete_arguments: list(str) or None" - }, - { - "code": "def register(self, typ):\n def _func(cls):\n if typ in self._class:\n raise ValueError(\"duplicated type name '%s'\" % typ)\n cls.plugin_type = typ\n self._class[typ] = cls\n return cls\n return _func", - "docstring": "register a plugin" - }, - { - "code": "def get_tile(self, x, y, zoom):\n status, data = self.http_client.get_png(\n ROOT_TILE_URL % self.map_layer + '/%s/%s/%s.png' % (zoom, x, y),\n params={'appid': self.API_key})\n img = Image(data, ImageTypeEnum.PNG)\n return Tile(x, y, zoom, self.map_layer, img)", - "docstring": "Retrieves the tile having the specified coordinates and zoom level\n\n :param x: horizontal tile number in OWM tile reference system\n :type x: int\n :param y: vertical tile number in OWM tile reference system\n :type y: int\n :param zoom: zoom level for the tile\n :type zoom: int\n :returns: a `pyowm.tiles.Tile` instance" - }, - { - "code": "def seal(mock):\n _frankeinstainize(mock)\n for attr in dir(mock):\n try:\n m = getattr(mock, attr)\n except AttributeError:\n continue\n if not isinstance(m, NonCallableMock):\n continue\n if m._mock_new_parent is mock:\n seal(m)", - "docstring": "Disable the automatic generation of \"submocks\"\n\n Given an input Mock, seals it to ensure no further mocks will be generated\n when accessing an 
attribute that was not already defined.\n\n Submocks are defined as all mocks which were created DIRECTLY from the\n parent. If a mock is assigned to an attribute of an existing mock,\n it is not considered a submock." - }, - { - "code": "def how_many(self):\n if self.linkdates != []:\n if max(self.linkdates) <= list(time.localtime()):\n currentdate = max(self.linkdates)\n else:\n currentdate = list(time.localtime())\n print((\"This entry has its date set in the future. \"\n \"I will use your current local time as its date \"\n \"instead.\"),\n file=sys.stderr, flush=True)\n stop = sys.maxsize\n else:\n currentdate = [1, 1, 1, 0, 0]\n firstsync = self.retrieve_config('firstsync', '1')\n if firstsync == 'all':\n stop = sys.maxsize\n else:\n stop = int(firstsync)\n return currentdate, stop", - "docstring": "Ascertain where to start downloading, and how many entries." - }, - { - "code": "def resource_path(opts):\n resources = _load(opts.resources, opts.output_dir)\n if opts.resource_name not in resources:\n sys.stderr.write('Invalid resource name: {}\\n'.format(opts.resource_name))\n return 1\n print(resources[opts.resource_name].destination)", - "docstring": "Return the full path to a named resource." - }, - { - "code": "def get_file_paths(self, id_list):\n if id_list is None:\n return []\n try:\n path_array = self._table[id_list - 1]['path']\n except IndexError:\n print(\"IndexError \", len(self._table), id_list)\n path_array = []\n return [path for path in path_array]", - "docstring": "Get a list of file paths based of a set of ids\n\n Parameters\n ----------\n\n id_list : list\n List of integer file keys\n\n Returns list of file paths" - }, - { - "code": "def decimal128_to_decimal(b):\n \"decimal128 bytes to Decimal\"\n v = decimal128_to_sign_digits_exponent(b)\n if isinstance(v, Decimal):\n return v\n sign, digits, exponent = v\n return Decimal((sign, Decimal(digits).as_tuple()[1], exponent))", - "docstring": "decimal128 bytes to Decimal" - }, - { - "code": "def delete_node(self, node_id):\n node = self.get_node(node_id)\n for e in node['edges']:\n self.delete_edge_by_id(e)\n edges = [edge_id for edge_id, edge in list(self.edges.items()) if edge['vertices'][1] == node_id]\n for e in edges:\n self.delete_edge_by_id(e)\n del self.nodes[node_id]\n self._num_nodes -= 1", - "docstring": "Removes the node identified by node_id from the graph." - }, - { - "code": "def _random_edge_iterator(graph, n_edges: int) -> Iterable[Tuple[BaseEntity, BaseEntity, int, Mapping]]:\n edges = list(graph.edges())\n edge_sample = random.sample(edges, n_edges)\n for u, v in edge_sample:\n keys = list(graph[u][v])\n k = random.choice(keys)\n yield u, v, k, graph[u][v][k]", - "docstring": "Get a random set of edges from the graph and randomly samples a key from each.\n\n :type graph: pybel.BELGraph\n :param n_edges: Number of edges to randomly select from the given graph" - }, - { - "code": "def parseUrl(url):\n scheme, netloc, url, params, query, fragment = urllib.parse.urlparse(url)\n query_dict = {\n k: sorted(v) if len(v) > 1 else v[0]\n for k, v in list(urllib.parse.parse_qs(query).items())\n }\n return {\n 'scheme': scheme,\n 'netloc': netloc,\n 'url': url,\n 'params': params,\n 'query': query_dict,\n 'fragment': fragment,\n }", - "docstring": "Return a dict containing scheme, netloc, url, params, query, fragment keys.\n\n query is a dict where the values are always lists. If the query key appears only\n once in the URL, the list will have a single value." 
- }, - { - "code": "def delete(table, session, conds):\n with session.begin_nested():\n archive_conds_list = _get_conditions_list(table, conds)\n session.execute(\n sa.delete(table.ArchiveTable, whereclause=_get_conditions(archive_conds_list))\n )\n conds_list = _get_conditions_list(table, conds, archive=False)\n session.execute(\n sa.delete(table, whereclause=_get_conditions(conds_list))\n )", - "docstring": "Performs a hard delete on a row, which means the row is deleted from the Savage\n table as well as the archive table.\n\n :param table: the model class which inherits from\n :class:`~savage.models.user_table.SavageModelMixin` and specifies the model\n of the user table from which we are querying\n :param session: a sqlalchemy session with connections to the database\n :param conds: a list of dictionary of key value pairs where keys are columns in the table\n and values are values the column should take on. If specified, this query will\n only return rows where the columns meet all the conditions. The columns specified\n in this dictionary must be exactly the unique columns that versioning pivots around." - }, - { - "code": "async def rows(self, offs, size=None, iden=None):\n if iden is not None:\n self.setOffset(iden, offs)\n for i, (indx, byts) in enumerate(self._items.rows(offs)):\n if size is not None and i >= size:\n return\n yield indx, byts", - "docstring": "Yield a number of raw items from the CryoTank starting at a given offset.\n\n Args:\n offs (int): The index of the desired datum (starts at 0)\n size (int): The max number of items to yield.\n\n Yields:\n ((indx, bytes)): Index and msgpacked bytes." - }, - { - "code": "def _process_mhci(mhc_file, normal=False):\n results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core'])\n with open(mhc_file, 'r') as mf:\n peptides = set()\n for line in mf:\n if not line.startswith('HLA'):\n continue\n line = line.strip().split('\\t')\n allele = line[0]\n pept = line[5]\n pred = line[7]\n if float(pred) > 5.00 and not normal:\n continue\n results.loc[len(results)] = [allele, pept, pred, pept]\n results.drop_duplicates(inplace=True)\n return results", - "docstring": "Process the results from running IEDB MHCI binding predictions into a pandas dataframe.\n\n :param str mhc_file: Output file containing netmhciipan mhci:peptide binding predictions\n :param bool normal: Is this processing the results of a normal?\n :return: Results in a tabular format\n :rtype: pandas.DataFrame" - }, - { - "code": "def binarize(df, category_classes, all_classes=True, drop=True,\n astype=None, inplace=True, min_freq=None):\n if type(category_classes) is not dict:\n columns = set(category_classes)\n category_classes = {column: df[column].unique() for column in columns}\n else:\n columns = category_classes.keys()\n df_new = df if inplace else df.drop(columns, axis=1)\n for category in columns:\n classes = category_classes[category]\n for i in range(len(classes)-1 if not all_classes else len(classes)):\n c = df[category] == classes[i]\n if not min_freq or c.sum() >= min_freq:\n if astype is not None:\n c = c.astype(astype)\n df_new['%s_%s' % (category, str(classes[i]).replace(' ', '_'))] = c\n if drop and inplace:\n df_new.drop(columns, axis=1, inplace=True)\n return df_new", - "docstring": "Binarize specified categoricals. 
Works inplace!\n\n Args:\n - df: the DataFrame whose columns to binarize\n - category_classes: either a dict of (column : [class1, class2, ...]) pairs\n or a collection of column names, in which case classes are\n given using df[column].unique()\n - all_classes: when False, the last class is skipped\n - drop: when True, the original categorical columns are dropped\n - astype: a type for the resulting binaries, e.g. np.float32.\n When None, use the defualt (bool).\n - inplace: whether to modify the DataFrame inplace\n\n Returns:\n the DataFrame with binarized columns" - }, - { - "code": "def update_catalog_extent(self, current_extent):\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')\n self.br.update_boot_system_use(struct.pack('=L', current_extent))", - "docstring": "A method to update the extent associated with this Boot Catalog.\n\n Parameters:\n current_extent - New extent to associate with this Boot Catalog\n Returns:\n Nothing." - }, - { - "code": "def _parse_container(tokens, index, for_or_if=None):\n items = [Atom(Token(*tokens[index]))]\n index += 1\n num_tokens = len(tokens)\n while index < num_tokens:\n tok = Token(*tokens[index])\n if tok.token_string in ',)]}':\n if for_or_if == 'for':\n return (ListComprehension(items), index - 1)\n elif for_or_if == 'if':\n return (IfExpression(items), index - 1)\n items.append(Atom(tok))\n if tok.token_string == ')':\n return (Tuple(items), index)\n elif tok.token_string == ']':\n return (List(items), index)\n elif tok.token_string == '}':\n return (DictOrSet(items), index)\n elif tok.token_string in '([{':\n (container, index) = _parse_container(tokens, index)\n items.append(container)\n elif tok.token_string == 'for':\n (container, index) = _parse_container(tokens, index, 'for')\n items.append(container)\n elif tok.token_string == 'if':\n (container, index) = _parse_container(tokens, index, 'if')\n items.append(container)\n else:\n items.append(Atom(tok))\n index += 1\n return (None, None)", - "docstring": "Parse a high-level container, such as a list, tuple, etc." - }, - { - "code": "def parse_pin(name_str):\n if len(name_str) < 1:\n raise ValueError(\"Expecting pin name to be at least 4 charcters.\")\n if name_str[0] != 'P':\n raise ValueError(\"Expecting pin name to start with P\")\n pin_str = name_str[1:].split('/')[0]\n if not pin_str.isdigit():\n raise ValueError(\"Expecting numeric pin number.\")\n return int(pin_str)", - "docstring": "Parses a string and returns a pin-num." - }, - { - "code": "def plot_chain(sampler, p=None, **kwargs):\n if p is None:\n npars = sampler.chain.shape[-1]\n for pp in six.moves.range(npars):\n _plot_chain_func(sampler, pp, **kwargs)\n fig = None\n else:\n fig = _plot_chain_func(sampler, p, **kwargs)\n return fig", - "docstring": "Generate a diagnostic plot of the sampler chains.\n\n Parameters\n ----------\n sampler : `emcee.EnsembleSampler`\n Sampler containing the chains to be plotted.\n p : int (optional)\n Index of the parameter to plot. 
If omitted, all chains are plotted.\n last_step : bool (optional)\n Whether to plot the last step of the chain or the complete chain\n (default).\n\n Returns\n -------\n figure : `matplotlib.figure.Figure`\n Figure" - }, - { - "code": "def issued_at(self):\n issued_at = self._issued_at\n if issued_at is None:\n self._issued_at = int(time.time())\n return self._issued_at", - "docstring": "Time when access token was requested, as seconds since epoch.\n\n Note:\n Accessing this property when there wasn't any request attempts\n will return current time.\n\n Returns:\n int" - }, - { - "code": "def _write_out_argfile(argfile, out, fnargs, parallel, out_keys, input_files, work_dir):\n with open(argfile, \"w\") as out_handle:\n if argfile.endswith(\".json\"):\n record_name, record_attrs = _get_record_attrs(out_keys)\n if record_name:\n if parallel in [\"multi-batch\"]:\n recs = _nested_cwl_record(out, record_attrs, input_files)\n elif parallel in [\"single-split\", \"multi-combined\", \"multi-parallel\", \"batch-single\",\n \"single-single\"]:\n recs = [_collapse_to_cwl_record_single(utils.to_single_data(xs), record_attrs, input_files)\n for xs in out]\n else:\n samples = [utils.to_single_data(xs) for xs in out]\n recs = [_collapse_to_cwl_record(samples, record_attrs, input_files)]\n json.dump(_combine_cwl_records(recs, record_name, parallel),\n out_handle, sort_keys=True, indent=4, separators=(', ', ': '))\n elif parallel in [\"single-split\", \"multi-combined\", \"batch-split\"]:\n json.dump(_convert_to_cwl_json([utils.to_single_data(xs) for xs in out], fnargs, input_files),\n out_handle, sort_keys=True, indent=4, separators=(', ', ': '))\n else:\n json.dump(_convert_to_cwl_json(utils.to_single_data(utils.to_single_data(out)), fnargs, input_files),\n out_handle, sort_keys=True, indent=4, separators=(', ', ': '))\n else:\n yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)", - "docstring": "Write output argfile, preparing a CWL ready JSON or YAML representation of the world." - }, - { - "code": "def get_available_languages(self, obj):\n return obj.available_languages if obj is not None else self.model.objects.none()", - "docstring": "Returns available languages for current object." - }, - { - "code": "def set_energy_range(self, logemin, logemax):\n if logemin is None:\n logemin = self.log_energies[0]\n else:\n imin = int(utils.val_to_edge(self.log_energies, logemin)[0])\n logemin = self.log_energies[imin]\n if logemax is None:\n logemax = self.log_energies[-1]\n else:\n imax = int(utils.val_to_edge(self.log_energies, logemax)[0])\n logemax = self.log_energies[imax]\n self._loge_bounds = np.array([logemin, logemax])\n self._roi_data['loge_bounds'] = np.copy(self.loge_bounds)\n for c in self.components:\n c.set_energy_range(logemin, logemax)\n return self._loge_bounds", - "docstring": "Set the energy bounds of the analysis. This restricts the\n evaluation of the likelihood to the data that falls in this\n range. Input values will be rounded to the closest bin edge\n value. If either argument is None then the lower or upper\n bound of the analysis instance will be used.\n\n Parameters\n ----------\n\n logemin : float\n Lower energy bound in log10(E/MeV).\n\n logemax : float\n Upper energy bound in log10(E/MeV).\n\n Returns\n -------\n\n eminmax : array\n Minimum and maximum energy in log10(E/MeV)." 
- }, - { - "code": "def caution_title_header_element(feature, parent):\n _ = feature, parent\n header = caution_title_header['string_format']\n return header.capitalize()", - "docstring": "Retrieve caution title header string from definitions." - }, - { - "code": "def exec_file(filename, return_locals=False, is_deploy_code=False):\n if filename not in PYTHON_CODES:\n with open(filename, 'r') as f:\n code = f.read()\n code = compile(code, filename, 'exec')\n PYTHON_CODES[filename] = code\n data = {\n '__file__': filename,\n 'state': pseudo_state,\n }\n exec(PYTHON_CODES[filename], data)\n return data", - "docstring": "Execute a Python file and optionally return it's attributes as a dict." - }, - { - "code": "def get_content_modified_time(cls, abspath):\n stat_result = os.stat(abspath)\n modified = datetime.datetime.utcfromtimestamp(\n stat_result[stat.ST_MTIME])\n return modified", - "docstring": "Returns the time that ``abspath`` was last modified.\n\n May be overridden in subclasses. Should return a `~datetime.datetime`\n object or None." - }, - { - "code": "def delete_files(sender, **kwargs):\n instance = kwargs['instance']\n if not hasattr(instance.distribution, 'path'):\n return\n if not os.path.exists(instance.distribution.path):\n return\n is_referenced = (\n instance.__class__.objects\n .filter(distribution=instance.distribution)\n .exclude(pk=instance._get_pk_val())\n .exists())\n if is_referenced:\n return\n try:\n instance.distribution.storage.delete(instance.distribution.path)\n except Exception:\n logger.exception(\n 'Error when trying to delete file %s of package %s:' % (\n instance.pk, instance.distribution.path))", - "docstring": "Signal callback for deleting old files when database item is deleted" - }, - { - "code": "def __getJoin(self, web):\n join = web.findAll(\"a\", {\"class\": \"dropdown-item\"})\n for j in join:\n try:\n if \"Joined GitHub\" in j.text:\n self.join = j[\"href\"][-10:]\n except IndexError as error:\n print(\"There was an error with the user \" + self.name)\n print(error)\n except AttributeError as error:\n print(\"There was an error with the user \" + self.name)\n print(error)", - "docstring": "Scrap the join date from a GitHub profile.\n\n :param web: parsed web.\n :type web: BeautifulSoup node." 
- }, - { - "code": "def render(self, **context):\n\t\tlocalns = self.envs.copy()\n\t\tlocalns.update(context)\n\t\ttry:\n\t\t\texec(str(self.code), None, localns)\n\t\t\treturn localns[Liquid.COMPLIED_RENDERED_STR]\n\t\texcept Exception:\n\t\t\tstacks = list(reversed(traceback.format_exc().splitlines()))\n\t\t\tfor stack in stacks:\n\t\t\t\tstack = stack.strip()\n\t\t\t\tif stack.startswith('File \"\"'):\n\t\t\t\t\tlineno = int(stack.split(', ')[1].split()[-1])\n\t\t\t\t\tsource = []\n\t\t\t\t\tif 'NameError:' in stacks[0]:\n\t\t\t\t\t\tsource.append('Do you forget to provide the data?')\n\t\t\t\t\timport math\n\t\t\t\t\tsource.append('\\nCompiled source (use debug mode to see full source):')\n\t\t\t\t\tsource.append('---------------------------------------------------')\n\t\t\t\t\tnlines = len(self.code.codes)\n\t\t\t\t\tnbit = int(math.log(nlines, 10)) + 3\n\t\t\t\t\tfor i, line in enumerate(self.code.codes):\n\t\t\t\t\t\tif i - 7 > lineno or i + 9 < lineno: continue\n\t\t\t\t\t\tif i + 1 != lineno:\n\t\t\t\t\t\t\tsource.append(' ' + (str(i+1) + '.').ljust(nbit) + str(line).rstrip())\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsource.append('* ' + (str(i+1) + '.').ljust(nbit) + str(line).rstrip())\n\t\t\t\t\traise LiquidRenderError(\n\t\t\t\t\t\tstacks[0], \n\t\t\t\t\t\trepr(self.code.codes[lineno - 1]) + \n\t\t\t\t\t\t'\\n' + '\\n'.join(source) + \n\t\t\t\t\t\t'\\n\\nPREVIOUS EXCEPTION:\\n------------------\\n' + \n\t\t\t\t\t\t'\\n'.join(stacks) + '\\n' +\n\t\t\t\t\t\t'\\nCONTEXT:\\n------------------\\n' +\n\t\t\t\t\t\t'\\n'.join(\n\t\t\t\t\t\t\t' ' + key + ': ' + str(val) \n\t\t\t\t\t\t\tfor key, val in localns.items() if not key.startswith('_liquid_') and not key.startswith('__')\n\t\t\t\t\t\t) + '\\n'\n\t\t\t\t\t)\n\t\t\traise", - "docstring": "Render this template by applying it to `context`.\n\t\t@params:\n\t\t\t`context`: a dictionary of values to use in this rendering.\n\t\t@returns:\n\t\t\tThe rendered string" - }, - { - "code": "def topology_mdtraj(traj):\n import mdtraj as md\n top = {}\n top['atom_types'] = [a.element.symbol for a in traj.topology.atoms]\n top['atom_names'] = [a.name for a in traj.topology.atoms]\n top['bonds'] = [(a.index, b.index) for a, b in traj.topology.bonds]\n top['secondary_structure'] = md.compute_dssp(traj[0])[0]\n top['residue_types'] = [r.name for r in traj.topology.residues ]\n top['residue_indices'] = [ [a.index for a in r.atoms] for r in traj.topology.residues ]\n return top", - "docstring": "Generate topology spec for the MolecularViewer from mdtraj.\n\n :param mdtraj.Trajectory traj: the trajectory\n :return: A chemview-compatible dictionary corresponding to the topology defined in mdtraj." - }, - { - "code": "def count_leases_by_owner(self, leases):\n owners = [l.owner for l in leases]\n return dict(Counter(owners))", - "docstring": "Returns a dictionary of leases by current owner." - }, - { - "code": "def mutate_rows(self, rows, retry=DEFAULT_RETRY):\n retryable_mutate_rows = _RetryableMutateRowsWorker(\n self._instance._client,\n self.name,\n rows,\n app_profile_id=self._app_profile_id,\n timeout=self.mutation_timeout,\n )\n return retryable_mutate_rows(retry=retry)", - "docstring": "Mutates multiple rows in bulk.\n\n For example:\n\n .. 
literalinclude:: snippets_table.py\n :start-after: [START bigtable_mutate_rows]\n :end-before: [END bigtable_mutate_rows]\n\n The method tries to update all specified rows.\n If some of the rows weren't updated, it would not remove mutations.\n They can be applied to the row separately.\n If row mutations finished successfully, they would be cleaned up.\n\n Optionally, a ``retry`` strategy can be specified to re-attempt\n mutations on rows that return transient errors. This method will retry\n until all rows succeed or until the request deadline is reached. To\n specify a ``retry`` strategy of \"do-nothing\", a deadline of ``0.0``\n can be specified.\n\n :type rows: list\n :param rows: List or other iterable of :class:`.DirectRow` instances.\n\n :type retry: :class:`~google.api_core.retry.Retry`\n :param retry:\n (Optional) Retry delay and deadline arguments. To override, the\n default value :attr:`DEFAULT_RETRY` can be used and modified with\n the :meth:`~google.api_core.retry.Retry.with_delay` method or the\n :meth:`~google.api_core.retry.Retry.with_deadline` method.\n\n :rtype: list\n :returns: A list of response statuses (`google.rpc.status_pb2.Status`)\n corresponding to success or failure of each row mutation\n sent. These will be in the same order as the `rows`." - }, - { - "code": "def make_node(cls, node, *args):\n if node is None:\n node = cls()\n assert isinstance(node, SymbolARGUMENT) or isinstance(node, cls)\n if not isinstance(node, cls):\n return cls.make_node(None, node, *args)\n for arg in args:\n assert isinstance(arg, SymbolARGUMENT)\n node.appendChild(arg)\n return node", - "docstring": "This will return a node with an argument_list." - }, - { - "code": "def graph_to_gluon(self, graph, ctx):\n sym, arg_params, aux_params = self.from_onnx(graph)\n metadata = self.get_graph_metadata(graph)\n data_names = [input_tensor[0] for input_tensor in metadata['input_tensor_data']]\n data_inputs = [symbol.var(data_name) for data_name in data_names]\n from ....gluon import SymbolBlock\n net = SymbolBlock(outputs=sym, inputs=data_inputs)\n net_params = net.collect_params()\n for param in arg_params:\n if param in net_params:\n net_params[param].shape = arg_params[param].shape\n net_params[param]._load_init(arg_params[param], ctx=ctx)\n for param in aux_params:\n if param in net_params:\n net_params[param].shape = aux_params[param].shape\n net_params[param]._load_init(aux_params[param], ctx=ctx)\n return net", - "docstring": "Construct SymbolBlock from onnx graph.\n\n Parameters\n ----------\n graph : onnx protobuf object\n The loaded onnx graph\n ctx : Context or list of Context\n Loads the model into one or many context(s).\n\n Returns\n -------\n sym_block :gluon.nn.SymbolBlock\n The returned gluon SymbolBlock" - }, - { - "code": "def get_assets_by_record_type(self, asset_record_type=None):\n return AssetList(self._provider_session.get_assets_by_record_type(asset_record_type),\n self._config_map)", - "docstring": "Gets an ``AssetList`` containing the given asset record ``Type``.\n\n In plenary mode, the returned list contains all known assets or\n an error results. 
Otherwise, the returned list may contain only\n those assets that are accessible through this session.\n\n arg: asset_record_type (osid.type.Type): an asset record type\n return: (osid.repository.AssetList) - the returned ``Asset\n list``\n raise: NullArgument - ``asset_record_type`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def retry(*dargs, **dkw):\n if len(dargs) == 1 and callable(dargs[0]):\n return retry()(dargs[0])\n else:\n def wrap(f):\n if asyncio and asyncio.iscoroutinefunction(f):\n r = AsyncRetrying(*dargs, **dkw)\n elif tornado and hasattr(tornado.gen, 'is_coroutine_function') \\\n and tornado.gen.is_coroutine_function(f):\n r = TornadoRetrying(*dargs, **dkw)\n else:\n r = Retrying(*dargs, **dkw)\n return r.wraps(f)\n return wrap", - "docstring": "Wrap a function with a new `Retrying` object.\n\n :param dargs: positional arguments passed to Retrying object\n :param dkw: keyword arguments passed to the Retrying object" - }, - { - "code": "def find_nested(self):\n result = {}\n def add_if_nested():\n if parent.contains_interval(child):\n if parent not in result:\n result[parent] = set()\n result[parent].add(child)\n long_ivs = sorted(self.all_intervals, key=Interval.length, reverse=True)\n for i, parent in enumerate(long_ivs):\n for child in long_ivs[i + 1:]:\n add_if_nested()\n return result", - "docstring": "Returns a dictionary mapping parent intervals to sets of\n intervals overlapped by and contained in the parent.\n\n Completes in O(n^2) time.\n :rtype: dict of [Interval, set of Interval]" - }, - { - "code": "def as_security_error(node, secid):\n assert node.Name == 'securityError'\n src = XmlHelper.get_child_value(node, 'source')\n code = XmlHelper.get_child_value(node, 'code')\n cat = XmlHelper.get_child_value(node, 'category')\n msg = XmlHelper.get_child_value(node, 'message')\n subcat = XmlHelper.get_child_value(node, 'subcategory')\n return SecurityError(security=secid, source=src, code=code, category=cat, message=msg, subcategory=subcat)", - "docstring": "convert the securityError element to a SecurityError" - }, - { - "code": "def traverse(self, attr_name = None, attr_value = None):\n for sprite in self.sprites:\n if (attr_name is None) or \\\n (attr_value is None and hasattr(sprite, attr_name)) or \\\n (attr_value is not None and getattr(sprite, attr_name, None) == attr_value):\n yield sprite\n for child in sprite.traverse(attr_name, attr_value):\n yield child", - "docstring": "traverse the whole sprite tree and return child sprites which have the\n attribute and it's set to the specified value.\n If falue is None, will return all sprites that have the attribute" - }, - { - "code": "def stop_app(app_id, is_finished=False):\n state = constant.STATE_APP_STOPPED if is_finished else constant.STATE_APP_STOPPING\n app_update_state(app_id, state)", - "docstring": "update app state to 'Stopped'" - }, - { - "code": "def askInitial():\n return inquirer.prompt([\n inquirer.Text(\n 'inputPath', message=\"What's the path of your input file (eg input.csv)\"),\n inquirer.List(\n 'year',\n message=\"What year are you in\",\n choices=[1, 2, 3, 4]\n ),\n inquirer.Checkbox(\n 'whatToDo',\n message=\"What can I do for you (select with your spacebar)\",\n choices=[\n \"Get your weighted average\",\n \"Get your rank in the year\",\n \"Reformat results by module and output to csv\",\n \"Plot the results by module\"\n ]),\n ])", - 
"docstring": "Asks the user for what it wants the script to do\n\n Returns:\n [dictionary] -- answers to the questions" - }, - { - "code": "def get_optimization_coordinates(self):\n coor_array = self.fields.get(\"Opt point 1 Geometries\")\n if coor_array is None:\n return []\n else:\n return np.reshape(coor_array, (-1, len(self.molecule.numbers), 3))", - "docstring": "Return the coordinates of the geometries at each point in the optimization" - }, - { - "code": "def unary_operator(op):\n valid_ops = {'~'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n def unary_operator(self):\n if isinstance(self, NumericalExpression):\n return NumExprFilter.create(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFilter.create(\"{op}x_0\".format(op=op), (self,))\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator", - "docstring": "Factory function for making unary operator methods for Filters." - }, - { - "code": "def _correlation_normalization(self, corr):\n time1 = time.time()\n (sv, e, av) = corr.shape\n for i in range(sv):\n start = 0\n while start < e:\n cur_val = corr[i, start: start + self.epochs_per_subj, :]\n cur_val = .5 * np.log((cur_val + 1) / (1 - cur_val))\n corr[i, start: start + self.epochs_per_subj, :] = \\\n zscore(cur_val, axis=0, ddof=0)\n start += self.epochs_per_subj\n corr = np.nan_to_num(corr)\n time2 = time.time()\n logger.debug(\n 'within-subject normalization for %d voxels '\n 'using numpy zscore function, takes %.2f s' %\n (sv, (time2 - time1))\n )\n return corr", - "docstring": "Do within-subject normalization.\n\n This method uses scipy.zscore to normalize the data,\n but is much slower than its C++ counterpart.\n It is doing in-place z-score.\n\n Parameters\n ----------\n corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]\n the correlation values of all subjects in all epochs\n for the assigned values, in row-major\n\n Returns\n -------\n corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]\n the normalized correlation values of all subjects in all epochs\n for the assigned values, in row-major" - }, - { - "code": "def list_by_ids(self, ids):\n ids = utils.coerce_to_list(ids)\n uri = \"/%s?ids=%s\" % (self.uri_base, \",\".join(ids))\n curr_prkey = self.plural_response_key\n self.plural_response_key = \"\"\n ret = self._list(uri)\n self.plural_response_key = curr_prkey\n return ret", - "docstring": "If you wish to retrieve a list of messages from this queue and know the\n IDs of those messages, you can pass in a list of those IDs, and only\n the matching messages will be returned. This avoids pulling down all\n the messages in a queue and filtering on the client side." - }, - { - "code": "def decode_async_options(options):\n async_options = copy.deepcopy(options)\n eta = async_options.get('task_args', {}).get('eta')\n if eta:\n from datetime import datetime\n async_options['task_args']['eta'] = datetime.fromtimestamp(eta)\n callbacks = async_options.get('callbacks', {})\n if callbacks:\n async_options['callbacks'] = decode_callbacks(callbacks)\n if '__context_checker' in options:\n _checker = options['__context_checker']\n async_options['_context_checker'] = path_to_reference(_checker)\n if '__process_results' in options:\n _processor = options['__process_results']\n async_options['_process_results'] = path_to_reference(_processor)\n return async_options", - "docstring": "Decode Async options from JSON decoding." 
- }, - { - "code": "def discard_queue_messages(self):\n\t\tzmq_stream_queue = self.handler().stream()._send_queue\n\t\twhile not zmq_stream_queue.empty():\n\t\t\ttry:\n\t\t\t\tzmq_stream_queue.get(False)\n\t\t\texcept queue.Empty:\n\t\t\t\tcontinue\n\t\t\tzmq_stream_queue.task_done()", - "docstring": "Sometimes it is necessary to drop undelivered messages. These messages may be stored in different\n\t\tcaches, for example in a zmq socket queue. With different zmq flags we can tweak zmq sockets and\n\t\tcontexts no to keep those messages. But inside ZMQStream class there is a queue that can not be\n\t\tcleaned other way then the way it does in this method. So yes, it is dirty to access protected\n\t\tmembers, and yes it can be broken at any moment. And yes without correct locking procedure there\n\t\tis a possibility of unpredicted behaviour. But still - there is no other way to drop undelivered\n\t\tmessages\n\n\t\tDiscussion of the problem: https://github.com/zeromq/pyzmq/issues/1095\n\n\t\t:return: None" - }, - { - "code": "def find_instance_and_eni_by_ip(vpc_info, ip):\n for instance in vpc_info['instances']:\n for eni in instance.interfaces:\n for pa in eni.private_ip_addresses:\n if pa.private_ip_address == ip:\n return instance, eni\n raise VpcRouteSetError(\"Could not find instance/eni for '%s' \"\n \"in VPC '%s'.\" % (ip, vpc_info['vpc'].id))", - "docstring": "Given a specific IP address, find the EC2 instance and ENI.\n\n We need this information for setting the route.\n\n Returns instance and emi in a tuple." - }, - { - "code": "def identify(self, req, resp, resource, uri_kwargs):\n try:\n return req.get_header('X-Api-Key', True)\n except (KeyError, HTTPMissingHeader):\n pass", - "docstring": "Initialize X-Api-Key authentication middleware." - }, - { - "code": "def aesthetics(cls):\n main = cls.DEFAULT_AES.keys() | cls.REQUIRED_AES\n other = {'group'}\n if 'color' in main:\n other.add('colour')\n if 'outlier_color' in main:\n other.add('outlier_colour')\n return main | other", - "docstring": "Return all the aesthetics for this geom\n\n geoms should not override this method." - }, - { - "code": "def _plain_authentication(self, login, password, authz_id=b\"\"):\n if isinstance(login, six.text_type):\n login = login.encode(\"utf-8\")\n if isinstance(password, six.text_type):\n password = password.encode(\"utf-8\")\n params = base64.b64encode(b'\\0'.join([authz_id, login, password]))\n code, data = self.__send_command(\"AUTHENTICATE\", [b\"PLAIN\", params])\n if code == \"OK\":\n return True\n return False", - "docstring": "SASL PLAIN authentication\n\n :param login: username\n :param password: clear password\n :return: True on success, False otherwise." 
- }, - { - "code": "def declareWorker(self, *args, **kwargs):\n return self._makeApiCall(self.funcinfo[\"declareWorker\"], *args, **kwargs)", - "docstring": "Declare a worker\n\n Declare a worker, supplying some details about it.\n\n `declareWorker` allows updating one or more properties of a worker as long as the required scopes are\n possessed.\n\n This method takes input: ``v1/update-worker-request.json#``\n\n This method gives output: ``v1/worker-response.json#``\n\n This method is ``experimental``" - }, - { - "code": "def convert_entry_to_path(path):\n if not isinstance(path, Mapping):\n raise TypeError(\"expecting a mapping, received {0!r}\".format(path))\n if not any(key in path for key in [\"file\", \"path\"]):\n raise ValueError(\"missing path-like entry in supplied mapping {0!r}\".format(path))\n if \"file\" in path:\n path = vistir.path.url_to_path(path[\"file\"])\n elif \"path\" in path:\n path = path[\"path\"]\n return path", - "docstring": "Convert a pipfile entry to a string" - }, - { - "code": "def get(self, source_id=None, profile_id=None, profile_reference=None):\n query_params = {}\n query_params[\"source_id\"] = _validate_source_id(source_id)\n if profile_id:\n query_params[\"profile_id\"] = _validate_profile_id(profile_id)\n if profile_reference:\n query_params[\"profile_reference\"] = _validate_profile_reference(profile_reference)\n response = self.client.get('profile', query_params)\n return response.json()", - "docstring": "Retrieve the profile information associated with profile id.\n\n Args:\n source_id: \n source id\n profile_id: \n profile id\n\n Returns\n profile information" - }, - { - "code": "def resolve_url(url, desktop_user_agent=None, mobile_user_agent=None):\n if not desktop_user_agent:\n desktop_user_agent = DESKTOP_USER_AGENT\n if not mobile_user_agent:\n mobile_user_agent = MOBILE_USER_AGENT\n input_urls = set()\n parsed = urlparse(url_with_protocol(url))\n netloc = parsed.netloc\n if netloc.startswith('www.'):\n netloc = netloc[4:]\n input_urls.add('http://%s%s' % (netloc, parsed.path if parsed.path else '/'))\n input_urls.add('http://www.%s%s' % (netloc, parsed.path if parsed.path else '/'))\n resolved_urls = set()\n for input_url in input_urls:\n desktop_request = requests.get(input_url, headers={'User-Agent': desktop_user_agent})\n resolved_urls.add(desktop_request.url)\n mobile_request = requests.get(input_url, headers={'User-Agent': mobile_user_agent})\n resolved_urls.add(mobile_request.url)\n return list(resolved_urls)", - "docstring": "Url Resolver\n Given a url a list of resolved urls is returned for desktop and mobile user agents" - }, - { - "code": "def basic_consume(self, queue='', consumer_tag='', no_local=False,\n no_ack=False, exclusive=False, nowait=False,\n callback=None, arguments=None, on_cancel=None):\n args = AMQPWriter()\n args.write_short(0)\n args.write_shortstr(queue)\n args.write_shortstr(consumer_tag)\n args.write_bit(no_local)\n args.write_bit(no_ack)\n args.write_bit(exclusive)\n args.write_bit(nowait)\n args.write_table(arguments or {})\n self._send_method((60, 20), args)\n if not nowait:\n consumer_tag = self.wait(allowed_methods=[\n (60, 21),\n ])\n self.callbacks[consumer_tag] = callback\n if on_cancel:\n self.cancel_callbacks[consumer_tag] = on_cancel\n if no_ack:\n self.no_ack_consumers.add(consumer_tag)\n return consumer_tag", - "docstring": "Start a queue consumer\n\n This method asks the server to start a \"consumer\", which is a\n transient request for messages from a specific queue.\n Consumers last as long as 
the channel they were created on, or\n until the client cancels them.\n\n RULE:\n\n The server SHOULD support at least 16 consumers per queue,\n unless the queue was declared as private, and ideally,\n impose no limit except as defined by available resources.\n\n PARAMETERS:\n queue: shortstr\n\n Specifies the name of the queue to consume from. If\n the queue name is null, refers to the current queue\n for the channel, which is the last declared queue.\n\n RULE:\n\n If the client did not previously declare a queue,\n and the queue name in this method is empty, the\n server MUST raise a connection exception with\n reply code 530 (not allowed).\n\n consumer_tag: shortstr\n\n Specifies the identifier for the consumer. The\n consumer tag is local to a connection, so two clients\n can use the same consumer tags. If this field is empty\n the server will generate a unique tag.\n\n RULE:\n\n The tag MUST NOT refer to an existing consumer. If\n the client attempts to create two consumers with\n the same non-empty tag the server MUST raise a\n connection exception with reply code 530 (not\n allowed).\n\n no_local: boolean\n\n do not deliver own messages\n\n If the no-local field is set the server will not send\n messages to the client that published them.\n\n no_ack: boolean\n\n no acknowledgement needed\n\n If this field is set the server does not expect\n acknowledgments for messages. That is, when a message\n is delivered to the client the server automatically and\n silently acknowledges it on behalf of the client. This\n functionality increases performance but at the cost of\n reliability. Messages can get lost if a client dies\n before it can deliver them to the application.\n\n exclusive: boolean\n\n request exclusive access\n\n Request exclusive consumer access, meaning only this\n consumer can access the queue.\n\n RULE:\n\n If the server cannot grant exclusive access to the\n queue when asked, - because there are other\n consumers active - it MUST raise a channel\n exception with return code 403 (access refused).\n\n nowait: boolean\n\n do not send a reply method\n\n If set, the server will not respond to the method. The\n client should not wait for a reply method. If the\n server could not complete the method it will raise a\n channel or connection exception.\n\n callback: Python callable\n\n function/method called with each delivered message\n\n For each message delivered by the broker, the\n callable will be called with a Message object\n as the single argument. If no callable is specified,\n messages are quietly discarded, no_ack should probably\n be set to True in that case." - }, - { - "code": "def guess_server_name():\n if os.environ.get('CSCSERVICE') == 'sisu': return \"sisu\"\n elif os.environ.get('SLURM_JOB_PARTITION') == 'halvan': return \"halvan\"\n elif os.environ.get('SNIC_RESOURCE') == 'milou': return \"milou\"\n elif os.environ.get('LAPTOP') == 'macbook_air': return \"macbook_air\"\n else: return \"unknown\"", - "docstring": "We often use the same servers, which one are we running on now ?" - }, - { - "code": "def get_phonopy_structure(pmg_structure):\n symbols = [site.specie.symbol for site in pmg_structure]\n return PhonopyAtoms(symbols=symbols, cell=pmg_structure.lattice.matrix,\n scaled_positions=pmg_structure.frac_coords)", - "docstring": "Convert a pymatgen Structure object to a PhonopyAtoms object.\n\n Args:\n pmg_structure (pymatgen Structure): A Pymatgen structure object." 
- }, - { - "code": "def lengths( self ):\n return( np.array( [ math.sqrt( sum( row**2 ) ) for row in self.matrix ] ) )", - "docstring": "The cell lengths.\n\n Args:\n None\n\n Returns:\n (np.array(a,b,c)): The cell lengths." - }, - { - "code": "def load_trajs(fn, meta='meta.pandas.pickl', key_to_path=None):\n if key_to_path is None:\n key_to_path = default_key_to_path\n if isinstance(meta, str):\n meta = load_meta(meta_fn=meta)\n trajs = {}\n for k in meta.index:\n trajs[k] = np.load(os.path.join(fn, key_to_path(k)))\n return meta, trajs", - "docstring": "Load trajectory-like data\n\n Data is expected to be stored as if saved by ``save_trajs``.\n\n This method finds trajectories based on the ``meta`` dataframe.\n If you remove a file (trajectory) from disk, be sure to remove\n its row from the dataframe. If you remove a row from the dataframe,\n be aware that that trajectory (file) will not be loaded, even if\n it exists on disk.\n\n Parameters\n ----------\n fn : str\n Where the data is saved. This should be a directory containing\n one file per trajectory.\n meta : pd.DataFrame or str\n The DataFrame of metadata. If this is a string, it is interpreted\n as a filename and the dataframe is loaded from disk.\n\n Returns\n -------\n meta : pd.DataFrame\n The DataFrame of metadata. If you passed in a string (filename)\n to the ``meta`` input, this will be the loaded DataFrame. If\n you gave a DataFrame object, this will just be a reference back\n to that object\n trajs : dict\n Dictionary of trajectory-like np.ndarray's keyed on the values\n of ``meta.index``." - }, - { - "code": "def from_file(cls, file_path, validate=True):\n return xmlmap.load_xmlobject_from_file(file_path, xmlclass=cls, validate=validate)", - "docstring": "Creates a Python object from a XML file\n\n :param file_path: Path to the XML file\n :param validate: XML should be validated against the embedded XSD definition\n :type validate: Boolean\n :returns: the Python object" - }, - { - "code": "def complete_config(config):\n if not config.has_section('run'):\n config.add_section('run')\n values = {\n 'basedir': os.getcwd(),\n 'task_control': 'control.yaml',\n }\n for k, v in values.items():\n if not config.has_option('run', k):\n config.set('run', k, v)\n return config", - "docstring": "Complete config with default values" - }, - { - "code": "def getValueByName(node, name):\n try:\n value = node.xpath(\"*[local-name() = '%s']\" % name)[0].text.strip()\n except:\n return None\n return value", - "docstring": "A helper function to pull the values out of those annoying namespace\n prefixed tags" - }, - { - "code": "def make_transaction(self):\n if self.pk:\n raise CannotRecreateTransactionOnRecurredCost(\n 'The transaction for this recurred cost has already been created. 
You cannot create it again.'\n )\n amount = self.recurring_cost.get_amount(self.billing_cycle)\n if not amount:\n return None\n self.transaction = Transaction.objects.create(\n description='Created by recurring cost',\n date=self.billing_cycle.date_range.lower\n )\n splits = self.recurring_cost.splits.all().split(amount)\n self.transaction.legs.add(Leg.objects.create(\n transaction=self.transaction,\n amount=Money(amount, self.recurring_cost.currency),\n account=self.recurring_cost.to_account,\n ))\n for split, split_amount in splits:\n if split_amount:\n self.transaction.legs.add(Leg.objects.create(\n transaction=self.transaction,\n amount=Money(split_amount * -1, self.recurring_cost.currency),\n account=split.from_account,\n ))\n return self.transaction", - "docstring": "Create the transaction for this RecurredCost\n\n May only be used to create the RecurredCost's initial transaction.\n\n Returns:\n Transaction: The created transaction, also assigned to self.transaction. None if the amount is zero." - }, - { - "code": "def total_cost_function(self, item_a, item_b, time_a, time_b):\n distances = np.zeros(len(self.weights))\n for c, component in enumerate(self.cost_function_components):\n distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])\n total_distance = np.sum(self.weights * distances)\n return total_distance", - "docstring": "Calculate total cost function between two items.\n\n Args:\n item_a: STObject\n item_b: STObject\n time_a: Timestep in item_a at which cost function is evaluated\n time_b: Timestep in item_b at which cost function is evaluated\n\n Returns:\n The total weighted distance between item_a and item_b" - }, - { - "code": "def strtype(self):\n if self.kind is not None:\n return \"{}({})\".format(self.dtype, self.kind)\n else:\n return self.dtype", - "docstring": "Returns a string representing the type and kind of this value element." - }, - { - "code": "def storage_items(self):\n if not self._module:\n return {}.items()\n self._storage_init()\n items = []\n module_name = self._module.module_full_name\n for key in self._storage.storage_keys(module_name):\n value = self._storage.storage_get(module_name, key)\n items.add((key, value))\n return items", - "docstring": "Return key, value pairs of the stored data for the module.\n\n Keys will contain the following metadata entries:\n - '_ctime': storage creation timestamp\n - '_mtime': storage last modification timestamp" - }, - { - "code": "def qteStartRecordingHook(self, msgObj):\n if self.qteRecording:\n self.qteMain.qteStatus('Macro recording already enabled')\n return\n self.qteRecording = True\n self.qteMain.qteStatus('Macro recording started')\n self.recorded_keysequence = QtmacsKeysequence()\n self.qteMain.qtesigKeyparsed.connect(self.qteKeyPress)\n self.qteMain.qtesigAbort.connect(self.qteStopRecordingHook)", - "docstring": "Commence macro recording.\n\n Macros are recorded by connecting to the 'keypressed' signal\n it emits.\n\n If the recording has already commenced, or if this method was\n called during a macro replay, then return immediately." 
- }, - { - "code": "def fingerprint(self):\n hasher = sha1()\n hasher.update(self.stable_name().encode('utf-8'))\n hasher.update(self._options_fingerprint(self.options_scope).encode('utf-8'))\n hasher.update(self.implementation_version_str().encode('utf-8'))\n for dep in self.subsystem_closure_iter():\n hasher.update(self._options_fingerprint(dep.options_scope).encode('utf-8'))\n return hasher.hexdigest() if PY3 else hasher.hexdigest().decode('utf-8')", - "docstring": "Returns a fingerprint for the identity of the task.\n\n A task fingerprint is composed of the options the task is currently running under.\n Useful for invalidating unchanging targets being executed beneath changing task\n options that affect outputted artifacts.\n\n A task's fingerprint is only valid after the task has been fully initialized." - }, - { - "code": "def extrude(self, uem, reference, collar=0.0, skip_overlap=False):\n if collar == 0. and not skip_overlap:\n return uem\n collars, overlap_regions = [], []\n if collar > 0.:\n for segment in reference.itersegments():\n t = segment.start\n collars.append(Segment(t - .5 * collar, t + .5 * collar))\n t = segment.end\n collars.append(Segment(t - .5 * collar, t + .5 * collar))\n if skip_overlap:\n for (segment1, track1), (segment2, track2) in reference.co_iter(reference):\n if segment1 == segment2 and track1 == track2:\n continue\n overlap_regions.append(segment1 & segment2)\n segments = collars + overlap_regions\n return Timeline(segments=segments).support().gaps(support=uem)", - "docstring": "Extrude reference boundary collars from uem\n\n reference |----| |--------------| |-------------|\n uem |---------------------| |-------------------------------|\n extruded |--| |--| |---| |-----| |-| |-----| |-----------| |-----|\n\n Parameters\n ----------\n uem : Timeline\n Evaluation map.\n reference : Annotation\n Reference annotation.\n collar : float, optional\n When provided, set the duration of collars centered around\n reference segment boundaries that are extruded from both reference\n and hypothesis. Defaults to 0. (i.e. no collar).\n skip_overlap : bool, optional\n Set to True to not evaluate overlap regions.\n Defaults to False (i.e. keep overlap regions).\n\n Returns\n -------\n extruded_uem : Timeline" - }, - { - "code": "def do_join(self, cmdargs, nick, msgtype, send, c):\n if not cmdargs:\n send(\"Join what?\")\n return\n if cmdargs == '0':\n send(\"I'm sorry, Dave. I'm afraid I can't do that.\")\n return\n if not cmdargs.startswith(('\n cmdargs = '\n cmd = cmdargs.split()\n if cmd[0] in self.channels and not (len(cmd) > 1 and cmd[1] == \"force\"):\n send(\"%s is already a member of %s\" % (self.config['core']['nick'], cmd[0]))\n return\n c.join(cmd[0])\n self.send(cmd[0], nick, \"Joined at the request of \" + nick, msgtype)", - "docstring": "Join a channel.\n\n | Checks if bot is already joined to channel." - }, - { - "code": "def pascal_row(n):\n result = [1]\n x, numerator = 1, n\n for denominator in range(1, n // 2 + 1):\n x *= numerator\n x /= denominator\n result.append(x)\n numerator -= 1\n if n & 1 == 0:\n result.extend(reversed(result[:-1]))\n else:\n result.extend(reversed(result))\n return result", - "docstring": "Returns n-th row of Pascal's triangle" - }, - { - "code": "def zoom_in(self):\n if self._scalefactor <= self._sfmax:\n self._scalefactor += 1\n self.scale_image()\n self._adjust_scrollbar(self._scalestep)\n self.sig_zoom_changed.emit(self.get_scaling())", - "docstring": "Scale the image up by one scale step." 
- }, - { - "code": "def fetch_csv(self,\n url,\n pre_func=None,\n post_func=None,\n date_column='date',\n date_format=None,\n timezone=pytz.utc.zone,\n symbol=None,\n mask=True,\n symbol_column=None,\n special_params_checker=None,\n country_code=None,\n **kwargs):\n if country_code is None:\n country_code = self.default_fetch_csv_country_code(\n self.trading_calendar,\n )\n csv_data_source = PandasRequestsCSV(\n url,\n pre_func,\n post_func,\n self.asset_finder,\n self.trading_calendar.day,\n self.sim_params.start_session,\n self.sim_params.end_session,\n date_column,\n date_format,\n timezone,\n symbol,\n mask,\n symbol_column,\n data_frequency=self.data_frequency,\n country_code=country_code,\n special_params_checker=special_params_checker,\n **kwargs\n )\n self.data_portal.handle_extra_source(csv_data_source.df,\n self.sim_params)\n return csv_data_source", - "docstring": "Fetch a csv from a remote url and register the data so that it is\n queryable from the ``data`` object.\n\n Parameters\n ----------\n url : str\n The url of the csv file to load.\n pre_func : callable[pd.DataFrame -> pd.DataFrame], optional\n A callback to allow preprocessing the raw data returned from\n fetch_csv before dates are paresed or symbols are mapped.\n post_func : callable[pd.DataFrame -> pd.DataFrame], optional\n A callback to allow postprocessing of the data after dates and\n symbols have been mapped.\n date_column : str, optional\n The name of the column in the preprocessed dataframe containing\n datetime information to map the data.\n date_format : str, optional\n The format of the dates in the ``date_column``. If not provided\n ``fetch_csv`` will attempt to infer the format. For information\n about the format of this string, see :func:`pandas.read_csv`.\n timezone : tzinfo or str, optional\n The timezone for the datetime in the ``date_column``.\n symbol : str, optional\n If the data is about a new asset or index then this string will\n be the name used to identify the values in ``data``. For example,\n one may use ``fetch_csv`` to load data for VIX, then this field\n could be the string ``'VIX'``.\n mask : bool, optional\n Drop any rows which cannot be symbol mapped.\n symbol_column : str\n If the data is attaching some new attribute to each asset then this\n argument is the name of the column in the preprocessed dataframe\n containing the symbols. This will be used along with the date\n information to map the sids in the asset finder.\n country_code : str, optional\n Country code to use to disambiguate symbol lookups.\n **kwargs\n Forwarded to :func:`pandas.read_csv`.\n\n Returns\n -------\n csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV\n A requests source that will pull data from the url specified." - }, - { - "code": "def jsbuild_prompt():\n print(BOKEHJS_BUILD_PROMPT)\n mapping = {\"1\": True, \"2\": False}\n value = input(\"Choice? \")\n while value not in mapping:\n print(\"Input '%s' not understood. Valid choices: 1, 2\\n\" % value)\n value = input(\"Choice? 
\")\n return mapping[value]", - "docstring": "Prompt users whether to build a new BokehJS or install an existing one.\n\n Returns:\n bool : True, if a new build is requested, False otherwise" - }, - { - "code": "def get_connection(self, provider_name='default'):\n try:\n return self._providers[provider_name].get_connection()\n except KeyError:\n raise AssertionError(f'No Provider registered with name {provider_name}')", - "docstring": "Fetch connection from Provider" - }, - { - "code": "def post_process_images(self, doctree):\n super(AbstractSlideBuilder, self).post_process_images(doctree)\n relative_base = (\n ['..'] *\n doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')\n )\n for node in doctree.traverse(nodes.image):\n if node.get('candidates') is None:\n node['candidates'] = ('*',)\n if node['uri'].startswith(self.outdir):\n node['uri'] = '/'.join(\n relative_base + [\n node['uri'][len(self.outdir) + 1:]\n ]\n )", - "docstring": "Pick the best candidate for all image URIs." - }, - { - "code": "def entropy(p, w):\n if w is None:\n w = np.ones(len(p))\n h_p = np.sum([-x[0]*math.log(x[0]/x[1]) if x[0] else 0 for x in zip(p, w)])\n h_max = math.log(np.sum(w))\n return h_p, h_max", - "docstring": "Computes the entropy for a discrete probability distribution function, as\n represented by a histogram, `p`, with bin sizes `w`,\n\n h_p = Sum -1 * p_i * ln(p_i / w_i)\n\n Also computes the maximum allowed entropy for a histogram with bin sizes `w`.\n\n h_max = ln( Sum w_i )\n\n and returns both as a tuple (h_p , h_max ). The entropy is in 'natural' units.\n\n Both `p` and `w` must be Numpy arrays.\n\n If `p` is normalized to 1 ( Sum p_i * w_i = 1), then\n the normalized entropy is equal toh_p / h_max and will\n be in the range [0, 1].\n\n For example, if `p` is a completely flat PDF (a uniform distribution), then\n the normalized entropy will equal 1, indicating maximum amount of disorder.\n (This is easily shown for the case where w_i = 1.)\n\n If the `p_i` is zero for all i except j and p_j = 1, then the entropy will be 0,\n indicating no disorder.\n\n One can use this entropy measurement to search for signals in the spectrogram.\n First we need to build a histogram of the measured power values in the spectrogram.\n This histogram represents an estimate of the probability distribution function of the\n observed power in the spectrogram.\n\n If the spectrogram is entirely noise, the resulting histogram should be quite flat and\n the normalized entropy ( h_p / h_max ) will approach 1. If there is a significant signal\n in the spectrogram, then the histogram will not be flat and the normalized entropy will\n be less than 1.\n\n The decision that needs to be made is the number of bins and the bin size. 
And unfortunately,\n the resulting entropy calculated will depend on the binning.\n\n Based on testing and interpretibility, we recommend to use a fixed number of bins that either\n span the full range of the power values in the spectrogram (0 to spectrogram.max()),\n or span a fixed range (for example, from 0 to 500).\n\n For example, you may set the range equal to the range of the values in the spectrogram.\n\n bin_edges = range(0,int(spectrogram.max()) + 2) #add 1 to round up, and one to set the right bin edge.\n p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)\n w = np.diff(bin_edges)\n h_p, h_max = ibmseti.features.entropy(p,w)\n\n If you choose to fix the range of the histogram, it is highly recommended that you use\n `numpy.clip` to ensure that any of the values in the spectrogram that are greater than\n your largest bin are not thrown away!\n\n For example, if you decide on a fixed range between 0 and 500, and your spectrogram\n contains a value of 777, the following code would produce a histogram where that 777 value\n is not present in the count.\n\n bin_edges = range(0,501)\n p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)\n w = np.diff(bin_edges)\n h_p, h_max = ibmseti.features.entropy(p,w)\n\n But if you clip the spectrogram, you can interpret the last bin as being \"the number\n of spectrogram values equal to or greater than the lower bin edge\".\n\n bin_edges = range(0,501)\n p, _ = np.histogram(np.clip(spectrogram.flatten(), 0, 500), bins=bin_edges, density=True)\n w = np.diff(bin_edges)\n h_p, h_max = ibmseti.features.entropy(p,w)\n\n You can also choose to fix the number of bins\n\n bins = 50\n p, bin_edges = np.histogram(spectrogram.flatten(), bins=bins, density=True)\n w = np.diff(bin_edges)\n h_p, h_max = ibmseti.features.entropy(p,w)\n\n It is suggested to use any of the following measures as features:\n\n bin range, spectrogram.min, spectrogram.max, number_of_bins, log(number_of_bins)\n entropy, max_entropy, normalized_entropy.\n\n Automatic Binning:\n\n While Numpy and AstroML offer ways of automatically binning the data, it is unclear if this\n is a good approach for entropy calculation -- especially when wishing to compare the value\n across different spectrogram. The automatic binning tends to remove disorder in\n the set of values, making the histogram smoother and more ordered than the data actually are.\n This is true of automatic binning with fixed sizes (such as with the 'rice', and 'fd' options in\n numpy.histogram), or with the variable sized arrays as can be calculated with Bayesian Blocks\n with astroML. However, nothing is ruled out. In preliminary testing,\n the calculated entropy from a histogram calculated with Bayesian Block binning seemed to be more\n sensitive to a simulated signal than using fixed binning. However, it's unclear how to\n interpret the results because \"h_p/h_max\" *increased* with the presence of a signal and exceeded 1.\n\n **It is likely that the calculation of h_max is done incorrectly. Please check my work!**\n\n It may even be that the total number of bins created by the Bayesian Block method would\n be a suitable feature. For a completely flat distribution, there will only be one bin. If the\n data contains significant variation in power levels, the Bayesian Block method will produce more\n bins. 
More testing is required and your mileage may vary.\n\n import astroML.plotting\n\n bin_edges = astroML.density_estimation.bayesian_blocks(spectrogram.flatten())\n p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)\n w = np.diff(bin_edges)\n\n h_p, h_max = ibmseti.features.entropy(p,w)\n\n Also to note: Using astroML.density_estimation.bayesian_blocks takes prohibitively long!\n\n \"Entropy\" of raw data.\n\n If `p` is NOT a PDF, then you're on your own to interpret the results. In this case, you\n may set `w` = None and the calculation will assume w_i = 1 for all i.\n\n For example,\n\n h_p, _ = ibmseti.features.entropy(spectrogram.flatten(), None)" - }, - { - "code": "def cluster(self, n, embed_dim=None, algo=spectral.SPECTRAL, method=methods.KMEANS):\n if n == 1:\n return Partition([1] * len(self.get_dm(False)))\n if embed_dim is None:\n embed_dim = n\n if algo == spectral.SPECTRAL:\n self._coords = self.spectral_embedding(embed_dim)\n elif algo == spectral.KPCA:\n self._coords = self.kpca_embedding(embed_dim)\n elif algo == spectral.ZELNIKMANOR:\n self._coords = self.spectral_embedding_(embed_dim)\n else:\n raise OptionError(algo, list(spectral.reverse.values()))\n if method == methods.KMEANS:\n p = self.kmeans(n, self._coords.df.values)\n elif method == methods.GMM:\n p = self.gmm(n, self._coords.df.values)\n elif method == methods.WARD:\n linkmat = fastcluster.linkage(self._coords.values, 'ward')\n p = _hclust(linkmat, n)\n else:\n raise OptionError(method, list(methods.reverse.values()))\n if self._verbosity > 0:\n print('Using clustering method: {}'.format(methods.reverse[method]))\n return p", - "docstring": "Cluster the embedded coordinates using spectral clustering\n\n Parameters\n ----------\n n: int\n The number of clusters to return\n embed_dim: int\n The dimensionality of the underlying coordinates\n Defaults to same value as n\n algo: enum value (spectral.SPECTRAL | spectral.KPCA | spectral.ZELNIKMANOR)\n Type of embedding to use\n method: enum value (methods.KMEANS | methods.GMM)\n The clustering method to use\n\n Returns\n -------\n Partition: Partition object describing the data partition" - }, - { - "code": "def reference(node):\n o = nodes.reference()\n o['refuri'] = node.destination\n if node.title:\n o['name'] = node.title\n for n in MarkDown(node):\n o += n\n return o", - "docstring": "A hyperlink. Note that alt text doesn't work, since there's no apparent way to do that in docutils" - }, - { - "code": "def delete_image(self, subreddit, name=None, header=False):\n subreddit = six.text_type(subreddit)\n if name and header:\n raise TypeError('Both name and header cannot be set.')\n elif name:\n data = {'img_name': name}\n url = self.config['delete_sr_image']\n self.evict(self.config['stylesheet'].format(subreddit=subreddit))\n else:\n data = True\n url = self.config['delete_sr_header']\n url = url.format(subreddit=subreddit)\n return self.request_json(url, data=data)", - "docstring": "Delete an image from the subreddit.\n\n :param name: The name of the image if removing a CSS image.\n :param header: When true, delete the subreddit header.\n :returns: The json response from the server." - }, - { - "code": "def _new_url_record(cls, request: Request) -> URLRecord:\n url_record = URLRecord()\n url_record.url = request.url_info.url\n url_record.status = Status.in_progress\n url_record.try_count = 0\n url_record.level = 0\n return url_record", - "docstring": "Return new empty URLRecord." 
- }, - { - "code": "def get_hkr_state(self):\n self.update()\n try:\n return {\n 126.5: 'off',\n 127.0: 'on',\n self.eco_temperature: 'eco',\n self.comfort_temperature: 'comfort'\n }[self.target_temperature]\n except KeyError:\n return 'manual'", - "docstring": "Get the thermostate state." - }, - { - "code": "def _deserialize_dict(data, boxed_type):\n return {k: _deserialize(v, boxed_type)\n for k, v in six.iteritems(data)}", - "docstring": "Deserializes a dict and its elements.\n\n :param data: dict to deserialize.\n :type data: dict\n :param boxed_type: class literal.\n\n :return: deserialized dict.\n :rtype: dict" - }, - { - "code": "def setPrivates(self, fieldDict) :\n for priv in self.privates :\n if priv in fieldDict :\n setattr(self, priv, fieldDict[priv])\n else :\n setattr(self, priv, None)\n if self._id is not None :\n self.URL = \"%s/%s\" % (self.documentsURL, self._id)", - "docstring": "will set self._id, self._rev and self._key field." - }, - { - "code": "def _transstat(status, grouppath, dictpath, line):\n if status == 0:\n raise MTLParseError(\n \"Status should not be '%s' after reading line:\\n%s\"\n % (STATUSCODE[status], line))\n elif status == 1:\n currentdict = dictpath[-1]\n currentgroup = _getgroupname(line)\n grouppath.append(currentgroup)\n currentdict[currentgroup] = {}\n dictpath.append(currentdict[currentgroup])\n elif status == 2:\n currentdict = dictpath[-1]\n newkey, newval = _getmetadataitem(line)\n if newkey == 'SCENE_CENTER_TIME' and newval.startswith('\"') \\\n and newval.endswith('\"'):\n newval = newval[1:-1]\n currentdict[newkey] = _postprocess(newval)\n elif status == 3:\n oldgroup = _getendgroupname(line)\n if oldgroup != grouppath[-1]:\n raise MTLParseError(\n \"Reached line '%s' while reading group '%s'.\"\n % (line.strip(), grouppath[-1]))\n del grouppath[-1]\n del dictpath[-1]\n try:\n currentgroup = grouppath[-1]\n except IndexError:\n currentgroup = None\n elif status == 4:\n if grouppath:\n raise MTLParseError(\n \"Reached end before end of group '%s'\" % grouppath[-1])\n return grouppath, dictpath", - "docstring": "Executes processing steps when reading a line" - }, - { - "code": "def __purge():\n global __receivers\n newreceivers = collections.defaultdict(list)\n for signal, receivers in six.iteritems(__receivers):\n alive = [x for x in receivers if not __is_dead(x)]\n newreceivers[signal] = alive\n __receivers = newreceivers", - "docstring": "Remove all dead signal receivers from the global receivers collection.\n\n Note:\n It is assumed that the caller holds the __lock." - }, - { - "code": "def getColorName(c):\n c = np.array(getColor(c))\n mdist = 99.0\n kclosest = \"\"\n for key in colors.keys():\n ci = np.array(getColor(key))\n d = np.linalg.norm(c - ci)\n if d < mdist:\n mdist = d\n kclosest = str(key)\n return kclosest", - "docstring": "Find the name of a color.\n\n .. 
hint:: |colorpalette| |colorpalette.py|_" - }, - { - "code": "def decodedFileID(self, cachedFilePath):\n fileDir, fileName = os.path.split(cachedFilePath)\n assert fileDir == self.localCacheDir, 'Can\\'t decode uncached file names'\n return base64.urlsafe_b64decode(fileName.encode('utf-8')).decode('utf-8')", - "docstring": "Decode a cached fileName back to a job store file ID.\n\n :param str cachedFilePath: Path to the cached file\n :return: The jobstore file ID associated with the file\n :rtype: str" - }, - { - "code": "def commit_output(cls, shard_ctx, iterator):\n outs = tuple(iterator)\n shard_ctx._state.writer_state[\"outs\"] = outs", - "docstring": "Saves output references when a shard finishes.\n\n Inside end_shard(), an output writer can optionally use this method\n to persist some references to the outputs from this shard\n (e.g a list of filenames)\n\n Args:\n shard_ctx: map_job_context.ShardContext for this shard.\n iterator: an iterator that yields json serializable\n references to the outputs from this shard.\n Contents from the iterator can be accessible later via\n map_job.Job.get_outputs." - }, - { - "code": "def autosave(self, index):\n finfo = self.stack.data[index]\n document = finfo.editor.document()\n if not document.changed_since_autosave or finfo.newly_created:\n return\n autosave_filename = self.get_autosave_filename(finfo.filename)\n logger.debug('Autosaving %s to %s', finfo.filename, autosave_filename)\n try:\n self.stack._write_to_file(finfo, autosave_filename)\n document.changed_since_autosave = False\n except EnvironmentError as error:\n action = (_('Error while autosaving {} to {}')\n .format(finfo.filename, autosave_filename))\n msgbox = AutosaveErrorDialog(action, error)\n msgbox.exec_if_enabled()", - "docstring": "Autosave a file.\n\n Do nothing if the `changed_since_autosave` flag is not set or the file\n is newly created (and thus not named by the user). Otherwise, save a\n copy of the file with the name given by `self.get_autosave_filename()`\n and clear the `changed_since_autosave` flag. Errors raised when saving\n are silently ignored.\n\n Args:\n index (int): index into self.stack.data" - }, - { - "code": "def __update(self):\n self.reset()\n if not os.path.exists(self.IRQ_FILE):\n return self.stats\n try:\n with open(self.IRQ_FILE) as irq_proc:\n time_since_update = getTimeSinceLastUpdate('irq')\n self.__header(irq_proc.readline())\n for line in irq_proc.readlines():\n irq_line = self.__humanname(line)\n current_irqs = self.__sum(line)\n irq_rate = int(\n current_irqs - self.lasts.get(irq_line)\n if self.lasts.get(irq_line)\n else 0 // time_since_update)\n irq_current = {\n 'irq_line': irq_line,\n 'irq_rate': irq_rate,\n 'key': self.get_key(),\n 'time_since_update': time_since_update\n }\n self.stats.append(irq_current)\n self.lasts[irq_line] = current_irqs\n except (OSError, IOError):\n pass\n return self.stats", - "docstring": "Load the IRQ file and update the internal dict." - }, - { - "code": "def _ascii_find_urls(bytes, mimetype, extra_tokens=True):\n tokens = _tokenize(bytes, mimetype, extra_tokens=extra_tokens)\n return tokens", - "docstring": "This function finds URLs inside of ASCII bytes." 
- }, - { - "code": "def handleError(self, test, err, capt=None):\n if not hasattr(test.test, \"testcase_guid\"):\n if err[0] == errors.BlockedTest:\n raise SkipTest(err[1])\n return True\n elif err[0] == errors.DeprecatedTest:\n raise SkipTest(err[1])\n return True\n elif err[0] == errors.SkipTest:\n raise SkipTest(err[1])\n return True", - "docstring": "If the database plugin is not present, we have to handle capturing\n \"errors\" that shouldn't be reported as such in base." - }, - { - "code": "def complete_handshake(self):\n if self.__timer:\n self.__timer.cancel()\n self.__timer_expired = False\n self.__handshake_complete = True", - "docstring": "Tells `Packetizer` that the handshake has completed." - }, - { - "code": "def main():\n arguments = docopt(__doc__)\n if arguments['init']:\n return_code = init()\n sys.exit(return_code)\n run_converter(arguments)", - "docstring": "Parses command line options and runs nbinteract." - }, - { - "code": "def get_adjustments(self,\n zero_qtr_data,\n requested_qtr_data,\n last_per_qtr,\n dates,\n assets,\n columns,\n **kwargs):\n split_adjusted_cols_for_group = [\n self.name_map[col.name]\n for col in columns\n if self.name_map[col.name] in self._split_adjusted_column_names\n ]\n split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(\n dates\n )\n return super(SplitAdjustedEstimatesLoader, self).get_adjustments(\n zero_qtr_data,\n requested_qtr_data,\n last_per_qtr,\n dates,\n assets,\n columns,\n split_adjusted_cols_for_group=split_adjusted_cols_for_group,\n split_adjusted_asof_idx=split_adjusted_asof_idx\n )", - "docstring": "Calculates both split adjustments and overwrites for all sids." - }, - { - "code": "def getStatusMsg(self):\n if 'status' in self._response.keys():\n if (self._response['status'] is not None) and ('msg' in self._response['status'].keys()) and (self._response['status']['msg'] is not None):\n return self._response['status']['msg']\n else:\n return ''", - "docstring": "Returns the message of the status or an empty string if it does not exist\n\n :return:\n Status message of the response" - }, - { - "code": "def is_open(location=None, attr=None):\n obj = utils.is_open(location)\n if obj is False:\n return False\n if attr is not None:\n return getattr(obj, attr)\n return obj", - "docstring": "Returns False if the location is closed, or the OpeningHours object\n to show the location is currently open." - }, - { - "code": "def _get_db_fields(self, obj):\n for field in obj.indexes:\n yield field, self._zeo_key(field)", - "docstring": "Return list of database dictionaries, which are used as indexes for\n each attributes.\n\n Args:\n cached (bool, default True): Use cached connection to database.\n\n Returns:\n list: List of OOBTree's for each item in :attr:`.COMMON_FIELDS`." 
- }, - { - "code": "def recursive_update(self, k, d):\n u = self.__getitem__(k)\n self.store[k] = _recursive_update(u, d)", - "docstring": "Recursively update a top-level option in the run control\n\n Parameters\n ----------\n k : string\n the top-level key\n d : dictionary or similar\n the dictionary to use for updating" - }, - { - "code": "def get_embedded_tweet(tweet):\n if tweet.retweeted_tweet is not None:\n return tweet.retweeted_tweet\n elif tweet.quoted_tweet is not None:\n return tweet.quoted_tweet\n else:\n return None", - "docstring": "Get the retweeted Tweet OR the quoted Tweet and return it as a dictionary\n\n Args:\n tweet (Tweet): A Tweet object (not simply a dict)\n\n Returns:\n dict (or None, if the Tweet is neither a quote tweet or a Retweet):\n a dictionary representing the quote Tweet or the Retweet" - }, - { - "code": "def acquire_restore(lock, state):\n if hasattr(lock, '_acquire_restore'):\n lock._acquire_restore(state)\n elif hasattr(lock, 'acquire'):\n lock.acquire()\n else:\n raise TypeError('expecting Lock/RLock')", - "docstring": "Acquire a lock and restore its state." - }, - { - "code": "def do_file_show(client, args):\n for src_uri in args.uris:\n client.download_file(src_uri, sys.stdout.buffer)\n return True", - "docstring": "Output file contents to stdout" - }, - { - "code": "def _spark_map(fun, indexed_param_grid, sc, seed, X_bc):\n def _wrap_random_state(split_index, partition):\n prng = np.random.RandomState(seed + split_index)\n yield map(partial(fun, prng=prng, X=X_bc), partition)\n par_param_grid = sc.parallelize(indexed_param_grid)\n indexed_results = par_param_grid.mapPartitionsWithIndex(\n _wrap_random_state\n ).collect()\n return [item for sublist in indexed_results for item in sublist]", - "docstring": "We cannot pass a RandomState instance to each spark worker since it will\n behave identically across partitions. 
Instead, we explictly handle the\n partitions with a newly seeded instance.\n\n The seed for each partition will be the \"seed\" (MonteCarloProfile.seed) +\n \"split_index\" which is the partition index.\n\n Following this trick:\n https://wegetsignal.wordpress.com/2015/05/08/\n generating-random-numbers-for-rdd-in-spark/" - }, - { - "code": "def _get_annotation_heading(self, handler, route, heading=None):\n if hasattr(handler, '_doctor_heading'):\n return handler._doctor_heading\n heading = ''\n handler_path = str(handler)\n try:\n handler_file_name = handler_path.split('.')[-2]\n except IndexError:\n handler_file_name = 'handler'\n if handler_file_name.startswith('handler'):\n class_name = handler_path.split('.')[-1]\n internal = False\n for word in CAMEL_CASE_RE.findall(class_name):\n if word == 'Internal':\n internal = True\n continue\n elif word.startswith(('List', 'Handler', 'Resource')):\n break\n heading += '%s ' % (word,)\n if internal:\n heading = heading.strip()\n heading += ' (Internal)'\n else:\n heading = ' '.join(handler_file_name.split('_')).title()\n if 'internal' in route:\n heading += ' (Internal)'\n return heading.strip()", - "docstring": "Returns the heading text for an annotation.\n\n Attempts to get the name of the heading from the handler attribute\n `schematic_title` first.\n\n If `schematic_title` it is not present, it attempts to generate\n the title from the class path.\n This path: advertiser_api.handlers.foo_bar.FooListHandler\n would translate to 'Foo Bar'\n\n If the file name with the resource is generically named handlers.py\n or it doesn't have a full path then we attempt to get the resource\n name from the class name.\n So FooListHandler and FooHandler would translate to 'Foo'.\n If the handler class name starts with 'Internal', then that will\n be appended to the heading.\n So InternalFooListHandler would translate to 'Foo (Internal)'\n\n :param mixed handler: The handler class. Will be a flask resource class\n :param str route: The route to the handler.\n :returns: The text for the heading as a string." - }, - { - "code": "def post_resource(self, path, body=None, json=None):\n url = '%s%s' % (path, self._param_list())\n headers = {\n 'Accept': 'application/json;odata=minimalmetadata'\n }\n if json:\n headers['Content-Type'] = 'application/json'\n body = json_dumps(json)\n response = O365_DAO().postURL(self._url(url), headers, body)\n if not (response.status == 200 or response.status == 201 or\n response.status == 204):\n raise DataFailureException(url, response.status, response.data)\n return json_loads(response.data)", - "docstring": "O365 POST method." - }, - { - "code": "def _build(self, leaves):\n new, odd = [], None\n if len(leaves) % 2 == 1:\n odd = leaves.pop(-1)\n for i in range(0, len(leaves), 2):\n newnode = Node(leaves[i].val + leaves[i + 1].val)\n newnode.l, newnode.r = leaves[i], leaves[i + 1]\n leaves[i].side, leaves[i + 1].side, leaves[i].p, leaves[i + 1].p = 'L', 'R', newnode, newnode\n leaves[i].sib, leaves[i + 1].sib = leaves[i + 1], leaves[i]\n new.append(newnode)\n if odd:\n new.append(odd)\n return new", - "docstring": "Private helper function to create the next aggregation level and put all references in place." 
- }, - { - "code": "def _random_starts(fun,\n parameters,\n jac,\n args,\n nstarts,\n random_state,\n data_gen=None):\n if nstarts < 1:\n raise ValueError(\"nstarts has to be greater than or equal to 1\")\n anyrand = any(flatten(map_recursive(lambda p: p.is_random, parameters),\n returns_shapes=False))\n if not anyrand:\n log.info(\"No random parameters, not doing any random starts\")\n params = map_recursive(lambda p: p.value, parameters, output_type=list)\n return params\n log.info(\"Evaluating random starts...\")\n if jac is True:\n call_fun = lambda *fargs: fun(*fargs)[0]\n else:\n call_fun = fun\n def fun_eval():\n batch = next(data_gen) if data_gen else ()\n params = map_recursive(lambda p: p.rvs(random_state), parameters,\n output_type=list)\n obj = call_fun(*chain(params, batch, args))\n return obj, params\n sample_gen = (fun_eval() for _ in range(nstarts))\n obj, params = min(sample_gen, key=lambda t: t[0])\n log.info(\"Best start found with objective = {}\".format(obj))\n return flatten(params, returns_shapes=False)", - "docstring": "Generate and evaluate random starts for Parameter objects." - }, - { - "code": "def remove_finished_work_units(self, work_spec_name, work_unit_names):\n return self._remove_some_work_units(\n work_spec_name, work_unit_names, suffix=_FINISHED)", - "docstring": "Remove some finished work units.\n\n If `work_unit_names` is :const:`None` (which must be passed\n explicitly), all finished work units in `work_spec_name` are\n removed; otherwise only the specific named work units will be.\n\n :param str work_spec_name: name of the work spec\n :param list work_unit_names: names of the work units, or\n :const:`None` for all in `work_spec_name`\n :return: number of work units removed" - }, - { - "code": "def forget(empowered, powerupClass, interface):\n className = fullyQualifiedName(powerupClass)\n withThisName = _StoredByName.className == className\n items = empowered.store.query(_StoredByName, withThisName)\n if items.count() == 0:\n template = \"No named powerups for {} (interface: {})\".format\n raise ValueError(template(powerupClass, interface))\n for stored in items:\n empowered.powerDown(stored, interface)\n stored.deleteFromStore()", - "docstring": "Forgets powerups previously stored with ``remember``.\n\n :param empowered: The Empowered (Store or Item) to be powered down.\n :type empowered: ``axiom.item.Empowered``\n :param powerupClass: The class for which powerups will be forgotten.\n :type powerupClass: class\n :param interface: The interface the powerups were installed for.\n :type interface: ``zope.interface.Interface``\n :returns: ``None``\n :raises ValueError: Class wasn't previously remembered." 
- }, - { - "code": "def thermostat_state(self):\n current_state = self.thermostat_info.active_state\n state = self.get_thermostat_state_by_id(current_state)\n if not state:\n self._logger.debug('Manually set temperature, no Thermostat '\n 'State chosen!')\n return state", - "docstring": "The state of the thermostat programming\n\n :return: A thermostat state object of the current setting" - }, - { - "code": "def view_fullreport(token, dstore):\n from openquake.calculators.reportwriter import ReportWriter\n return ReportWriter(dstore).make_report()", - "docstring": "Display an .rst report about the computation" - }, - { - "code": "def _convert_string_array(data, encoding, errors, itemsize=None):\n if encoding is not None and len(data):\n data = Series(data.ravel()).str.encode(\n encoding, errors).values.reshape(data.shape)\n if itemsize is None:\n ensured = ensure_object(data.ravel())\n itemsize = max(1, libwriters.max_len_string_array(ensured))\n data = np.asarray(data, dtype=\"S{size}\".format(size=itemsize))\n return data", - "docstring": "we take a string-like that is object dtype and coerce to a fixed size\n string type\n\n Parameters\n ----------\n data : a numpy array of object dtype\n encoding : None or string-encoding\n errors : handler for encoding errors\n itemsize : integer, optional, defaults to the max length of the strings\n\n Returns\n -------\n data in a fixed-length string dtype, encoded to bytes if needed" - }, - { - "code": "def get(self):\n now = time.time()\n while self._clients and self._clients[0][0] < now:\n _, (client, last_wait) = heapq.heappop(self._clients)\n connect_start = time.time()\n try:\n client.echo(\"test\")\n self._client_ids.remove(client.pool_id)\n yield client\n except (ConnectionError, TimeoutError):\n timer = time.time() - connect_start\n wait = min(int(last_wait * self._multiplier), self._max_wait)\n heapq.heappush(self._clients, (time.time() + wait, (client, wait)))\n log.info(\n \"%r is still down after a %s second attempt to connect. Retrying in %ss.\",\n client,\n timer,\n wait,\n )", - "docstring": "Get any clients ready to be used.\n\n :returns: Iterable of redis clients" - }, - { - "code": "def arguments(self):\n if not self.is_function:\n raise ValueError('expected function value, got %s' % (self._kind,))\n it = ffi.lib.LLVMPY_FunctionArgumentsIter(self)\n parents = self._parents.copy()\n parents.update(function=self)\n return _ArgumentsIterator(it, parents)", - "docstring": "Return an iterator over this function's arguments.\n The iterator will yield a ValueRef for each argument." - }, - { - "code": "def remove(self, item):\n if item in self:\n super(NGram, self).remove(item)\n del self.length[item]\n for ngram in self.splititem(item):\n del self._grams[ngram][item]", - "docstring": "Remove an item from the set. 
Inverts the add operation.\n\n >>> from ngram import NGram\n >>> n = NGram(['spam', 'eggs'])\n >>> n.remove('spam')\n >>> list(n)\n ['eggs']" - }, - { - "code": "def _describe_tree(self, prefix, with_transform):\n extra = ': \"%s\"' % self.name if self.name is not None else ''\n if with_transform:\n extra += (' [%s]' % self.transform.__class__.__name__)\n output = ''\n if len(prefix) > 0:\n output += prefix[:-3]\n output += ' +--'\n output += '%s%s\\n' % (self.__class__.__name__, extra)\n n_children = len(self.children)\n for ii, child in enumerate(self.children):\n sub_prefix = prefix + (' ' if ii+1 == n_children else ' |')\n output += child._describe_tree(sub_prefix, with_transform)\n return output", - "docstring": "Helper function to actuall construct the tree" - }, - { - "code": "def states(self,\n Nmax=50, omega_min=None, omega_max=None, return_missing=False):\n r\n S = 1/Integer(2)\n available = []\n not_available = []\n for N in range(1, Nmax+1):\n for L in range(N):\n Jmin = abs(L-S)\n Jmax = L+S\n Jpos = [Jmin+i for i in range(Jmax-Jmin+1)]\n for J in Jpos:\n try:\n state = State(self.element, self.isotope, N, L, J)\n available += [state]\n except:\n not_available += [(self.element, self.isotope,\n N, L, J)]\n if omega_min is not None:\n available = [s for s in available if s.omega >= omega_min]\n if omega_max is not None:\n available = [s for s in available if s.omega <= omega_max]\n available = [(s.omega, s) for s in available]\n available = sorted(available)\n available = [s[1] for s in available]\n if return_missing:\n return available, not_available\n else:\n return available", - "docstring": "r\"\"\"Find all states of available in an atom.\n\n This function returns all available states up to the fine structure\n (ordered by energy) such that the principal quantum number is N<=Nmax.\n Nmax is 50 by default.\n\n >>> atom=Atom(\"Rb\",85)\n >>> states=atom.states()\n >>> print states\n [85Rb 5S_1/2, 85Rb 5P_1/2, 85Rb 5P_3/2, 85Rb 4D_5/2, 85Rb 4D_3/2, 85Rb 6S_1/2, 85Rb 6P_1/2, 85Rb 6P_3/2, 85Rb 5D_3/2, 85Rb 5D_5/2, 85Rb 7S_1/2, 85Rb 7P_1/2, 85Rb 7P_3/2, 85Rb 6D_3/2, 85Rb 7D_3/2, 85Rb 14S_1/2, 85Rb 15S_1/2, 85Rb 16S_1/2, 85Rb 17S_1/2, 85Rb 18S_1/2, 85Rb 19S_1/2, 85Rb 20S_1/2, 85Rb 21S_1/2, 85Rb 22S_1/2, 85Rb 23S_1/2, 85Rb 24S_1/2, 85Rb 25S_1/2, 85Rb 26S_1/2, 85Rb 27S_1/2, 85Rb 28S_1/2, 85Rb 29S_1/2, 85Rb 30S_1/2, 85Rb 31S_1/2, 85Rb 32S_1/2, 85Rb 33S_1/2, 85Rb 34S_1/2, 85Rb 35S_1/2, 85Rb 36S_1/2, 85Rb 37S_1/2, 85Rb 38S_1/2, 85Rb 39S_1/2, 85Rb 40S_1/2, 85Rb 41S_1/2, 85Rb 42S_1/2, 85Rb 43S_1/2, 85Rb 44S_1/2, 85Rb 45S_1/2, 85Rb 46S_1/2, 85Rb 47S_1/2, 85Rb 48S_1/2, 85Rb 49S_1/2, 85Rb 50S_1/2]\n\n If an omega_max is provided any state with an energy higher than\n hbar*omega will not be returned. 
If an omega_min is provided any state\n with an energy lower than hbar*omega will not be returned.\n\n >>> atom.states(omega_min=1.00845e15*2*pi, omega_max=1.0086e+15*2*pi)\n [85Rb 49S_1/2, 85Rb 50S_1/2]\n\n If return_missing=True then the function will return a 2-tuple composed\n by a list of the states available, and a list of the valid states not\n available.\n\n >>> available,not_available=atom.states(Nmax=5,return_missing=True)\n >>> print available\n [85Rb 5S_1/2, 85Rb 5P_1/2, 85Rb 5P_3/2, 85Rb 4D_5/2, 85Rb 4D_3/2, 85Rb 5D_3/2, 85Rb 5D_5/2]\n >>> print not_available\n [('Rb', 85, 1, 0, 1/2), ('Rb', 85, 2, 0, 1/2), ('Rb', 85, 2, 1, 1/2), ('Rb', 85, 2, 1, 3/2), ('Rb', 85, 3, 0, 1/2), ('Rb', 85, 3, 1, 1/2), ('Rb', 85, 3, 1, 3/2), ('Rb', 85, 3, 2, 3/2), ('Rb', 85, 3, 2, 5/2), ('Rb', 85, 4, 0, 1/2), ('Rb', 85, 4, 1, 1/2), ('Rb', 85, 4, 1, 3/2), ('Rb', 85, 4, 3, 5/2), ('Rb', 85, 4, 3, 7/2), ('Rb', 85, 5, 3, 5/2), ('Rb', 85, 5, 3, 7/2), ('Rb', 85, 5, 4, 7/2), ('Rb', 85, 5, 4, 9/2)]" - }, - { - "code": "def yearInfo2yearDay(yearInfo):\n yearInfo = int(yearInfo)\n res = 29 * 12\n leap = False\n if yearInfo % 16 != 0:\n leap = True\n res += 29\n yearInfo //= 16\n for i in range(12 + leap):\n if yearInfo % 2 == 1:\n res += 1\n yearInfo //= 2\n return res", - "docstring": "calculate the days in a lunar year from the lunar year's info\n\n >>> yearInfo2yearDay(0) # no leap month, and every month has 29 days.\n 348\n >>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days.\n 377\n >>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days.\n 360\n >>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days.\n 390\n >>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days.\n >>> yearInfo2yearDay((2**12-1)*16+1)\n 389" - }, - { - "code": "def add_team_member(name, team_name, profile=\"github\"):\n team = get_team(team_name, profile=profile)\n if not team:\n log.error('Team %s does not exist', team_name)\n return False\n try:\n client = _get_client(profile)\n organization = client.get_organization(\n _get_config_value(profile, 'org_name')\n )\n team = organization.get_team(team['id'])\n member = client.get_user(name)\n except UnknownObjectException:\n log.exception('Resource not found: %s', team['id'])\n return False\n try:\n headers, data = team._requester.requestJsonAndCheck(\n \"PUT\",\n team.url + \"/memberships/\" + member._identity,\n input={'role': 'member'},\n parameters={'role': 'member'}\n )\n except github.GithubException:\n log.exception('Error in adding a member to a team')\n return False\n return True", - "docstring": "Adds a team member to a team with team_name.\n\n name\n The name of the team member to add.\n\n team_name\n The name of the team of which to add the user.\n\n profile\n The name of the profile configuration to use. Defaults to ``github``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt myminion github.add_team_member 'user_name' 'team_name'\n\n .. versionadded:: 2016.11.0" - }, - { - "code": "def _get_grps_n_codes(self, usr_set):\n codes = usr_set.intersection(self.code2nt)\n for grp in usr_set.intersection(self.grp2codes):\n codes.update(self.grp2codes[grp])\n return codes", - "docstring": "Get codes, given codes or groups." 
- }, - { - "code": "def read_audit_log(self, symbol=None, message=None):\n query = {}\n if symbol:\n if isinstance(symbol, six.string_types):\n query['symbol'] = {'$regex': symbol}\n else:\n query['symbol'] = {'$in': list(symbol)}\n if message is not None:\n query['message'] = message\n def _pop_id(x):\n x.pop('_id')\n return x\n return [_pop_id(x) for x in self._audit.find(query, sort=[('_id', -1)])]", - "docstring": "Return the audit log associated with a given symbol\n\n Parameters\n ----------\n symbol : `str`\n symbol name for the item" - }, - { - "code": "def _get_from_send_queue(self):\n try:\n packet = self.transmit.get(block=False)\n self.logger.info('Sending packet')\n self.logger.debug(packet)\n return packet\n except queue.Empty:\n pass\n return None", - "docstring": "Get message from send queue, if one exists" - }, - { - "code": "def callback(cfunc):\n return C.c_voidp.from_address(C.cast(cfunc, C.c_voidp).value)", - "docstring": "Turn a ctypes CFUNCTYPE instance into a value which can be passed into PyROOT" - }, - { - "code": "def service_absent(name, namespace='default', **kwargs):\n ret = {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''}\n service = __salt__['kubernetes.show_service'](name, namespace, **kwargs)\n if service is None:\n ret['result'] = True if not __opts__['test'] else None\n ret['comment'] = 'The service does not exist'\n return ret\n if __opts__['test']:\n ret['comment'] = 'The service is going to be deleted'\n ret['result'] = None\n return ret\n res = __salt__['kubernetes.delete_service'](name, namespace, **kwargs)\n if res['code'] == 200:\n ret['result'] = True\n ret['changes'] = {\n 'kubernetes.service': {\n 'new': 'absent', 'old': 'present'}}\n ret['comment'] = res['message']\n else:\n ret['comment'] = 'Something went wrong, response: {0}'.format(res)\n return ret", - "docstring": "Ensures that the named service is absent from the given namespace.\n\n name\n The name of the service\n\n namespace\n The name of the namespace" - }, - { - "code": "def dicts_to_dict(dictionaries, key_subfieldname):\n result = {}\n for d in dictionaries:\n result[d[key_subfieldname]] = d\n return result", - "docstring": "Convert a list of dictionaries into a dictionary of dictionaries.\n\n key_subfieldname must exist in each Record's subfields and have a value,\n which will be used as the key for the new dictionary. If a key is duplicated,\n the earlier value will be overwritten." - }, - { - "code": "def get_guest_property(self, name):\n if not isinstance(name, basestring):\n raise TypeError(\"name can only be an instance of type basestring\")\n (value, timestamp, flags) = self._call(\"getGuestProperty\",\n in_p=[name])\n return (value, timestamp, flags)", - "docstring": "Reads an entry from the machine's guest property store.\n\n in name of type str\n The name of the property to read.\n\n out value of type str\n The value of the property. If the property does not exist then this\n will be empty.\n\n out timestamp of type int\n The time at which the property was last modified, as seen by the\n server process.\n\n out flags of type str\n Additional property parameters, passed as a comma-separated list of\n \"name=value\" type entries.\n\n raises :class:`VBoxErrorInvalidVmState`\n Machine session is not open." 
- }, - { - "code": "def _get_app_module(self):\n def configure(binder):\n binder.bind(ServiceApplication, to=self, scope=singleton)\n binder.bind(Config, to=self.config, scope=singleton)\n return configure", - "docstring": "Returns a module which binds the current app and configuration.\n\n :return: configuration callback\n :rtype: Callable" - }, - { - "code": "def _progress_hook(self, blocknum, blocksize, totalsize):\n read = blocknum * blocksize\n if totalsize > 0:\n percent = read * 1e2 / totalsize\n s = \"\\r%d%% %*d / %d\" % (\n percent, len(str(totalsize)), read, totalsize)\n sys.stdout.write(s)\n if read >= totalsize:\n sys.stdout.write(\"\\n\")\n else:\n sys.stdout.write(\"read %d\\n\" % read)", - "docstring": "Progress hook for urlretrieve." - }, - { - "code": "def get_type_for_inputs(table):\n return [\n dict(\n type=INPUT_TYPES.get(\n type(field_type.type), rc.TEXT_INPUT.value\n ),\n name=name,\n isPrimaryKey=(name in table.primary_key),\n props=None,\n ) for name, field_type in table.c.items()\n ]", - "docstring": "Return information about table's fields in dictionary type.\n\n :param table: sa.Table - the current table\n :return: list - list of the dictionaries" - }, - { - "code": "def strip_prefix_from_items(prefix, items):\n items_no_prefix = []\n for item in items:\n if item.startswith(prefix):\n items_no_prefix.append(item[len(prefix):])\n else:\n items_no_prefix.append(item)\n return items_no_prefix", - "docstring": "Strips out the prefix from each of the items if it is present.\n\n Args:\n prefix: the string for that you wish to strip from the beginning of each\n of the items.\n items: a list of strings that may or may not contain the prefix you want\n to strip out.\n\n Returns:\n items_no_prefix: a copy of the list of items (same order) without the\n prefix (if present)." - }, - { - "code": "def moveGamepadFocusToNeighbor(self, eDirection, ulFrom):\n fn = self.function_table.moveGamepadFocusToNeighbor\n result = fn(eDirection, ulFrom)\n return result", - "docstring": "Changes the Gamepad focus from one overlay to one of its neighbors. Returns VROverlayError_NoNeighbor if there is no\n neighbor in that direction" - }, - { - "code": "def trim_common_suffixes(strs, min_len=0):\n if len(strs) < 2:\n return 0, strs\n rev_strs = [s[::-1] for s in strs]\n trimmed, rev_strs = trim_common_prefixes(rev_strs, min_len)\n if trimmed:\n strs = [s[::-1] for s in rev_strs]\n return trimmed, strs", - "docstring": "trim common suffixes\n\n >>> trim_common_suffixes('A', 1)\n (0, 'A')" - }, - { - "code": "def zincrby(self, name, value, amount=1):\n with self.pipe as pipe:\n return pipe.zincrby(self.redis_key(name),\n value=self.valueparse.encode(value),\n amount=amount)", - "docstring": "Increment the score of the item by `value`\n\n :param name: str the name of the redis key\n :param value:\n :param amount:\n :return:" - }, - { - "code": "def log_output(f):\n @wraps(f)\n def wrapper_fn(*args, **kwargs):\n res = f(*args, **kwargs)\n logging.debug(\"Logging result %s.\", res)\n return res\n return wrapper_fn", - "docstring": "Logs the output value." 
- }, - { - "code": "def DEFINE_multi_float(\n name, default, help, lower_bound=None, upper_bound=None,\n flag_values=FLAGS, **args):\n parser = FloatParser(lower_bound, upper_bound)\n serializer = ArgumentSerializer()\n DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)", - "docstring": "Registers a flag whose value can be a list of arbitrary floats.\n\n Use the flag on the command line multiple times to place multiple\n float values into the list. The 'default' may be a single float\n (which will be converted into a single-element list) or a list of\n floats.\n\n Args:\n name: A string, the flag name.\n default: The default value of the flag.\n help: A help string.\n lower_bound: float, min values of the flag.\n upper_bound: float, max values of the flag.\n flag_values: FlagValues object with which the flag will be registered.\n **args: Dictionary with extra keyword args that are passed to the\n Flag __init__." - }, - { - "code": "def refresh_actions(self):\n self.options_menu.clear()\n if self.undocked_window is None:\n additional_actions = [MENU_SEPARATOR,\n self.undock_action,\n self.close_plugin_action]\n else:\n additional_actions = [MENU_SEPARATOR,\n self.dock_action]\n self.plugin_actions = self.get_plugin_actions() + additional_actions\n add_actions(self.options_menu, self.plugin_actions)", - "docstring": "Create options menu." - }, - { - "code": "def get_xml(pmc_id):\n if pmc_id.upper().startswith('PMC'):\n pmc_id = pmc_id[3:]\n params = {}\n params['verb'] = 'GetRecord'\n params['identifier'] = 'oai:pubmedcentral.nih.gov:%s' % pmc_id\n params['metadataPrefix'] = 'pmc'\n res = requests.get(pmc_url, params)\n if not res.status_code == 200:\n logger.warning(\"Couldn't download %s\" % pmc_id)\n return None\n xml_bytes = res.content\n tree = ET.XML(xml_bytes, parser=UTB())\n xmlns = \"http://www.openarchives.org/OAI/2.0/\"\n err_tag = tree.find('{%s}error' % xmlns)\n if err_tag is not None:\n err_code = err_tag.attrib['code']\n err_text = err_tag.text\n logger.warning('PMC client returned with error %s: %s'\n % (err_code, err_text))\n return None\n else:\n return xml_bytes.decode('utf-8')", - "docstring": "Returns XML for the article corresponding to a PMC ID." - }, - { - "code": "def ramp_up_sp(self):\n self._ramp_up_sp, value = self.get_attr_int(self._ramp_up_sp, 'ramp_up_sp')\n return value", - "docstring": "Writing sets the ramp up setpoint. Reading returns the current value. Units\n are in milliseconds and must be positive. When set to a non-zero value, the\n motor speed will increase from 0 to 100% of `max_speed` over the span of this\n setpoint. The actual ramp time is the ratio of the difference between the\n `speed_sp` and the current `speed` and max_speed multiplied by `ramp_up_sp`." 
- }, - { - "code": "def text(message: Text,\n default: Text = \"\",\n validate: Union[Type[Validator],\n Callable[[Text], bool],\n None] = None,\n qmark: Text = DEFAULT_QUESTION_PREFIX,\n style: Optional[Style] = None,\n **kwargs: Any) -> Question:\n merged_style = merge_styles([DEFAULT_STYLE, style])\n validator = build_validator(validate)\n def get_prompt_tokens():\n return [(\"class:qmark\", qmark),\n (\"class:question\", ' {} '.format(message))]\n p = PromptSession(get_prompt_tokens,\n style=merged_style,\n validator=validator,\n **kwargs)\n p.default_buffer.reset(Document(default))\n return Question(p.app)", - "docstring": "Prompt the user to enter a free text message.\n\n This question type can be used to prompt the user for some text input.\n\n Args:\n message: Question text\n\n default: Default value will be returned if the user just hits\n enter.\n\n validate: Require the entered value to pass a validation. The\n value can not be submitted until the validator accepts\n it (e.g. to check minimum password length).\n\n This can either be a function accepting the input and\n returning a boolean, or a class reference to a\n subclass of the prompt toolkit Validator class.\n\n qmark: Question prefix displayed in front of the question.\n By default this is a `?`\n\n style: A custom color and style for the question parts. You can\n configure colors as well as font types for different elements.\n\n Returns:\n Question: Question instance, ready to be prompted (using `.ask()`)." - }, - { - "code": "def _to_rest_includes(models, includes):\n included = []\n includes = includes or []\n if not isinstance(models, list):\n models = [models]\n for include in includes:\n for model in models:\n rel = getattr(model, include)\n if hasattr(rel, 'model') and rel.model:\n rel_models = [rel.model]\n elif hasattr(rel, 'models') and rel.models:\n rel_models = rel.models\n for rel_model in rel_models:\n if rel_model in models or rel_model in included:\n continue\n else:\n included.append(rel_model)\n for idx, val in enumerate(included):\n included[idx] = _to_rest(val)\n return included", - "docstring": "Fetch the models to be included\n\n The includes should follow a few basic rules:\n\n * the include MUST not already be an array member\n of the included array (no dupes)\n\n * the include MUST not be the same as the primary\n data if the primary data is a single resource\n object (no dupes)\n\n * the include MUST not be an array member of the\n primary data if the primary data is an array of\n resource objects (no dupes)\n\n Basically, each included array member should be the only\n instance of that resource object in the entire restified\n data."
- }, - { - "code": "def make_app(include_packages: Sequence[str] = ()) -> Flask:\n for package_name in include_packages:\n import_submodules(package_name)\n app = Flask(__name__)\n @app.errorhandler(ServerError)\n def handle_invalid_usage(error: ServerError) -> Response:\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n @app.route('/')\n def index() -> Response:\n return send_file('config_explorer.html')\n @app.route('/api/config/')\n def api_config() -> Response:\n class_name = request.args.get('class', '')\n get_choices = request.args.get('get_choices', None)\n config = configure(class_name)\n try:\n choice5 = choices(class_name)\n except ValueError:\n choice5 = []\n if get_choices and choice5:\n return jsonify({\n \"className\": class_name,\n \"choices\": choice5\n })\n else:\n return jsonify({\n \"className\": class_name,\n \"config\": config.to_json()\n })\n return app", - "docstring": "Creates a Flask app that serves up a simple configuration wizard." - }, - { - "code": "def get_host_keys(hostname, sshdir):\n hostkey = None\n try:\n host_keys = load_host_keys(os.path.join(sshdir, 'known_hosts'))\n except IOError:\n host_keys = {}\n if hostname in host_keys:\n hostkeytype = host_keys[hostname].keys()[0]\n hostkey = host_keys[hostname][hostkeytype]\n return hostkey", - "docstring": "get host key" - }, - { - "code": "def expose_finish(self, *args):\n gldrawable = self.get_gl_drawable()\n if not gldrawable:\n return\n if gldrawable.is_double_buffered():\n gldrawable.swap_buffers()\n else:\n glFlush()\n gldrawable.gl_end()", - "docstring": "Finish drawing process" - }, - { - "code": "def delete_issue_link(self, id):\n url = self._get_url('issueLink') + \"/\" + id\n return self._session.delete(url)", - "docstring": "Delete a link between two issues.\n\n :param id: ID of the issue link to delete" - }, - { - "code": "def cloud_init(names, host=None, quiet=False, **kwargs):\n if quiet:\n log.warning(\"'quiet' argument is being deprecated. Please migrate to --quiet\")\n return __salt__['lxc.init'](names=names, host=host,\n saltcloud_mode=True, quiet=quiet, **kwargs)", - "docstring": "Wrapper for using lxc.init in saltcloud compatibility mode\n\n names\n Name of the containers, supports a single name or a comma delimited\n list of names.\n\n host\n Minion to start the container on. Required.\n\n path\n path to the container parent\n default: /var/lib/lxc (system default)\n\n .. versionadded:: 2015.8.0\n\n saltcloud_mode\n init the container with the saltcloud opts format instead" - }, - { - "code": "def _adjust_inferential_results_for_parameter_constraints(self,\n constraints):\n if constraints is not None:\n inferential_attributes = [\"standard_errors\",\n \"tvalues\",\n \"pvalues\",\n \"robust_std_errs\",\n \"robust_t_stats\",\n \"robust_p_vals\"]\n assert all([hasattr(self, x) for x in inferential_attributes])\n assert hasattr(self, \"params\")\n all_names = self.params.index.tolist()\n for series in [getattr(self, x) for x in inferential_attributes]:\n for pos in constraints:\n series.loc[all_names[pos]] = np.nan\n return None", - "docstring": "Ensure that parameters that were constrained during estimation do not\n have any values showed for inferential results. After all, no inference\n was performed.\n\n Parameters\n ----------\n constraints : list of ints, or None.\n If list, should contain the positions in the array of all estimated\n parameters that were constrained to their initial values.\n\n Returns\n -------\n None." 
- }, - { - "code": "def save( self ):\n packets = self.__enumerate_packets( )\n delete_io( self.hash )\n for packet in packets:\n packet['hash'] = self.hash\n insert_io( packet )\n return self", - "docstring": "Save method for the CallDescriptor.\n\n If the CallDescriptor matches a past CallDescriptor it updates the existing\n database record corresponding to the hash. If it doesn't already exist it'll\n be INSERT'd." - }, - { - "code": "def DrainTaskSchedulerQueueForClient(self, client, max_count=None):\n if max_count is None:\n max_count = self.max_queue_size\n if max_count <= 0:\n return []\n client = rdf_client.ClientURN(client)\n start_time = time.time()\n if data_store.RelationalDBEnabled():\n action_requests = data_store.REL_DB.LeaseClientActionRequests(\n client.Basename(),\n lease_time=rdfvalue.Duration.FromSeconds(self.message_expiry_time),\n limit=max_count)\n result = [\n rdf_flow_objects.GRRMessageFromClientActionRequest(r)\n for r in action_requests\n ]\n else:\n new_tasks = queue_manager.QueueManager(token=self.token).QueryAndOwn(\n queue=client.Queue(),\n limit=max_count,\n lease_seconds=self.message_expiry_time)\n initial_ttl = rdf_flows.GrrMessage().task_ttl\n check_before_sending = []\n result = []\n for task in new_tasks:\n if task.task_ttl < initial_ttl - 1:\n check_before_sending.append(task)\n else:\n result.append(task)\n if check_before_sending:\n with queue_manager.QueueManager(token=self.token) as manager:\n status_found = manager.MultiCheckStatus(check_before_sending)\n for task in check_before_sending:\n if task not in status_found:\n result.append(task)\n else:\n manager.DeQueueClientRequest(task)\n stats_collector_instance.Get().IncrementCounter(\"grr_messages_sent\",\n len(result))\n if result:\n logging.debug(\"Drained %d messages for %s in %s seconds.\", len(result),\n client,\n time.time() - start_time)\n return result", - "docstring": "Drains the client's Task Scheduler queue.\n\n 1) Get all messages in the client queue.\n 2) Sort these into a set of session_ids.\n 3) Use data_store.DB.ResolvePrefix() to query all requests.\n 4) Delete all responses for retransmitted messages (if needed).\n\n Args:\n client: The ClientURN object specifying this client.\n max_count: The maximum number of messages we will issue for the client.\n If not given, uses self.max_queue_size .\n\n Returns:\n The tasks respresenting the messages returned. If we can not send them,\n we can reschedule them for later." - }, - { - "code": "def get(property_name):\n config = _read_config(_USER_CONFIG_FILE)\n section = _MAIN_SECTION_NAME\n try:\n property_value = config.get(section, property_name)\n except (NoOptionError, NoSectionError) as error:\n try:\n config = _read_config(_SYSTEM_CONFIG_FILE)\n property_value = config.get(section, property_name)\n except (NoOptionError, NoSectionError) as error:\n raise NoConfigOptionError(error)\n return property_value", - "docstring": "Returns the value of the specified configuration property.\n Property values stored in the user configuration file take\n precedence over values stored in the system configuration\n file.\n\n :param property_name: The name of the property to retrieve.\n :return: The value of the property." - }, - { - "code": "def distinct(xs):\n seen = set()\n return [x for x in xs if x not in seen and not seen.add(x)]", - "docstring": "Get the list of distinct values with preserving order." 
- }, - { - "code": "def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False):\n dim = dataset.get_dimension(dim, strict=True)\n irregular = cls.irregular(dataset, dim)\n if irregular or expanded:\n if irregular:\n data = dataset.data[dim.name]\n else:\n data = util.expand_grid_coords(dataset, dim)\n if edges and data.shape == dataset.data[dataset.vdims[0].name].shape:\n data = cls._infer_interval_breaks(data, axis=1)\n data = cls._infer_interval_breaks(data, axis=0)\n return data\n data = dataset.data[dim.name]\n if ordered and np.all(data[1:] < data[:-1]):\n data = data[::-1]\n shape = cls.shape(dataset, True)\n if dim in dataset.kdims:\n idx = dataset.get_dimension_index(dim)\n isedges = (dim in dataset.kdims and len(shape) == dataset.ndims\n and len(data) == (shape[dataset.ndims-idx-1]+1))\n else:\n isedges = False\n if edges and not isedges:\n data = cls._infer_interval_breaks(data)\n elif not edges and isedges:\n data = data[:-1] + np.diff(data)/2.\n return data", - "docstring": "Returns the coordinates along a dimension. Ordered ensures\n coordinates are in ascending order and expanded creates\n ND-array matching the dimensionality of the dataset." - }, - { - "code": "def create(cls, name, user=None, network_element=None, domain_name=None,\n zone=None, executable=None):\n ref_list = []\n if user:\n pass\n if network_element:\n ref_list.append(network_element.href)\n if domain_name:\n ref_list.append(domain_name.href)\n if zone:\n ref_list.append(zone.href)\n if executable:\n pass\n json = {'name': name,\n 'ref': ref_list}\n return ElementCreator(cls, json)", - "docstring": "Create a match expression\n\n :param str name: name of match expression\n :param str user: name of user or user group\n :param Element network_element: valid network element type, i.e. host, network, etc\n :param DomainName domain_name: domain name network element\n :param Zone zone: zone to use\n :param str executable: name of executable or group\n :raises ElementNotFound: specified object does not exist\n :return: instance with meta\n :rtype: MatchExpression" - }, - { - "code": "def pposition(hd, details=False):\n p = re.split(r\"[^\\d\\-+.]*\", hd)\n if len(p) not in [2, 6]:\n raise ValueError(\"Input must contain either 2 or 6 numbers.\")\n if len(p) == 2:\n x, y = float(p[0]), float(p[1])\n if details:\n numvals = 2\n raw_x = p[0]\n raw_y = p[1]\n elif len(p) == 6:\n x_p = phmsdms(\" \".join(p[:3]))\n x = sexa2deci(x_p['sign'], *x_p['vals'])\n y_p = phmsdms(\" \".join(p[3:]))\n y = sexa2deci(y_p['sign'], *y_p['vals'])\n if details:\n raw_x = x_p\n raw_y = y_p\n numvals = 6\n if details:\n result = dict(x=x, y=y, numvals=numvals, raw_x=raw_x,\n raw_y=raw_y)\n else:\n result = x, y\n return result", - "docstring": "Parse string into angular position.\n\n A string containing 2 or 6 numbers is parsed, and the numbers are\n converted into decimal numbers. In the former case the numbers are\n assumed to be floats. In the latter case, the numbers are assumed\n to be sexagesimal.\n\n Parameters\n ----------\n hd: str\n String containing 2 or 6 numbers. The numbers can be spearated\n with character or characters other than \".\", \"-\", \"+\".\n\n The string must contain either 2 or 6 numbers.\n\n details: bool\n The detailed result from parsing the string is returned. See\n \"Returns\" section below.\n\n Default is False.\n\n Returns\n -------\n x: (float, float) or dict\n A tuple containing decimal equivalents of the parsed numbers. 
If\n the string contains 6 numbers then they are assumed be\n sexagesimal components.\n\n If ``details`` is True then a dictionary with the following keys\n is returned:\n\n x: float\n The first number.\n y: float\n The second number\n numvals: int\n Number of items parsed; 2 or 6.\n raw_x: dict\n The result returned by ``phmsdms`` for the first number.\n raw_y: dict\n The result returned by ``phmsdms`` for the second number.\n\n It is up to the user to interpret the units of the numbers\n returned.\n\n Raises\n ------\n ValueError:\n The exception is raised if the string cannot be interpreted as a\n sequence of 2 or 6 numbers.\n\n Examples\n --------\n The position of M100 reported by SIMBAD is\n \"12 22 54.899 +15 49 20.57\". This can be easily parsed in the\n following manner.\n\n >>> from angles import pposition\n >>> ra, de = pposition(\"12 22 54.899 +15 49 20.57\")\n >>> ra\n 12.38191638888889\n >>> de\n 15.822380555555556" - }, - { - "code": "def insert_in_project(self, project, params={}, **options): \n path = \"/projects/%s/sections/insert\" % (project)\n return self.client.post(path, params, **options)", - "docstring": "Move sections relative to each other in a board view. One of\n `before_section` or `after_section` is required.\n \n Sections cannot be moved between projects.\n \n At this point in time, moving sections is not supported in list views, only board views.\n \n Returns an empty data block.\n\n Parameters\n ----------\n project : {Id} The project in which to reorder the given section\n [data] : {Object} Data for the request\n - section : {Id} The section to reorder\n - [before_section] : {Id} Insert the given section immediately before the section specified by this parameter.\n - [after_section] : {Id} Insert the given section immediately after the section specified by this parameter." - }, - { - "code": "def _receive(self):\n result = self._talk.get()\n if not result:\n self._logger.error('Failed to receive')\n return result", - "docstring": "Receive a chunk of request from client." - }, - { - "code": "def join_ext(name, extension):\n if extension[0] == EXT:\n ret = name + extension\n else:\n ret = name + EXT + extension\n return ret", - "docstring": "Joins a given name with an extension. If the extension doesn't have a '.'\n it will add it for you" - }, - { - "code": "def api_call(\n self,\n api_method: str,\n *,\n http_verb: str = \"POST\",\n files: dict = None,\n data: dict = None,\n params: dict = None,\n json: dict = None,\n ):\n if json is not None and http_verb != \"POST\":\n msg = \"Json data can only be submitted as POST requests. 
GET requests should use the 'params' argument.\"\n raise err.SlackRequestError(msg)\n api_url = self._get_url(api_method)\n headers = {\n \"User-Agent\": self._get_user_agent(),\n \"Authorization\": \"Bearer {}\".format(self.token),\n }\n if files is not None:\n form_data = aiohttp.FormData()\n for k, v in files.items():\n if isinstance(v, str):\n form_data.add_field(k, open(v, \"rb\"))\n else:\n form_data.add_field(k, v)\n if data is not None:\n for k, v in data.items():\n form_data.add_field(k, str(v))\n data = form_data\n req_args = {\n \"headers\": headers,\n \"data\": data,\n \"params\": params,\n \"json\": json,\n \"ssl\": self.ssl,\n \"proxy\": self.proxy,\n }\n if self._event_loop is None:\n self._set_event_loop()\n future = asyncio.ensure_future(\n self._send(http_verb=http_verb, api_url=api_url, req_args=req_args),\n loop=self._event_loop,\n )\n if self.run_async:\n return future\n return self._event_loop.run_until_complete(future)", - "docstring": "Create a request and execute the API call to Slack.\n\n Args:\n api_method (str): The target Slack API method.\n e.g. 'chat.postMessage'\n http_verb (str): HTTP Verb. e.g. 'POST'\n files (dict): Files to multipart upload.\n e.g. {imageORfile: file_objectORfile_path}\n data: The body to attach to the request. If a dictionary is\n provided, form-encoding will take place.\n e.g. {'key1': 'value1', 'key2': 'value2'}\n params (dict): The URL parameters to append to the URL.\n e.g. {'key1': 'value1', 'key2': 'value2'}\n json (dict): JSON for the body to attach to the request\n (if files or data is not specified).\n e.g. {'key1': 'value1', 'key2': 'value2'}\n\n Returns:\n (SlackResponse)\n The server's response to an HTTP request. Data\n from the response can be accessed like a dict.\n If the response included 'next_cursor' it can\n be iterated on to execute subsequent requests.\n\n Raises:\n SlackApiError: The following Slack API call failed:\n 'chat.postMessage'.\n SlackRequestError: Json data can only be submitted as\n POST requests." - }, - { - "code": "def drop_keyspace(name, connections=None):\n if not _allow_schema_modification():\n return\n if connections:\n if not isinstance(connections, (list, tuple)):\n raise ValueError('Connections must be a list or a tuple.')\n def _drop_keyspace(name, connection=None):\n cluster = get_cluster(connection)\n if name in cluster.metadata.keyspaces:\n execute(\"DROP KEYSPACE {0}\".format(metadata.protect_name(name)), connection=connection)\n if connections:\n for connection in connections:\n _drop_keyspace(name, connection)\n else:\n _drop_keyspace(name)", - "docstring": "Drops a keyspace, if it exists.\n\n *There are plans to guard schema-modifying functions with an environment-driven conditional.*\n\n **This function should be used with caution, especially in production environments.\n Take care to execute schema modifications in a single context (i.e. 
not concurrently with other clients).**\n\n :param str name: name of keyspace to drop\n :param list connections: List of connection names" - }, - { - "code": "def add_link(self, name, desc, layout, node_1, node_2):\n existing_link = get_session().query(Link).filter(Link.name==name, Link.network_id==self.id).first()\n if existing_link is not None:\n raise HydraError(\"A link with name %s is already in network %s\"%(name, self.id))\n l = Link()\n l.name = name\n l.description = desc\n l.layout = json.dumps(layout) if layout is not None else None\n l.node_a = node_1\n l.node_b = node_2\n get_session().add(l)\n self.links.append(l)\n return l", - "docstring": "Add a link to a network. Links are what effectively\n define the network topology, by associating two already\n existing nodes." - }, - { - "code": "def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None, seq_align=\"start\", encode_type=\"one_hot\"):\n if ignore_stop_codons:\n vocab = CODONS\n neutral_vocab = STOP_CODONS + [\"NNN\"]\n else:\n vocab = CODONS + STOP_CODONS\n neutral_vocab = [\"NNN\"]\n seq_vec = [str(seq).replace(\"U\", \"T\") for seq in seq_vec]\n return encodeSequence(seq_vec,\n vocab=vocab,\n neutral_vocab=neutral_vocab,\n maxlen=maxlen,\n seq_align=seq_align,\n pad_value=\"NNN\",\n encode_type=encode_type)", - "docstring": "Convert the Codon sequence into 1-hot-encoding numpy array\n\n # Arguments\n seq_vec: List of strings/DNA sequences\n ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from one-hot encoding.\n maxlen: Maximum sequence length. See `pad_sequences` for more detail\n seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail\n encode_type: can be `\"one_hot\"` or `token` for token encoding of codons (incremental integer ).\n\n # Returns\n numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)`" - }, - { - "code": "def extract_log_level_from_environment(k, default):\n return LOG_LEVELS.get(os.environ.get(k)) or int(os.environ.get(k, default))", - "docstring": "Gets the log level from the environment variable." - }, - { - "code": "def unicode_symbol(self, *, invert_color: bool = False) -> str:\n symbol = self.symbol().swapcase() if invert_color else self.symbol()\n return UNICODE_PIECE_SYMBOLS[symbol]", - "docstring": "Gets the Unicode character for the piece." - }, - { - "code": "def get_graph(self, parse_direction=False):\n g = nx.DiGraph()\n for way_id, (tags, nodes) in self.ways.items():\n if tags.get('oneway') == '-1':\n nodes = reversed(nodes)\n tags['oneway'] = 'yes'\n oneway = tags.get('oneway') == 'yes'\n for n0, n1 in tools.pairwise(nodes):\n g.add_edge(n0, n1, attr_dict=tags)\n if parse_direction:\n g[n0][n1]['_direction'] = 'forward'\n if not oneway:\n g.add_edge(n1, n0, attr_dict=tags)\n if parse_direction:\n g[n1][n0]['_direction'] = 'backward'\n g.node[n0].update(self._node_properties(n0))\n g.node[n1].update(self._node_properties(n1))\n return g", - "docstring": "Return the networkx directed graph of received data" - }, - { - "code": "def get_columns(self, font):\n font = self.get_font(font)\n return self.fonts[six.text_type(font)]['columns']", - "docstring": "Return the number of columns for the given font." 
- }, - { - "code": "def get_data_context(context_type, options, *args, **kwargs):\n if context_type == \"SqlAlchemy\":\n return SqlAlchemyDataContext(options, *args, **kwargs)\n elif context_type == \"PandasCSV\":\n return PandasCSVDataContext(options, *args, **kwargs)\n else:\n raise ValueError(\"Unknown data context.\")", - "docstring": "Return a data_context object which exposes options to list datasets and get a dataset from\n that context. This is a new API in Great Expectations 0.4, and is subject to rapid change.\n\n :param context_type: (string) one of \"SqlAlchemy\" or \"PandasCSV\"\n :param options: options to be passed to the data context's connect method.\n :return: a new DataContext object" - }, - { - "code": "def set_maxrad(self,maxrad, distribution_skip=True):\n self.maxrad = maxrad\n self.apply_constraint(UpperLimit(self.Rsky,maxrad,\n name='Max Rsky'),\n overwrite=True,\n distribution_skip=distribution_skip)", - "docstring": "Adds a constraint that rejects everything with Rsky > maxrad\n\n Requires ``Rsky`` attribute, which should always have units.\n\n :param maxrad:\n The maximum angular value of Rsky.\n :type maxrad:\n :class:`astropy.units.Quantity`\n\n :param distribution_skip:\n This is by default ``True``. *To be honest, I'm not\n exactly sure why. Might be important, might not\n (don't remember).*" - }, - { - "code": "def get_flake8_options(config_dir='.'):\n if FLAKE8_CONFIG_NAME in os.listdir(config_dir):\n flake8_config_path = FLAKE8_CONFIG_NAME\n else:\n flake8_config_path = DEFAULT_FLAKE8_CONFIG_PATH\n return ['--config={}'.format(flake8_config_path)]", - "docstring": "Checks for local config overrides for `flake8`\n and add them in the correct `flake8` `options` format.\n\n :param config_dir:\n :return: List[str]" - }, - { - "code": "def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):\n real_labels = convert_rgb_to_symmetric_real(labels)\n dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels)\n weights = weights_fn(labels)\n loss_num = weights * dml_loss_value\n loss_den = weights_nonzero(weights)\n if reduce_sum:\n loss_num = tf.reduce_sum(loss_num)\n loss_den = tf.reduce_sum(loss_den)\n return loss_num, loss_den", - "docstring": "Discretized mixture of logistics loss.\n\n Args:\n pred: A [batch, height, width, num_mixtures*10] tensor of floats\n comprising one unconstrained mixture probability, three means\n (one per channel), three standard deviations (one per channel),\n and three coefficients which linearly parameterize dependence across\n channels.\n labels: A [batch, height, width, channels] tensor of 8-bit pixel\n intensities. The computation assumes channels is 3.\n weights_fn: A function of labels, returning a Tensor of shape\n [batch, height, width] which weights each loss term. Default is to scale\n each loss term by 1/3 so that they capture the average across channels.\n reduce_sum: A boolean, to return scalar loss instead of per position.\n\n Returns:\n Tuple of loss tensors for numerator and denominator, each a scalar if\n reduce_sum else of shape [batch, height, width]. The sum of their divisions\n is the number of nats for each pixel in labels." 
- }, - { - "code": "def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs):\n assert len(inputs) == len(outputs), (\n 'Inputs and outputs should have the same number of elements.')\n inputs[0].update(g0_inputs)\n outputs[0].update(g0_inputs)\n for i in range(1, len(inputs)):\n device_name = inputs[i]['x'].device\n with tf.device(device_name):\n with tf.variable_scope('step%d' % i):\n for k, v in g0_inputs.iteritems():\n if k not in inputs[i]:\n v_copy = clone_variable(k, v)\n inputs[i][k] = v_copy\n outputs[i][k] = v_copy\n return inputs, outputs", - "docstring": "Clone variables unused by the attack on all GPUs. Specifically, the\n ground-truth label, y, has to be preserved until the training step.\n\n :param inputs: A list of dictionaries as the inputs to each step.\n :param outputs: A list of dictionaries as the outputs of each step.\n :param g0_inputs: Initial variables to be cloned.\n :return: Updated inputs and outputs." - }, - { - "code": "def call(self, klass):\n self._resolve(klass).run()\n if self._command:\n self._command.line(\"Seeded: %s\" % klass.__name__)", - "docstring": "Seed the given connection from the given class.\n\n :param klass: The Seeder class\n :type klass: class" - }, - { - "code": "def RetryUpload(self, job, job_id, error):\n if self.IsErrorRetryable(error):\n retry_count = 0\n sleep_interval = config.CONFIG[\"BigQuery.retry_interval\"]\n while retry_count < config.CONFIG[\"BigQuery.retry_max_attempts\"]:\n time.sleep(sleep_interval.seconds)\n logging.info(\"Retrying job_id: %s\", job_id)\n retry_count += 1\n try:\n response = job.execute()\n return response\n except errors.HttpError as e:\n if self.IsErrorRetryable(e):\n sleep_interval *= config.CONFIG[\"BigQuery.retry_multiplier\"]\n logging.exception(\"Error with job: %s, will retry in %s\", job_id,\n sleep_interval)\n else:\n raise BigQueryJobUploadError(\n \"Can't retry error code %s. Giving up\"\n \" on job: %s.\" % (e.resp.status, job_id))\n else:\n raise BigQueryJobUploadError(\"Can't retry error code %s. Giving up on \"\n \"job: %s.\" % (error.resp.status, job_id))\n raise BigQueryJobUploadError(\n \"Giving up on job:%s after %s retries.\" % (job_id, retry_count))", - "docstring": "Retry the BigQuery upload job.\n\n Using the same job id protects us from duplicating data on the server. If we\n fail all of our retries we raise.\n\n Args:\n job: BigQuery job object\n job_id: ID string for this upload job\n error: errors.HttpError object from the first error\n\n Returns:\n API response object on success, None on failure\n Raises:\n BigQueryJobUploadError: if we can't get the bigquery job started after\n retry_max_attempts" - }, - { - "code": "def _execute(self):\n data = self.seed_fn()\n for transform in self.transforms:\n data = transform(data)\n return list(data)", - "docstring": "Run the query, generating data from the `seed_fn` and performing transforms on the results." 
- }, - { - "code": "def get_ids(self, request_data, parameter_name='ids'):\n if parameter_name not in request_data:\n raise ParseError(\"`{}` parameter is required\".format(parameter_name))\n ids = request_data.get(parameter_name)\n if not isinstance(ids, list):\n raise ParseError(\"`{}` parameter not a list\".format(parameter_name))\n if not ids:\n raise ParseError(\"`{}` parameter is empty\".format(parameter_name))\n if any(map(lambda id: not isinstance(id, int), ids)):\n raise ParseError(\"`{}` parameter contains non-integers\".format(parameter_name))\n return ids", - "docstring": "Extract a list of integers from request data." - }, - { - "code": "def _load_data():\n lines = dragonmapper.data.load_data_file('transcriptions.csv')\n pinyin_map, zhuyin_map, ipa_map = {}, {}, {}\n for line in lines:\n p, z, i = line.split(',')\n pinyin_map[p] = {'Zhuyin': z, 'IPA': i}\n zhuyin_map[z] = {'Pinyin': p, 'IPA': i}\n ipa_map[i] = {'Pinyin': p, 'Zhuyin': z}\n return pinyin_map, zhuyin_map, ipa_map", - "docstring": "Load the transcription mapping data into a dictionary." - }, - { - "code": "def _get_field_doc(self, field):\n fieldspec = dict()\n fieldspec['type'] = field.__class__.__name__\n fieldspec['required'] = field.required\n fieldspec['validators'] = [{validator.__class__.__name__: validator.__dict__} for validator in field.validators]\n return fieldspec", - "docstring": "Return documentation for a field in the representation." - }, - { - "code": "def update_chars(self):\n self.prior_char, self.char = self.char, next(self.characters, '\\n')\n self.idx += 1", - "docstring": "Update the current characters in the tokenizer." - }, - { - "code": "def close(self):\n if self._recv_thread:\n self._recv_thread.kill()\n self._recv_thread = None\n if self._send_thread:\n self._send_thread.kill()\n self._send_thread = None\n if self._sock:\n self._sock.close()\n self._sock = None\n super(TCPTendril, self).close()", - "docstring": "Close the connection. Kills the send and receive threads, as\n well as closing the underlying socket." - }, - { - "code": "def ls(manager: Manager, offset: Optional[int], limit: Optional[int]):\n q = manager.session.query(Edge)\n if offset:\n q = q.offset(offset)\n if limit > 0:\n q = q.limit(limit)\n for e in q:\n click.echo(e.bel)", - "docstring": "List edges." - }, - { - "code": "def put(self, url, data=None):\n self.conn.request(\"PUT\", url, data)\n return self._process_response()", - "docstring": "Send an HTTP PUT request to a URL and return the result."
- }, - { - "code": "def read_excel(\n filename,\n dataset_class=dataset.pandas_dataset.PandasDataset,\n expectations_config=None,\n autoinspect_func=None,\n *args, **kwargs\n):\n df = pd.read_excel(filename, *args, **kwargs)\n if isinstance(df, dict):\n for key in df:\n df[key] = _convert_to_dataset_class(\n df[key], dataset_class, expectations_config, autoinspect_func)\n else:\n df = _convert_to_dataset_class(\n df, dataset_class, expectations_config, autoinspect_func)\n return df", - "docstring": "Read a file using Pandas read_excel and return a great_expectations dataset.\n\n Args:\n filename (string): path to file to read\n dataset_class (Dataset class): class to which to convert resulting Pandas df\n expectations_config (string): path to great_expectations config file\n\n Returns:\n great_expectations dataset or ordered dict of great_expectations datasets,\n if multiple worksheets are imported" - }, - { - "code": "def vector_angle(pairs):\n pairs = np.asanyarray(pairs, dtype=np.float64)\n if len(pairs) == 0:\n return np.array([])\n elif util.is_shape(pairs, (2, 3)):\n pairs = pairs.reshape((-1, 2, 3))\n elif not util.is_shape(pairs, (-1, 2, (2, 3))):\n raise ValueError('pairs must be (n,2,(2|3))!')\n dots = util.diagonal_dot(pairs[:, 0], pairs[:, 1])\n dots = np.clip(dots, -1.0, 1.0)\n angles = np.abs(np.arccos(dots))\n return angles", - "docstring": "Find the angles between pairs of unit vectors.\n\n Parameters\n ----------\n pairs : (n, 2, 3) float\n Unit vector pairs\n\n Returns\n ----------\n angles : (n,) float\n Angles between vectors in radians" - }, - { - "code": "def _data_build(self, data, modname, path):\n try:\n node = _parse(data + \"\\n\")\n except (TypeError, ValueError, SyntaxError) as exc:\n raise exceptions.AstroidSyntaxError(\n \"Parsing Python code failed:\\n{error}\",\n source=data,\n modname=modname,\n path=path,\n error=exc,\n ) from exc\n if path is not None:\n node_file = os.path.abspath(path)\n else:\n node_file = \"\"\n if modname.endswith(\".__init__\"):\n modname = modname[:-9]\n package = True\n else:\n package = (\n path is not None\n and os.path.splitext(os.path.basename(path))[0] == \"__init__\"\n )\n builder = rebuilder.TreeRebuilder(self._manager)\n module = builder.visit_module(node, modname, node_file, package)\n module._import_from_nodes = builder._import_from_nodes\n module._delayed_assattr = builder._delayed_assattr\n return module", - "docstring": "Build tree node from data and add some informations" - }, - { - "code": "def create_header(cls, request_id=None):\n header = {\n 'msgid' : bkserial.make_id(),\n 'msgtype' : cls.msgtype\n }\n if request_id is not None:\n header['reqid'] = request_id\n return header", - "docstring": "Return a message header fragment dict.\n\n Args:\n request_id (str or None) :\n Message ID of the message this message replies to\n\n Returns:\n dict : a message header" - }, - { - "code": "def intersect(self, other):\n loc = self.locate_keys(other, strict=False)\n return self.compress(loc, axis=0)", - "docstring": "Intersect with `other` sorted index.\n\n Parameters\n ----------\n other : array_like, int\n Array of values to intersect with.\n\n Returns\n -------\n out : SortedIndex\n Values in common.\n\n Examples\n --------\n\n >>> import allel\n >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35])\n >>> idx2 = allel.SortedIndex([4, 6, 20, 39])\n >>> idx1.intersect(idx2)\n \n [6, 20]" - }, - { - "code": "def get_handlers(self):\n handlers = []\n self.static_root = self.application.get_app_component(\n 
).get_component_path()\n if self.conf:\n if 'maps' in self.conf:\n if self.conf['maps'] is None:\n logger.warning(\"Maps configuration is empty. Finish the\"\n \"static maps configuration.\")\n return handlers\n for map_item in self.conf['maps']:\n logger.debug(\"Mapping %s handlers.\" % map_item['name'])\n self.static_maps[map_item['name']] = {}\n self.static_maps[\n map_item['name']]['root'] = self.static_root\n if 'root' in map_item:\n if os.path.isabs(map_item['root']):\n self.static_maps[\n map_item['name']]['root'] = map_item['root']\n else:\n self.static_maps[\n map_item['name']]['root'] = os.path.abspath(\n os.path.join(self.static_root,\n map_item['root']))\n if 'handlers' in map_item:\n if map_item['handlers'] is None:\n logger.warning(\"There is no handles mapped in the\"\n \" static maps config file.\")\n else:\n handlers = handlers + self.get_static_handlers(\n map_item)\n else:\n logger.warning(\"No static maps configurations were provided.\")\n return handlers", - "docstring": "Returns the handlers defined on the static_maps.yml file located\n at the app config directory.\n\n Returns: An array of static handlers to be added to the app." - }, - { - "code": "def fof(self, linkinglength, out=None, method='splay'):\n if out is None:\n out = numpy.empty(self.size, dtype='intp')\n return _core.KDNode.fof(self, linkinglength, out, method)", - "docstring": "Friend-of-Friend clustering with linking length.\n\n Returns: the label" - }, - { - "code": "def modify_object(self, property_name, property_value_variant=None):\n if type(property_name) is dict:\n property_value_variant = property_name.values()\n property_name = property_name.keys()\n if isinstance(property_name, str):\n property_name, property_value_variant = [property_name], [property_value_variant]\n assert len(property_name) == len(property_value_variant)\n property_name = self.__class__._to_visible(property_name)\n for n, v in zip(property_name, property_value_variant):\n self._modify_property(n.encode('ascii','ignore'), v)\n self._rebuild_object()\n return self", - "docstring": "api visible method for modifying visible object properties\n\n :param property_name: property name\n :type property_name: string, list or dict\n :param property_value_variant: property value, must be `None` if property_name is of type `dict`\n :type property_value_variant: various or None\n :return: modified object\n :rtype: unicum.lfojbect.VisibleObject" - }, - { - "code": "def get_index_mappings(self, index):\n fields_arr = []\n for (key, val) in iteritems(index):\n doc_mapping = self.get_doc_type_mappings(index[key])\n if doc_mapping is None:\n return None\n fields_arr.extend(doc_mapping)\n return fields_arr", - "docstring": "Converts all index's doc_types to .kibana" - }, - { - "code": "def warn_disabled(scraperclass, reasons):\n out.warn(u\"Skipping comic %s: %s\" % (scraperclass.getName(), ' '.join(reasons.values())))", - "docstring": "Print warning about disabled comic modules." 
- }, - { - "code": "def batch(iterable, n, fillvalue=None):\n ensure_iterable(iterable)\n if not isinstance(n, Integral):\n raise TypeError(\"invalid number of elements in a batch\")\n if not (n > 0):\n raise ValueError(\"number of elements in a batch must be positive\")\n if fillvalue is None:\n fillvalue = object()\n trimmer = lambda item: tuple(x for x in item if x is not fillvalue)\n else:\n trimmer = identity()\n args = [iter(iterable)] * n\n zipped = izip_longest(*args, fillvalue=fillvalue)\n return imap(trimmer, zipped)", - "docstring": "Batches the elements of given iterable.\n\n Resulting iterable will yield tuples containing at most ``n`` elements\n (might be less if ``fillvalue`` isn't specified).\n\n :param n: Number of items in every batch\n :param fillvalue: Value to fill the last batch with. If None, last batch\n might be shorter than ``n`` elements\n\n :return: Iterable of batches\n\n .. note::\n\n This is an extended version of grouper() recipe\n from the :module:`itertools` module documentation." - }, - { - "code": "def _process_scrape_info(self, scraper: BaseScraper,\n scrape_result: ScrapeResult,\n item_session: ItemSession):\n if not scrape_result:\n return 0, 0\n num_inline = 0\n num_linked = 0\n for link_context in scrape_result.link_contexts:\n url_info = self.parse_url(link_context.link)\n if not url_info:\n continue\n url_info = self.rewrite_url(url_info)\n child_url_record = item_session.child_url_record(\n url_info.url, inline=link_context.inline\n )\n if not self._fetch_rule.consult_filters(item_session.request.url_info, child_url_record)[0]:\n continue\n if link_context.inline:\n num_inline += 1\n else:\n num_linked += 1\n item_session.add_child_url(url_info.url, inline=link_context.inline,\n link_type=link_context.link_type)\n return num_inline, num_linked", - "docstring": "Collect the URLs from the scrape info dict." - }, - { - "code": "def remove_sbi_id(self, sbi_id):\n sbi_ids = self.sbi_ids\n sbi_ids.remove(sbi_id)\n DB.set_hash_value(self._key, 'sbi_ids', sbi_ids)", - "docstring": "Remove an SBI Identifier." - }, - { - "code": "def from_shared_access_key(\n cls,\n uri,\n key_name,\n shared_access_key,\n expiry=None,\n port=constants.DEFAULT_AMQPS_PORT,\n timeout=10,\n retry_policy=TokenRetryPolicy(),\n verify=None,\n http_proxy=None,\n encoding='UTF-8'):\n expires_in = datetime.timedelta(seconds=expiry or constants.AUTH_EXPIRATION_SECS)\n encoded_uri = compat.quote_plus(uri).encode(encoding)\n encoded_key = compat.quote_plus(key_name).encode(encoding)\n expires_at = time.time() + expires_in.seconds\n token = utils.create_sas_token(\n encoded_key,\n shared_access_key.encode(encoding),\n encoded_uri,\n expires_in)\n return cls(\n uri, uri, token,\n expires_in=expires_in,\n expires_at=expires_at,\n username=key_name,\n password=shared_access_key,\n port=port,\n timeout=timeout,\n retry_policy=retry_policy,\n verify=verify,\n http_proxy=http_proxy,\n encoding=encoding)", - "docstring": "Attempt to create a CBS token session using a Shared Access Key such\n as is used to connect to Azure services.\n\n :param uri: The AMQP endpoint URI. This must be provided as\n a decoded string.\n :type uri: str\n :param key_name: The SAS token username, also referred to as the key\n name or policy name.\n :type key_name: str\n :param shared_access_key: The SAS token password, also referred to as the key.\n :type shared_access_key: str\n :param expiry: The lifetime in seconds for the generated token. 
Default is 1 hour.\n :type expiry: int\n :param port: The TLS port - default for AMQP is 5671.\n :type port: int\n :param timeout: The timeout in seconds in which to negotiate the token.\n The default value is 10 seconds.\n :type timeout: int\n :param retry_policy: The retry policy for the PUT token request. The default\n retry policy has 3 retries.\n :type retry_policy: ~uamqp.authentication.cbs_auth.TokenRetryPolicy\n :param verify: The path to a user-defined certificate.\n :type verify: str\n :param http_proxy: HTTP proxy configuration. This should be a dictionary with\n the following keys present: 'proxy_hostname' and 'proxy_port'. Additional optional\n keys are 'username' and 'password'.\n :type http_proxy: dict\n :param encoding: The encoding to use if hostname is provided as a str.\n Default is 'UTF-8'.\n :type encoding: str" - }, - { - "code": "async def get(self, cid, coinid):\n\t\tif settings.SIGNATURE_VERIFICATION:\n\t\t\tsuper().verify()\n\t\tif coinid in settings.bridges.keys():\n\t\t\tself.account.blockchain.setendpoint(settings.bridges[coinid])\n\t\treviews = await self.account.blockchain.getreviews(cid=cid)\n\t\tif isinstance(reviews, dict):\n\t\t\tif \"error\" in reviews:\n\t\t\t\tself.set_status(500)\n\t\t\t\tself.write(reviews)\n\t\t\t\traise tornado.web.Finish\n\t\tfor review in reviews:\n\t\t\treview[\"confirmed\"] = 1\n\t\tstorage_reviews = await self.account.getreviews(coinid=coinid, cid=cid)\n\t\tif isinstance(reviews, dict):\n\t\t\tif \"error\" in reviews.keys():\n\t\t\t\tself.set_status(reviews[\"error\"])\n\t\t\t\tself.write(reviews)\n\t\t\t\traise tornado.web.Finish\n\t\tself.write(json.dumps(reviews + storage_reviews))", - "docstring": "Receives all contents reviews" - }, - { - "code": "def load(saved_classifier_filename, train_data_filename):\n try:\n return joblib.load(saved_classifier_filename)\n except Exception:\n import sys\n if sys.version_info > (3, 0):\n return load_compat(saved_classifier_filename)\n raise", - "docstring": "Loads saved classifier." - }, - { - "code": "def unique(new_cmp_dict, old_cmp_dict):\n newkeys = set(new_cmp_dict)\n oldkeys = set(old_cmp_dict)\n unique = newkeys - oldkeys\n unique_ldict = []\n for key in unique:\n unique_ldict.append(new_cmp_dict[key])\n return unique_ldict", - "docstring": "Return a list dict of\n the unique keys in new_cmp_dict" - }, - { - "code": "def create_fake_copies(files, destination):\n dest_files = []\n for filename in files:\n leaf_dest_folder = os.path.join(destination, os.path.dirname(filename))\n if not os.path.exists(leaf_dest_folder):\n os.makedirs(leaf_dest_folder)\n dest_file = os.path.join(destination, filename)\n bash(\"git show :{filename} > {dest_file}\".format(\n filename=filename,\n dest_file=dest_file)\n )\n dest_files.append(os.path.realpath(dest_file))\n return dest_files", - "docstring": "Create copies of the given list of files in the destination given.\n\n Creates copies of the actual files to be committed using\n git show :\n\n Return a list of destination files." - }, - { - "code": "def join_dags(self, names=None):\n return self._client.send(\n Request(\n action='join_dags',\n payload={'names': names}\n )\n ).success", - "docstring": "Wait for the specified dags to terminate.\n\n This function blocks until the specified dags terminate. 
If no dags are specified\n wait for all dags of the workflow, except the dag of the task calling this signal,\n to terminate.\n\n Args:\n names (list): The names of the dags that have to terminate.\n\n Returns:\n bool: True if all the signal was sent successfully." - }, - { - "code": "def _format_output(kernel_restart, packages, verbose, restartable, nonrestartable, restartservicecommands,\n restartinitcommands):\n if not verbose:\n packages = restartable + nonrestartable\n if kernel_restart:\n packages.append('System restart required.')\n return packages\n else:\n ret = ''\n if kernel_restart:\n ret = 'System restart required.\\n\\n'\n if packages:\n ret += \"Found {0} processes using old versions of upgraded files.\\n\".format(len(packages))\n ret += \"These are the packages:\\n\"\n if restartable:\n ret += \"Of these, {0} seem to contain systemd service definitions or init scripts \" \\\n \"which can be used to restart them:\\n\".format(len(restartable))\n for package in restartable:\n ret += package + ':\\n'\n for program in packages[package]['processes']:\n ret += program + '\\n'\n if restartservicecommands:\n ret += \"\\n\\nThese are the systemd services:\\n\"\n ret += '\\n'.join(restartservicecommands)\n if restartinitcommands:\n ret += \"\\n\\nThese are the initd scripts:\\n\"\n ret += '\\n'.join(restartinitcommands)\n if nonrestartable:\n ret += \"\\n\\nThese processes {0} do not seem to have an associated init script \" \\\n \"to restart them:\\n\".format(len(nonrestartable))\n for package in nonrestartable:\n ret += package + ':\\n'\n for program in packages[package]['processes']:\n ret += program + '\\n'\n return ret", - "docstring": "Formats the output of the restartcheck module.\n\n Returns:\n String - formatted output.\n\n Args:\n kernel_restart: indicates that newer kernel is instaled\n packages: list of packages that should be restarted\n verbose: enables extensive output\n restartable: list of restartable packages\n nonrestartable: list of non-restartable packages\n restartservicecommands: list of commands to restart services\n restartinitcommands: list of commands to restart init.d scripts" - }, - { - "code": "def results(self, Pc):\n r\n phase = self.project.find_phase(self)\n net = self.project.network\n inv_p = self['pore.invasion_pressure'].copy()\n inv_t = self['throat.invasion_pressure'].copy()\n if np.sum(self['pore.invasion_sequence'] == -1) > 0:\n inv_p[self['pore.invasion_sequence'] == -1] = Pc + 1\n if np.sum(self['throat.invasion_sequence'] == -1) > 0:\n inv_t[self['throat.invasion_sequence'] == -1] = Pc + 1\n p_inv = inv_p <= Pc\n t_inv = inv_t <= Pc\n if self.settings['late_pore_filling']:\n phase['pore.pressure'] = Pc\n for phys in self.project.find_physics(phase=phase):\n phys.regenerate_models(self.settings['late_pore_filling'])\n frac = phase[self.settings['late_pore_filling']]\n p_vol = net['pore.volume']*frac\n else:\n p_vol = net['pore.volume']\n if self.settings['late_throat_filling']:\n phase['throat.pressure'] = Pc\n for phys in self.project.find_physics(phase=phase):\n phys.regenerate_models(self.settings['late_throat_filling'])\n frac = phase[self.settings['late_throat_filling']]\n t_vol = net['throat.volume']*frac\n else:\n t_vol = net['throat.volume']\n return {'pore.occupancy': p_inv*p_vol, 'throat.occupancy': t_inv*t_vol}", - "docstring": "r\"\"\"\n Places the results of the IP simulation into the Phase object.\n\n Parameters\n ----------\n Pc : float\n Capillary Pressure at which phase configuration was reached" - }, - { - "code": "def 
new_partition(self, table, **kwargs):\n from . import Partition\n if isinstance(table, string_types):\n table = self.table(table)\n if 'sequence_id' in kwargs:\n sequence_id = kwargs['sequence_id']\n del kwargs['sequence_id']\n else:\n sequence_id = self._database.next_sequence_id(Dataset, self.vid, Partition)\n p = Partition(\n t_vid=table.vid,\n table_name=table.name,\n sequence_id=sequence_id,\n dataset=self,\n d_vid=self.vid,\n **kwargs\n )\n p.update_id()\n return p", - "docstring": "Creates new partition and returns it.\n\n Args:\n table (orm.Table):\n\n Returns:\n orm.Partition" - }, - { - "code": "def wishart_pairwise_pvals(self, axis=0):\n if axis != 0:\n raise NotImplementedError(\"Pairwise comparison only implemented for colums\")\n return WishartPairwiseSignificance.pvals(self, axis=axis)", - "docstring": "Return square symmetric matrix of pairwise column-comparison p-values.\n\n Square, symmetric matrix along *axis* of pairwise p-values for the\n null hypothesis that col[i] = col[j] for each pair of columns.\n\n *axis* (int): axis along which to perform comparison. Only columns (0)\n are implemented currently." - }, - { - "code": "def endsbefore(self, other):\n if self.is_valid_range(other):\n if self.upper == other.upper:\n return not self.upper_inc or other.upper_inc\n elif self.upper_inf:\n return False\n elif other.upper_inf:\n return True\n else:\n return self.upper <= other.upper\n elif self.is_valid_scalar(other):\n return self.upper <= other\n else:\n raise TypeError(\n \"Unsupported type to test for ends before '{}'\".format(\n other.__class__.__name__))", - "docstring": "Test if this range ends before `other`. `other` may be either range or\n scalar. This only takes the upper end of the ranges into consideration.\n If the scalar or the upper end of the given range is less than or equal\n to this range's upper end, ``True`` is returned.\n\n >>> intrange(1, 5).endsbefore(5)\n True\n >>> intrange(1, 5).endsbefore(intrange(1, 5))\n True\n\n :param other: Range or scalar to test.\n :return: ``True`` if this range ends before `other`, otherwise ``False``\n :raises TypeError: If `other` is of the wrong type." - }, - { - "code": "def set(self, section, option, value):\n if not value:\n value = '!!False!!'\n if self.is_secure_option(section, option):\n self.set_secure(section, option, value)\n else:\n ConfigParser.set(self, section, option, value)", - "docstring": "Set an option value. Knows how to set options properly marked\n as secure." 
- }, - { - "code": "def _set_oversampling(self, value):\n try:\n value = np.atleast_1d(value).astype(float)\n if len(value) == 1:\n value = np.repeat(value, 2)\n except ValueError:\n raise ValueError('Oversampling factors must be float')\n if np.any(value <= 0):\n raise ValueError('Oversampling factors must be greater than 0')\n self._oversampling = value", - "docstring": "This is a private method because it's used in the initializer by the\n ``oversampling``" - }, - { - "code": "def _read_23andme(file):\n df = pd.read_csv(\n file,\n comment=\"\n sep=\"\\t\",\n na_values=\"--\",\n names=[\"rsid\", \"chrom\", \"pos\", \"genotype\"],\n index_col=0,\n dtype={\"chrom\": object},\n )\n return sort_snps(df), \"23andMe\"", - "docstring": "Read and parse 23andMe file.\n\n https://www.23andme.com\n\n Parameters\n ----------\n file : str\n path to file\n\n Returns\n -------\n pandas.DataFrame\n individual's genetic data normalized for use with `lineage`\n str\n name of data source" - }, - { - "code": "def compile_highstate(self):\n err = []\n top = self.get_top()\n err += self.verify_tops(top)\n matches = self.top_matches(top)\n high, errors = self.render_highstate(matches)\n err += errors\n if err:\n return err\n return high", - "docstring": "Return just the highstate or the errors" - }, - { - "code": "def parse_wait_time(text: str) -> int:\n val = RATELIMIT.findall(text)\n if len(val) > 0:\n try:\n res = val[0]\n if res[1] == 'minutes':\n return int(res[0]) * 60\n if res[1] == 'seconds':\n return int(res[0])\n except Exception as e:\n util_logger.warning('Could not parse ratelimit: ' + str(e))\n return 1 * 60", - "docstring": "Parse the waiting time from the exception" - }, - { - "code": "def solve_fba(self, objective):\n self._prob.set_objective(self._v_wt[objective])\n return self._solve(lp.ObjectiveSense.Maximize)", - "docstring": "Solve the wild type problem using FBA.\n\n Args:\n objective: The objective reaction to be maximized.\n\n Returns:\n The LP Result object for the solved FBA problem." - }, - { - "code": "def prime_ge(n):\n p = max(np.ceil(n), 2)\n while not is_prime(p):\n p += 1\n return p", - "docstring": "PRIME_GE returns the smallest prime greater than or equal to N.\n\n Example:\n +-----+---------\n | N | PRIME_GE\n +-----+---------\n | -10 | 2\n | 1 | 2\n | 2 | 2\n | 3 | 3\n | 4 | 5\n | 5 | 5\n | 6 | 7\n | 7 | 7\n | 8 | 11\n | 9 | 11\n | 10 | 11\n\n Parameters:\n Input, integer N, the number to be bounded.\n\n Output, integer P, the smallest prime number that is greater\n than or equal to N." 
- }, - { - "code": "def update_subdomain_entry(self, subdomain_obj, cur=None):\n assert isinstance(subdomain_obj, Subdomain)\n zonefile_hash = get_zonefile_data_hash(subdomain_obj.zonefile_str)\n rc = store_atlas_zonefile_data(subdomain_obj.zonefile_str, self.zonefiles_dir, fsync=False)\n if not rc:\n raise Exception(\"Failed to store zone file {} from {}\".format(zonefile_hash, subdomain_obj.get_fqn()))\n write_cmd = 'INSERT OR REPLACE INTO {} VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)'.format(self.subdomain_table)\n args = (subdomain_obj.get_fqn(), subdomain_obj.domain, subdomain_obj.n, subdomain_obj.address, zonefile_hash,\n subdomain_obj.sig, subdomain_obj.block_height, subdomain_obj.parent_zonefile_hash,\n subdomain_obj.parent_zonefile_index, subdomain_obj.zonefile_offset, subdomain_obj.txid, \n ','.join(str(i) for i in subdomain_obj.domain_zonefiles_missing),\n 1 if subdomain_obj.accepted else 0,\n subdomain_obj.resolver)\n cursor = None\n if cur is None:\n cursor = self.conn.cursor()\n else:\n cursor = cur\n db_query_execute(cursor, write_cmd, args)\n num_rows_written = cursor.rowcount\n if cur is None:\n self.conn.commit()\n if num_rows_written != 1:\n raise ValueError(\"No row written: fqn={} seq={}\".format(subdomain_obj.get_fqn(), subdomain_obj.n))\n return True", - "docstring": "Update the subdomain history table for this subdomain entry.\n Creates it if it doesn't exist.\n\n Return True on success\n Raise exception on error" - }, - { - "code": "def get_present_elements(self, locator, params=None, timeout=None, visible=False, parent=None):\n error_msg = \"Children were never present\" if parent else \"Elements were never present!\"\n expected_condition = ec.visibility_of_all_elements_located if visible else ec.presence_of_all_elements_located\n return self._get(locator, expected_condition, params, timeout, error_msg, parent)", - "docstring": "Get elements present in the DOM.\n\n If timeout is 0 (zero) return WebElement instance or None, else we wait and retry for timeout and raise\n TimeoutException should the element not be found.\n\n :param locator: element identifier\n :param params: (optional) locator parameters\n :param timeout: (optional) time to wait for element (default: self._explicit_wait)\n :param visible: (optional) if the element should also be visible (default: False)\n :param parent: internal (see #get_present_children)\n :return: WebElement instance" - }, - { - "code": "def parse_tag_pattern(self, sel, m, has_selector):\n parts = [css_unescape(x) for x in m.group(0).split('|')]\n if len(parts) > 1:\n prefix = parts[0]\n tag = parts[1]\n else:\n tag = parts[0]\n prefix = None\n sel.tag = ct.SelectorTag(tag, prefix)\n has_selector = True\n return has_selector", - "docstring": "Parse tag pattern from regex match." - }, - { - "code": "def is_valid_sid_for_chain(pid, sid):\n if not d1_gmn.app.did.is_valid_sid_for_chain(pid, sid):\n existing_sid = d1_gmn.app.revision.get_sid_by_pid(pid)\n raise d1_common.types.exceptions.IdentifierNotUnique(\n 0,\n 'A different SID is already assigned to the revision chain to which '\n 'the object being created or updated belongs. A SID cannot be changed '\n 'once it has been assigned to a chain. 
'\n 'existing_sid=\"{}\", new_sid=\"{}\", pid=\"{}\"'.format(existing_sid, sid, pid),\n )", - "docstring": "Assert that ``sid`` can be assigned to the single object ``pid`` or to the chain\n to which ``pid`` belongs.\n\n - If the chain does not have a SID, the new SID must be previously unused.\n - If the chain already has a SID, the new SID must match the existing SID." - }, - { - "code": "def url2domain(url):\n parsed_uri = urlparse.urlparse(url)\n domain = '{uri.netloc}'.format(uri=parsed_uri)\n domain = re.sub(\"^.+@\", \"\", domain)\n domain = re.sub(\":.+$\", \"\", domain)\n return domain", - "docstring": "extract domain from url" - }, - { - "code": "def fit(self, X, y=None, **fit_params):\n self.estimator.fit(X, y, **fit_params)\n return self", - "docstring": "Fit the StackingEstimator meta-transformer.\n\n Parameters\n ----------\n X: array-like of shape (n_samples, n_features)\n The training input samples.\n y: array-like, shape (n_samples,)\n The target values (integers that correspond to classes in classification, real numbers in regression).\n fit_params:\n Other estimator-specific parameters.\n\n Returns\n -------\n self: object\n Returns a copy of the estimator" - }, - { - "code": "def read_sampling_params_from_config(cp, section_group=None,\n section='sampling_params'):\n if section_group is not None:\n section_prefix = '{}_'.format(section_group)\n else:\n section_prefix = ''\n section = section_prefix + section\n replaced_params = set()\n sampling_params = set()\n for args in cp.options(section):\n map_args = cp.get(section, args)\n sampling_params.update(set(map(str.strip, map_args.split(','))))\n replaced_params.update(set(map(str.strip, args.split(','))))\n return list(sampling_params), list(replaced_params)", - "docstring": "Reads sampling parameters from the given config file.\n\n Parameters are read from the `[({section_group}_){section}]` section.\n The options should list the variable args to transform; the parameters they\n point to should list the parameters they are to be transformed to for\n sampling. If a multiple parameters are transformed together, they should\n be comma separated. Example:\n\n .. code-block:: ini\n\n [sampling_params]\n mass1, mass2 = mchirp, logitq\n spin1_a = logitspin1_a\n\n Note that only the final sampling parameters should be listed, even if\n multiple intermediate transforms are needed. (In the above example, a\n transform is needed to go from mass1, mass2 to mchirp, q, then another one\n needed to go from q to logitq.) These transforms should be specified\n in separate sections; see ``transforms.read_transforms_from_config`` for\n details.\n\n Parameters\n ----------\n cp : WorkflowConfigParser\n An open config parser to read from.\n section_group : str, optional\n Append `{section_group}_` to the section name. Default is None.\n section : str, optional\n The name of the section. Default is 'sampling_params'.\n\n Returns\n -------\n sampling_params : list\n The list of sampling parameters to use instead.\n replaced_params : list\n The list of variable args to replace in the sampler." 
- }, - { - "code": "def set_node_status(node_id, status, **kwargs):\n user_id = kwargs.get('user_id')\n try:\n node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()\n except NoResultFound:\n raise ResourceNotFoundError(\"Node %s not found\"%(node_id))\n node_i.network.check_write_permission(user_id)\n node_i.status = status\n for link in node_i.links_to:\n link.status = status\n for link in node_i.links_from:\n link.status = status\n db.DBSession.flush()\n return node_i", - "docstring": "Set the status of a node to 'X'" - }, - { - "code": "def try_checkpoint_metadata(self, trial):\n if trial._checkpoint.storage == Checkpoint.MEMORY:\n logger.debug(\"Not saving data for trial w/ memory checkpoint.\")\n return\n try:\n logger.debug(\"Saving trial metadata.\")\n self._cached_trial_state[trial.trial_id] = trial.__getstate__()\n except Exception:\n logger.exception(\"Error checkpointing trial metadata.\")", - "docstring": "Checkpoints metadata.\n\n Args:\n trial (Trial): Trial to checkpoint." - }, - { - "code": "def delete_file(self, commit, path):\n req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))\n self.stub.DeleteFile(req, metadata=self.metadata)", - "docstring": "Deletes a file from a Commit. DeleteFile leaves a tombstone in the\n Commit, assuming the file isn't written to later attempting to get the\n file from the finished commit will result in not found error. The file\n will of course remain intact in the Commit's parent.\n\n Params:\n * commit: A tuple, string, or Commit object representing the commit.\n * path: The path to the file." - }, - { - "code": "def get_gene_info(ensembl_ids=None, hgnc_symbols=None):\n uniq_ensembl_ids = set(ensembl_id for ensembl_id in (ensembl_ids or []))\n uniq_hgnc_symbols = set(hgnc_symbol for hgnc_symbol in (hgnc_symbols or []))\n genes = []\n gene_data = []\n if uniq_ensembl_ids:\n for ensembl_id in uniq_ensembl_ids:\n for res in query_gene(ensembl_id=ensembl_id):\n gene_data.append(res) \n elif uniq_hgnc_symbols:\n for hgnc_symbol in uniq_hgnc_symbols:\n query_res = query_gene(hgnc_symbol=hgnc_symbol)\n if query_res:\n for res in query_res:\n gene_data.append(res)\n else:\n gene_data.append({\n 'hgnc_symbol': hgnc_symbol,\n 'hgnc_id': None,\n 'ensembl_id': None,\n 'description': None,\n 'chrom': 'unknown',\n 'start': 0,\n 'stop': 0,\n 'hi_score': None,\n 'constraint_score': None,\n })\n for gene in gene_data:\n genes.append(Gene(\n symbol=gene ['hgnc_symbol'],\n hgnc_id=gene['hgnc_id'],\n ensembl_id=gene['ensembl_id'],\n description=gene['description'],\n chrom=gene['chrom'],\n start=gene['start'],\n stop=gene['stop'],\n location=get_cytoband_coord(gene['chrom'], gene['start']),\n hi_score=gene['hi_score'],\n constraint_score=gene['constraint_score'],\n omim_number=get_omim_number(gene['hgnc_symbol'])\n ))\n return genes", - "docstring": "Return the genes info based on the transcripts found\n\n Args:\n ensembl_ids (Optional[list]): list of Ensembl gene ids\n hgnc_symbols (Optional[list]): list of HGNC gene symbols\n\n Returns:\n iterable: an iterable with `Gene` objects" - }, - { - "code": "def metric(cls, name, count, elapsed):\n if name is None:\n warnings.warn(\"Ignoring unnamed metric\", stacklevel=3)\n return\n with cls.lock:\n if not cls.instances:\n shutil.rmtree(cls.outdir, ignore_errors=True)\n os.makedirs(cls.outdir)\n if cls.dump_atexit: atexit.register(cls.dump)\n try:\n self = cls.instances[name]\n except KeyError:\n self = cls.instances[name] = cls(name)\n self.writer.writerow((count, 
\"%f\"%elapsed))", - "docstring": "A metric function that writes multiple CSV files\n\n :arg str name: name of the metric\n :arg int count: number of items\n :arg float elapsed: time in seconds" - }, - { - "code": "def add(self, resource, replace=False):\n uri = resource.uri\n for r in self:\n if (uri == r.uri):\n if (replace):\n r = resource\n return\n else:\n raise ResourceListDupeError(\n \"Attempt to add resource already in resource_list\")\n self.append(resource)", - "docstring": "Add a single resource, check for dupes." - }, - { - "code": "def label(self, input_grid):\n unset = 0\n high_labels, num_labels = label(input_grid > self.high_thresh)\n region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, num_labels + 1)))[::-1]\n output_grid = np.zeros(input_grid.shape, dtype=int)\n stack = []\n for rank in region_ranking:\n label_num = rank + 1\n label_i, label_j = np.where(high_labels == label_num)\n for i in range(label_i.size):\n if output_grid[label_i[i], label_j[i]] == unset:\n stack.append((label_i[i], label_j[i]))\n while len(stack) > 0:\n index = stack.pop()\n output_grid[index] = label_num\n for i in range(index[0] - 1, index[0] + 2):\n for j in range(index[1] - 1, index[1] + 2):\n if 0 <= i < output_grid.shape[0] and 0 <= j < output_grid.shape[1]:\n if (input_grid[i, j] > self.low_thresh) and (output_grid[i, j] == unset):\n stack.append((i, j))\n return output_grid", - "docstring": "Label input grid with hysteresis method.\n\n Args:\n input_grid: 2D array of values.\n\n Returns:\n Labeled output grid." - }, - { - "code": "def get_inventory_by_name(nme, character):\n for ndx, sk in enumerate(character[\"inventory\"]):\n if sk[\"name\"] == nme:\n return ndx\n return 0", - "docstring": "returns the inventory index by name" - }, - { - "code": "def sg_max(tensor, opt):\n r\n return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)", - "docstring": "r\"\"\"Computes the maximum of elements across axis of a tensor.\n\n See `tf.reduce_max()` in tensorflow.\n\n Args:\n tensor: A `Tensor` (automatically given by chain).\n opt:\n axis : A tuple/list of integers or an integer. The axis to reduce.\n keep_dims: If true, retains reduced dimensions with length 1.\n name: If provided, replace current tensor's name.\n\n Returns:\n A `Tensor`." - }, - { - "code": "def lazy_import(module_name, to_import):\n module = importlib.import_module(module_name)\n import_mapping = {}\n for name in to_import:\n importing, _, binding = name.partition(' as ')\n if not binding:\n _, _, binding = importing.rpartition('.')\n import_mapping[binding] = importing\n def __getattr__(name):\n if name not in import_mapping:\n raise ModuleAttributeError(module_name, name)\n importing = import_mapping[name]\n imported = importlib.import_module(importing,\n module.__spec__.parent)\n setattr(module, name, imported)\n return imported\n return module, __getattr__", - "docstring": "Return the importing module and a callable for lazy importing.\n\n The module named by module_name represents the module performing the\n import to help facilitate resolving relative imports.\n\n to_import is an iterable of the modules to be potentially imported (absolute\n or relative). The `as` form of importing is also supported,\n e.g. `pkg.mod as spam`.\n\n This function returns a tuple of two items. The first is the importer\n module for easy reference within itself. The second item is a callable to be\n set to `__getattr__`." 
- }, - { - "code": "def get_fwhm_tag(expnum, ccd, prefix=None, version='p'):\n uri = get_uri(expnum, ccd, version, ext='fwhm', prefix=prefix)\n if uri not in fwhm:\n key = \"fwhm_{:1s}{:02d}\".format(version, int(ccd))\n fwhm[uri] = get_tag(expnum, key)\n return fwhm[uri]", - "docstring": "Get the FWHM from the VOSpace annotation.\n\n @param expnum:\n @param ccd:\n @param prefix:\n @param version:\n @return:" - }, - { - "code": "def _decimal_to_json(value):\n if isinstance(value, decimal.Decimal):\n value = str(value)\n return value", - "docstring": "Coerce 'value' to a JSON-compatible representation." - }, - { - "code": "def schedule_saved(sender, instance, **kwargs):\n from contentstore.tasks import sync_schedule\n sync_schedule.delay(str(instance.id))", - "docstring": "Fires off the celery task to ensure that this schedule is in the scheduler\n\n Arguments:\n sender {class} -- The model class, always Schedule\n instance {Schedule} --\n The instance of the Schedule that we want to sync" - }, - { - "code": "def set_exposure_scale(self, name, scale=None):\n name = self.roi.get_source_by_name(name).name\n if scale is None and name not in self._src_expscale:\n return\n elif scale is None:\n scale = self._src_expscale.get(name, 1.0)\n else:\n self._src_expscale[name] = scale\n self._scale_srcmap({name: scale})", - "docstring": "Set the exposure correction of a source.\n\n Parameters\n ----------\n name : str\n Source name.\n\n scale : factor\n Exposure scale factor (1.0 = nominal exposure)." - }, - { - "code": "def decode(self, litmap):\n return Or(*[And(*[litmap[idx] for idx in clause])\n for clause in self.clauses])", - "docstring": "Convert the DNF to an expression." - }, - { - "code": "def update_empty_fields(self, **kwargs):\n if self._is_deprecated is None:\n self._is_deprecated = kwargs.get('is_deprecated')\n if self._is_dubious is None:\n self._is_dubious = kwargs.get('is_dubious')\n if self._is_synonym is None:\n self._is_synonym = kwargs.get('is_synonym')\n if self._synonyms is _EMPTY_TUPLE:\n self._synonyms = kwargs.get('synonyms')\n if self._synonyms is None:\n self._synonyms = _EMPTY_TUPLE\n if self.rank is None:\n self._rank = kwargs.get('rank')\n if self._nomenclature_code:\n self._nomenclature_code = kwargs.get('nomenclature_code')\n if not self._unique_name:\n self._unique_name = kwargs.get('unique_name')\n if self._taxonomic_lineage is None:\n self._taxonomic_lineage = kwargs.get('taxonomic_lineage')\n if self._parent is None:\n self._parent = kwargs.get('parent')\n if self._parent is None and self._taxomachine_wrapper is not None and self._taxonomic_lineage:\n self._fill_parent_attr()", - "docstring": "Updates the field of info about an OTU that might not be filled in by a match_names or taxon call." 
- }, - { - "code": "def parse_request(self, data):\n if isinstance(data, bytes):\n data = data.decode()\n try:\n req = json.loads(data)\n except Exception as e:\n raise JSONRPCParseError()\n if isinstance(req, list):\n requests = JSONRPCBatchRequest()\n for subreq in req:\n try:\n requests.append(self._parse_subrequest(subreq))\n except RPCError as e:\n requests.append(e)\n except Exception as e:\n requests.append(JSONRPCInvalidRequestError())\n if not requests:\n raise JSONRPCInvalidRequestError()\n return requests\n else:\n return self._parse_subrequest(req)", - "docstring": "Deserializes and validates a request.\n\n Called by the server to reconstruct the serialized :py:class:`JSONRPCRequest`.\n\n :param bytes data: The data stream received by the transport layer containing the\n serialized request.\n :return: A reconstructed request.\n :rtype: :py:class:`JSONRPCRequest`\n :raises JSONRPCParseError: if the ``data`` cannot be parsed as valid JSON.\n :raises JSONRPCInvalidRequestError: if the request does not comply with the standard." - }, - { - "code": "def attach_binary(self, data, filename, content_type=None,\n content_id=None):\n if self.attachments is None:\n self.attachments = []\n if content_type is None:\n content_type = self._detect_content_type(filename)\n attachment = {\n 'Name': filename,\n 'Content': b64encode(data).decode('utf-8'),\n 'ContentType': content_type\n }\n if content_id is not None:\n if not content_id.startswith('cid:'):\n raise MessageError('content_id parameter must be an '\n 'RFC-2392 URL starting with \"cid:\"')\n attachment['ContentID'] = content_id\n self.attachments.append(attachment)", - "docstring": "Attach a file to the message given raw binary data.\n\n :param data: Raw data to attach to the message.\n :param filename: Name of the file for the data.\n :param content_type: mimetype of the data. It will be guessed from the\n filename if not provided.\n :param content_id: ContentID URL of the attachment. A RFC 2392-\n compliant URL for the attachment that allows it to be referenced\n from inside the body of the message. Must start with 'cid:'" - }, - { - "code": "def ball_pick(n, d, rng=None):\n def valid(r):\n return vector_mag_sq(r) < 1.0\n return rejection_pick(L=2.0, n=n, d=d, valid=valid, rng=rng)", - "docstring": "Return cartesian vectors uniformly picked on the unit ball in an\n arbitrary number of dimensions.\n\n The unit ball is the space enclosed by the unit sphere.\n\n The picking is done by rejection sampling in the unit cube.\n\n In 3-dimensional space, the fraction `\\pi / 6 \\sim 0.52` points are valid.\n\n Parameters\n ----------\n n: integer\n Number of points to return.\n d: integer\n Number of dimensions of the space in which the ball lives\n\n Returns\n -------\n r: array, shape (n, d)\n Sample cartesian vectors." 
- }, - { - "code": "def MakeSuiteFromDict(d, name=''):\n suite = Suite(name=name)\n suite.SetDict(d)\n suite.Normalize()\n return suite", - "docstring": "Makes a suite from a map from values to probabilities.\n\n Args:\n d: dictionary that maps values to probabilities\n name: string name for this suite\n\n Returns:\n Suite object" - }, - { - "code": "def update(self, data):\n self.data = data\n self.name = data['name']\n self.description = data['description']\n self.win_index = data['win_index']\n self.guid = data['guid']\n self.mac = data['mac']\n self.ipv4_metric = data['ipv4_metric']\n self.ipv6_metric = data['ipv6_metric']\n self.ips = data['ips']\n if 'invalid' in data:\n self.invalid = data['invalid']\n self._update_pcapdata()\n try:\n if conf.use_npcap:\n pcap_name_loopback = _get_npcap_config(\"LoopbackAdapter\")\n if pcap_name_loopback:\n guid = _pcapname_to_guid(pcap_name_loopback)\n if self.guid == guid:\n self.mac = \"00:00:00:00:00:00\"\n self.ip = \"127.0.0.1\"\n return\n except KeyError:\n pass\n try:\n self.ip = next(x for x in self.ips if \":\" not in x)\n except StopIteration:\n pass\n try:\n if not self.ip and self.name == scapy.consts.LOOPBACK_NAME:\n self.ip = \"127.0.0.1\"\n except (KeyError, AttributeError, NameError) as e:\n print(e)", - "docstring": "Update info about a network interface according\n to a given dictionary. Such data is provided by get_windows_if_list" - }, - { - "code": "def _GetMountpoints(only_physical=True):\n partitions = psutil.disk_partitions(all=not only_physical)\n return set(partition.mountpoint for partition in partitions)", - "docstring": "Fetches a list of mountpoints.\n\n Args:\n only_physical: Determines whether only mountpoints for physical devices\n (e.g. hard disks) should be listed. If false, mountpoints for things such\n as memory partitions or `/dev/shm` will be returned as well.\n\n Returns:\n A set of mountpoints." - }, - { - "code": "def preserve_namespace(newns=None):\n ns = cmds.namespaceInfo(an=True)\n try:\n cmds.namespace(set=newns)\n yield\n finally:\n cmds.namespace(set=ns)", - "docstring": "Contextmanager that will restore the current namespace\n\n :param newns: a name of namespace that should be set in the beginning. 
the original namespace will be restored afterwards.\n If None, does not set a namespace.\n :type newns: str | None\n :returns: None\n :rtype: None\n :raises: None" - }, - { - "code": "def use_in(ContentHandler):\n\tdef startStream(self, parent, attrs, __orig_startStream = ContentHandler.startStream):\n\t\tif parent.tagName == ligolw.Array.tagName:\n\t\t\treturn ArrayStream(attrs).config(parent)\n\t\treturn __orig_startStream(self, parent, attrs)\n\tdef startArray(self, parent, attrs):\n\t\treturn Array(attrs)\n\tContentHandler.startStream = startStream\n\tContentHandler.startArray = startArray\n\treturn ContentHandler", - "docstring": "Modify ContentHandler, a sub-class of\n\tpycbc_glue.ligolw.LIGOLWContentHandler, to cause it to use the Array and\n\tArrayStream classes defined in this module when parsing XML\n\tdocuments.\n\n\tExample:\n\n\t>>> from pycbc_glue.ligolw import ligolw\n\t>>> class MyContentHandler(ligolw.LIGOLWContentHandler):\n\t...\tpass\n\t...\n\t>>> use_in(MyContentHandler)\n\t" - }, - { - "code": "def _put_bucket_cors(self):\n if self.s3props['cors']['enabled'] and self.s3props['website']['enabled']:\n cors_config = {}\n cors_rules = []\n for each_rule in self.s3props['cors']['cors_rules']:\n cors_rules.append({\n 'AllowedHeaders': each_rule['cors_headers'],\n 'AllowedMethods': each_rule['cors_methods'],\n 'AllowedOrigins': each_rule['cors_origins'],\n 'ExposeHeaders': each_rule['cors_expose_headers'],\n 'MaxAgeSeconds': each_rule['cors_max_age']\n })\n cors_config = {\n 'CORSRules': cors_rules\n }\n LOG.debug(cors_config)\n _response = self.s3client.put_bucket_cors(Bucket=self.bucket, CORSConfiguration=cors_config)\n else:\n _response = self.s3client.delete_bucket_cors(Bucket=self.bucket)\n LOG.debug('Response setting up S3 CORS: %s', _response)\n LOG.info('S3 CORS configuration updated')", - "docstring": "Adds bucket cors configuration." 
- }, - { - "code": "def get_historical_prices(symbol, start_date, end_date):\n params = urlencode({\n 's': symbol,\n 'a': int(start_date[5:7]) - 1,\n 'b': int(start_date[8:10]),\n 'c': int(start_date[0:4]),\n 'd': int(end_date[5:7]) - 1,\n 'e': int(end_date[8:10]),\n 'f': int(end_date[0:4]),\n 'g': 'd',\n 'ignore': '.csv',\n })\n url = 'http://ichart.yahoo.com/table.csv?%s' % params\n req = Request(url)\n resp = urlopen(req)\n content = str(resp.read().decode('utf-8').strip())\n daily_data = content.splitlines()\n hist_dict = dict()\n keys = daily_data[0].split(',')\n for day in daily_data[1:]:\n day_data = day.split(',')\n date = day_data[0]\n hist_dict[date] = \\\n {keys[1]: day_data[1],\n keys[2]: day_data[2],\n keys[3]: day_data[3],\n keys[4]: day_data[4],\n keys[5]: day_data[5],\n keys[6]: day_data[6]}\n return hist_dict", - "docstring": "Get historical prices for the given ticker symbol.\n Date format is 'YYYY-MM-DD'\n\n Returns a nested dictionary (dict of dicts).\n outer dict keys are dates ('YYYY-MM-DD')" - }, - { - "code": "def set_install_dir(self, install_dir=None, version=None, verbose=False):\n if version is None:\n self.__install_dir = install_dir\n if install_dir is not None:\n common.validate_install_dir(install_dir)\n else:\n self.__install_dir = self.node_setup(version, verbose=verbose)\n self._cassandra_version = common.get_version_from_build(self.__install_dir, cassandra=True)\n if self.get_base_cassandra_version() >= 4.0:\n self.network_interfaces['thrift'] = None\n self.import_config_files()\n self.import_bin_files()\n self.__conf_updated = False\n return self", - "docstring": "Sets the path to the cassandra source directory for use by this node." - }, - { - "code": "def get_previous_version(version: str) -> Optional[str]:\n debug('get_previous_version')\n found_version = False\n for commit_hash, commit_message in get_commit_log():\n debug('checking commit {}'.format(commit_hash))\n if version in commit_message:\n found_version = True\n debug('found_version in \"{}\"'.format(commit_message))\n continue\n if found_version:\n matches = re.match(r'v?(\\d+.\\d+.\\d+)', commit_message)\n if matches:\n debug('version matches', commit_message)\n return matches.group(1).strip()\n return get_last_version([version, 'v{}'.format(version)])", - "docstring": "Returns the version prior to the given version.\n\n :param version: A string with the version number.\n :return: A string with the previous version number" - }, - { - "code": "def read_ring_images_index():\n meta = read_cumulative_iss_index()\n ringfilter = meta.TARGET_DESC.str.contains(\"ring\", case=False)\n return meta[ringfilter]", - "docstring": "Filter cumulative index for ring images.\n\n This is done by matching the column TARGET_DESC to contain the string 'ring'\n\n Returns\n -------\n pandas.DataFrame\n data table containing only meta-data for ring images" - }, - { - "code": "def get_content_type(content_type):\n m = email.message.Message()\n m['Content-Type'] = content_type\n return m.get_content_type()", - "docstring": "Extract the MIME type value from a content type string.\n\n Removes any subtype and parameter values that may be present in the string.\n\n Args:\n content_type: str\n String with content type and optional subtype and parameter fields.\n\n Returns:\n str: String with only content type\n\n Example:\n\n ::\n\n Input: multipart/form-data; boundary=aBoundaryString\n Returns: multipart/form-data" - }, - { - "code": "def get(cls, pid, session):\n with cls._lock:\n cls._ensure_pool_exists(pid)\n return 
cls._pools[pid].get(session)", - "docstring": "Get an idle, unused connection from the pool. Once a connection has\n been retrieved, it will be marked as in-use until it is freed.\n\n :param str pid: The pool ID\n :param queries.Session session: The session to assign to the connection\n :rtype: psycopg2.extensions.connection" - }, - { - "code": "def rain_series(self):\n return [(tstamp, \\\n self._station_history.get_measurements()[tstamp]['rain']) \\\n for tstamp in self._station_history.get_measurements()]", - "docstring": "Returns the precipitation time series relative to the\n meteostation, in the form of a list of tuples, each one containing the\n couple timestamp-value\n\n :returns: a list of tuples" - }, - { - "code": "def from_charmm(cls, path, positions=None, forcefield=None, strict=True, **kwargs):\n psf = CharmmPsfFile(path)\n if strict and forcefield is None:\n raise ValueError('PSF files require key `forcefield`.')\n if strict and positions is None:\n raise ValueError('PSF files require key `positions`.')\n psf.parmset = CharmmParameterSet(*forcefield)\n psf.loadParameters(psf.parmset)\n return cls(master=psf, topology=psf.topology, positions=positions, path=path,\n **kwargs)", - "docstring": "Loads PSF Charmm structure from `path`. Requires `charmm_parameters`.\n\n Parameters\n ----------\n path : str\n Path to PSF file\n forcefield : list of str\n Paths to Charmm parameters files, such as *.par or *.str. REQUIRED\n\n Returns\n -------\n psf : SystemHandler\n SystemHandler with topology. Charmm parameters are embedded in\n the `master` attribute." - }, - { - "code": "def _dict_mapping_to_pb(mapping, proto_type):\n converted_pb = getattr(trace_pb2, proto_type)()\n ParseDict(mapping, converted_pb)\n return converted_pb", - "docstring": "Convert a dict to protobuf.\n\n Args:\n mapping (dict): A dict that needs to be converted to protobuf.\n proto_type (str): The type of the Protobuf.\n\n Returns:\n An instance of the specified protobuf." - }, - { - "code": "def get_extra_managed_storage_volume_paths(self, start=0, count=-1, filter='', sort=''):\n uri = self.URI + '/repair?alertFixType=ExtraManagedStorageVolumePaths'\n return self._client.get_all(start, count, filter=filter, sort=sort, uri=uri)", - "docstring": "Gets the list of extra managed storage volume paths.\n\n Args:\n start:\n The first item to return, using 0-based indexing.\n If not specified, the default is 0 - start with the first available item.\n count:\n The number of resources to return. A count of -1 requests all items.\n The actual number of items in the response might differ from the requested\n count if the sum of start and count exceeds the total number of items.\n filter (list or str):\n A general filter/query string to narrow the list of items returned. The\n default is no filter; all resources are returned.\n sort:\n The sort order of the returned data set. By default, the sort order is based\n on create time with the oldest entry first.\n\n Returns:\n list: A list of extra managed storage volume paths." 
- }, - { - "code": "def on_sigchld(self, _signum, _unused_frame):\n LOGGER.info('SIGCHLD received from child')\n if not self.active_processes(False):\n LOGGER.info('Stopping with no active processes and child error')\n signal.setitimer(signal.ITIMER_REAL, 0, 0)\n self.set_state(self.STATE_STOPPED)", - "docstring": "Invoked when a child sends up an SIGCHLD signal.\n\n :param int _signum: The signal that was invoked\n :param frame _unused_frame: The frame that was interrupted" - }, - { - "code": "def _get_request_token(self):\n params = {\n 'oauth_callback': self.get_callback_url()\n }\n response, content = self.client().request(self.request_token_url,\n \"POST\", body=urllib.urlencode(params))\n content = smart_unicode(content)\n if not response['status'] == '200':\n raise OAuthError(_(\n u\"Invalid status code %s while obtaining request token from %s: %s\") % (\n response['status'], self.request_token_url, content))\n token = dict(urlparse.parse_qsl(content))\n return oauth.Token(token['oauth_token'], token['oauth_token_secret'])", - "docstring": "Fetch a request token from `self.request_token_url`." - }, - { - "code": "def compile_(self):\n from Cython import Build\n argv = copy.deepcopy(sys.argv)\n sys.argv = [sys.argv[0], 'build_ext', '--build-lib='+self.buildpath]\n exc_modules = [\n distutils.extension.Extension(\n 'hydpy.cythons.autogen.'+self.cyname,\n [self.pyxfilepath], extra_compile_args=['-O2'])]\n distutils.core.setup(ext_modules=Build.cythonize(exc_modules),\n include_dirs=[numpy.get_include()])\n sys.argv = argv", - "docstring": "Translate cython code to C code and compile it." - }, - { - "code": "def is_admin(self, roles):\n for r in roles:\n if r in self.admin_roles:\n return True\n return False", - "docstring": "determine from a list of roles if is ldapcherry administrator" - }, - { - "code": "def persistent_id(self, obj):\n obj_class = None if not hasattr(obj, '__class__') else obj.__class__\n if obj_class is None:\n return None\n if _is_not_pickle_safe_gl_class(obj_class):\n if (id(obj) in self.gl_object_memo):\n return (None, None, id(obj))\n else:\n relative_filename = str(_uuid.uuid4())\n filename = _os.path.join(self.gl_temp_storage_path, relative_filename)\n self.mark_for_delete -= set([filename])\n obj.save(filename)\n self.gl_object_memo.add(id(obj))\n return (_get_gl_class_type(obj.__class__), relative_filename, id(obj))\n else:\n return None", - "docstring": "Provide a persistent ID for \"saving\" GLC objects by reference. Return\n None for all non GLC objects.\n\n Parameters\n ----------\n\n obj: Name of the object whose persistent ID is extracted.\n\n Returns\n --------\n None if the object is not a GLC object. (ClassName, relative path)\n if the object is a GLC object.\n\n Notes\n -----\n\n Borrowed from pickle docs (https://docs.python.org/2/library/_pickle.html)\n\n For the benefit of object persistence, the pickle module supports the\n notion of a reference to an object outside the pickled data stream.\n\n To pickle objects that have an external persistent id, the pickler must\n have a custom persistent_id() method that takes an object as an argument and\n returns either None or the persistent id for that object.\n\n For GLC objects, the persistent_id is merely a relative file path (within\n the ZIP archive) to the GLC archive where the GLC object is saved. 
For\n example:\n\n (SFrame, 'sframe-save-path')\n (SGraph, 'sgraph-save-path')\n (Model, 'model-save-path')" - }, - { - "code": "async def update(self):\n if self.client.session.closed:\n async with core.Client() as client:\n data = await client.request(self.url)\n else:\n data = await self.client.request(self.url)\n self.raw_data = data\n self.from_data(data)\n return self", - "docstring": "Update an object with current info." - }, - { - "code": "def merge(directory=None, revisions='', message=None, branch_label=None,\n rev_id=None):\n if alembic_version >= (0, 7, 0):\n config = current_app.extensions['migrate'].migrate.get_config(\n directory)\n command.merge(config, revisions, message=message,\n branch_label=branch_label, rev_id=rev_id)\n else:\n raise RuntimeError('Alembic 0.7.0 or greater is required')", - "docstring": "Merge two revisions together. Creates a new migration file" - }, - { - "code": "def select_column(self, key, column, **kwargs):\n return self.get_storer(key).read_column(column=column, **kwargs)", - "docstring": "return a single column from the table. This is generally only useful to\n select an indexable\n\n Parameters\n ----------\n key : object\n column: the column of interest\n\n Exceptions\n ----------\n raises KeyError if the column is not found (or key is not a valid\n store)\n raises ValueError if the column can not be extracted individually (it\n is part of a data block)" - }, - { - "code": "def find_next_character(code, position, char):\n end = LineCol(code, *position)\n while not end.eof and end.char() in WHITESPACE:\n end.inc()\n if not end.eof and end.char() == char:\n return end.tuple(), inc_tuple(end.tuple())\n return None, None", - "docstring": "Find next char and return its first and last positions" - }, - { - "code": "def serialize(expr):\n result = None\n if isinstance(expr, string_types):\n result = expr\n elif expr is not None:\n result = '=py:{0}'.format(expr)\n return result", - "docstring": "Serialize input expr into a parsable value.\n\n :rtype: str" - }, - { - "code": "def add_notification_listener(self, notification_type, notification_callback):\n if notification_type not in self.notifications:\n self.notifications[notification_type] = [(self.notification_id, notification_callback)]\n else:\n if reduce(lambda a, b: a + 1,\n filter(lambda tup: tup[1] == notification_callback, self.notifications[notification_type]),\n 0) > 0:\n return -1\n self.notifications[notification_type].append((self.notification_id, notification_callback))\n ret_val = self.notification_id\n self.notification_id += 1\n return ret_val", - "docstring": "Add a notification callback to the notification center.\n\n Args:\n notification_type: A string representing the notification type from .helpers.enums.NotificationTypes\n notification_callback: closure of function to call when event is triggered.\n\n Returns:\n Integer notification id used to remove the notification or -1 if the notification has already been added." - }, - { - "code": "def server(**kwargs):\n start_server(**{k: v for k, v in kwargs.items() if v},\n blocking=True)", - "docstring": "Starts the Clearly Server.\n\n BROKER: The broker being used by celery, like \"amqp://localhost\"." - }, - { - "code": "def _check_names_match(data_names, data_shapes, name, throw):\n actual = [x[0] for x in data_shapes]\n if sorted(data_names) != sorted(actual):\n msg = \"Data provided by %s_shapes don't match names specified by %s_names (%s vs. 
%s)\"%(\n name, name, str(data_shapes), str(data_names))\n if throw:\n raise ValueError(msg)\n else:\n warnings.warn(msg)", - "docstring": "Check that input names matches input data descriptors." - }, - { - "code": "def create_from_png(cls, source):\n if hasattr(source, 'read'):\n read_func = _make_read_func(source)\n pointer = cairo.cairo_image_surface_create_from_png_stream(\n read_func, ffi.NULL)\n else:\n pointer = cairo.cairo_image_surface_create_from_png(\n _encode_filename(source))\n self = object.__new__(cls)\n Surface.__init__(self, pointer)\n return self", - "docstring": "Decode a PNG file into a new image surface.\n\n :param source:\n A filename or\n a binary mode file-like object with a :meth:`~file.read` method.\n If you already have a byte string in memory,\n use :class:`io.BytesIO`.\n :returns: A new :class:`ImageSurface` instance." - }, - { - "code": "def _do_flood(self, in_port, msg):\n datapath = msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n actions = [parser.OFPActionOutput(ofproto.OFPP_FLOOD)]\n self._do_packet_out(datapath, msg.data, in_port, actions)", - "docstring": "the process when the snooper received a message of the\n outside for processing." - }, - { - "code": "def _expand_dataset_packages(dataset_label_dict):\n new_dataset_label_dict = []\n for label in dataset_label_dict:\n dataset_metadata = data_urls[label]\n if type(dataset_metadata) == dict and \"package\" in dataset_metadata:\n new_dataset_label_dict.extend(dataset_metadata[\"package\"])\n else:\n new_dataset_label_dict.append(label)\n return new_dataset_label_dict", - "docstring": "Returns list of possible packages contained in dataset, in case the dataset is multi dataset, eg. 'lisa'.\n\n In case the param is not pointing to multidataset returns only that label in a list.\n\n :param str dataset_label_dict: label of multi dataset\n :return: list of labels" - }, - { - "code": "def get_text(self, title):\n r = requests.get(self.api,\n params={'action': 'parse', 'page': title, 'format': 'json'},\n headers=self.header)\n jsd = r.json()\n return jsd['parse']['text']['*']", - "docstring": "This will grab the html content of the chapter given by url. Technically you can use this to get the content of other pages too.\n\n :param title: Title for the page you want the content of\n :return: a string containing the html content" - }, - { - "code": "def image(self):\n slide_part, rId = self.part, self._element.blip_rId\n if rId is None:\n raise ValueError('no embedded image')\n return slide_part.get_image(rId)", - "docstring": "An |Image| object providing access to the properties and bytes of the\n image in this picture shape." - }, - { - "code": "def set_autostart(vm_, state='on', **kwargs):\n conn = __get_conn(**kwargs)\n dom = _get_domain(conn, vm_)\n ret = False\n if state == 'on':\n ret = dom.setAutostart(1) == 0\n elif state == 'off':\n ret = dom.setAutostart(0) == 0\n conn.close()\n return ret", - "docstring": "Set the autostart flag on a VM so that the VM will start with the host\n system on reboot.\n\n :param vm_: domain name\n :param state: 'on' to auto start the pool, anything else to mark the\n pool not to be started when the host boots\n :param connection: libvirt connection URI, overriding defaults\n\n .. versionadded:: 2019.2.0\n :param username: username to connect with, overriding defaults\n\n .. versionadded:: 2019.2.0\n :param password: password to connect with, overriding defaults\n\n .. versionadded:: 2019.2.0\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt \"*\" virt.set_autostart " - }, - { - "code": "def import_feature(dest, src, name, api=None, profile=None,\n filter_symbol=None):\n if filter_symbol is None:\n filter_symbol = _default_filter_symbol\n ft = src.features[name] if isinstance(name, str) else name\n remove_symbols = set()\n for x in src.get_removes(api, profile):\n remove_symbols.update(x.as_symbols())\n def my_filter_symbol(t, name):\n return False if (t, name) in remove_symbols else filter_symbol(t, name)\n for req in ft.get_requires(profile):\n for x in req.types:\n if not my_filter_symbol('type', x):\n continue\n import_type(dest, src, x, api, filter_symbol)\n for x in req.enums:\n if not my_filter_symbol('enum', x):\n continue\n import_enum(dest, src, x)\n for x in req.commands:\n if not my_filter_symbol('command', x):\n continue\n import_command(dest, src, x, api, filter_symbol)\n dest.features[name] = ft", - "docstring": "Imports Feature `name`, and all its dependencies, from\n Registry `src` to Registry `dest`.\n\n :param Registry dest: Destination Registry\n :param Registry src: Source Registry\n :param str name: Name of Feature to import\n :param str api: Prefer to import dependencies with api name `api`,\n or None to import dependencies with no API name.\n :param str profile: Import dependencies with profile name\n `profile`, or None to import all dependencies.\n :param filter_symbol: Optional symbol filter callable\n :type filter_symbol: Callable with signature\n ``(symbol_type:str, symbol_name:str) -> bool``" - }, - { - "code": "async def wait_closed(self):\n if self._server is None:\n raise RuntimeError('Server is not started')\n await self._server.wait_closed()\n if self._handlers:\n await asyncio.wait({h.wait_closed() for h in self._handlers},\n loop=self._loop)", - "docstring": "Coroutine to wait until all existing request handlers will exit\n properly." - }, - { - "code": "def stop(self):\n self.__stop = True\n self._queue.stop()\n self._zk.stop()", - "docstring": "Stops the connection" - }, - { - "code": "def stop_request(self, stop_now='0'):\n self.app.interrupted = (stop_now == '1')\n self.app.will_stop = True\n return True", - "docstring": "Request the daemon to stop\n\n If `stop_now` is set to '1' the daemon will stop now. Else, the daemon\n will enter the stop wait mode. In this mode the daemon stops its activity and\n waits until it receives a new `stop_now` request to stop really.\n\n :param stop_now: stop now or go to stop wait mode\n :type stop_now: bool\n :return: None" - }, - { - "code": "def _request(self, function, params, method='POST', headers={}):\n if method is 'POST':\n params = urllib.parse.urlencode(params)\n headers = { \"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\" }\n path = '/%s/%s' % (self._version, function)\n self._conn.request(method, path, params, headers)\n return self._conn.getresponse()", - "docstring": "Builds a request object." 
- }, - { - "code": "def source_absent(name):\n ret = {'name': name,\n 'changes': {},\n 'result': None,\n 'comment': ''}\n if name not in __salt__['imgadm.sources']():\n ret['result'] = True\n ret['comment'] = 'image source {0} is absent'.format(name)\n else:\n if __opts__['test']:\n res = {}\n ret['result'] = True\n else:\n res = __salt__['imgadm.source_delete'](name)\n ret['result'] = (name not in res)\n if ret['result']:\n ret['comment'] = 'image source {0} deleted'.format(name)\n ret['changes'][name] = 'deleted'\n else:\n ret['comment'] = 'image source {0} not deleted'.format(name)\n if 'Error' in res:\n ret['comment'] = '{0}: {1}'.format(ret['comment'], res['Error'])\n return ret", - "docstring": "Ensure an image source is absent on the computenode\n\n name : string\n source url" - }, - { - "code": "def all_fields(self):\n return [field \n for container in FieldsContainer.class_container.values()\n for field in getattr(self, container)]", - "docstring": "A list with all the fields contained in this object." - }, - { - "code": "def dotopdict(self, dict_):\n mlookup = {'get':'GET', 'post':'POST'}\n def rearrange(path, method_dict, method):\n oid = method_dict['operationId']\n self._paths[oid] = path\n method_dict['nickname'] = oid\n method_dict['method'] = mlookup[method]\n paths = dict_['paths']\n for path, path_dict in paths.items():\n if self.path_prefix and self.path_prefix not in path:\n continue\n path_dict['operations'] = []\n for method, method_dict in sorted(path_dict.items()):\n if method == 'operations':\n continue\n rearrange(path, method_dict, method)\n path_dict['operations'].append(method_dict)\n path_dict['path'] = path\n def setp(v, lenp=len(self.path_prefix)):\n v['path'] = v['path'][lenp:]\n return v\n dict_['apis'] = []\n for tag_dict in dict_['tags']:\n path = '/' + tag_dict['name']\n d = {'path':path,\n 'description':tag_dict['description'],\n 'class_json':{\n 'docstring':tag_dict['description'],\n 'resourcePath':path,\n 'apis':[setp(v) for k, v in paths.items()\n if k.startswith(self.path_prefix + path)]},\n }\n dict_['apis'].append(d)\n self._swagger(dict_['swagger'])\n self._info(dict_['info'])\n self._definitions(dict_['definitions'])\n return dict_", - "docstring": "Rewrite the 2.0 json to match what we feed the code for 1.2" - }, - { - "code": "def solve_sdp(sdp, solver=None, solverparameters=None):\n solvers = autodetect_solvers(solverparameters)\n solver = solver.lower() if solver is not None else solver\n if solvers == []:\n raise Exception(\"Could not find any SDP solver. Please install SDPA,\" +\n \" Mosek, Cvxpy, or Picos with Cvxopt\")\n elif solver is not None and solver not in solvers:\n print(\"Available solvers: \" + str(solvers))\n if solver == \"cvxopt\":\n try:\n import cvxopt\n except ImportError:\n pass\n else:\n raise Exception(\"Cvxopt is detected, but Picos is not. 
\"\n \"Please install Picos to use Cvxopt\")\n raise Exception(\"Could not detect requested \" + solver)\n elif solver is None:\n solver = solvers[0]\n primal, dual, x_mat, y_mat, status = None, None, None, None, None\n tstart = time.time()\n if solver == \"sdpa\":\n primal, dual, x_mat, y_mat, status = \\\n solve_with_sdpa(sdp, solverparameters)\n elif solver == \"cvxpy\":\n primal, dual, x_mat, y_mat, status = \\\n solve_with_cvxpy(sdp, solverparameters)\n elif solver == \"scs\":\n if solverparameters is None:\n solverparameters_ = {\"solver\": \"SCS\"}\n else:\n solverparameters_ = solverparameters.copy()\n solverparameters_[\"solver\"] = \"SCS\"\n primal, dual, x_mat, y_mat, status = \\\n solve_with_cvxpy(sdp, solverparameters_)\n elif solver == \"mosek\":\n primal, dual, x_mat, y_mat, status = \\\n solve_with_mosek(sdp, solverparameters)\n elif solver == \"cvxopt\":\n primal, dual, x_mat, y_mat, status = \\\n solve_with_cvxopt(sdp, solverparameters)\n for constraint in sdp.constraints[sdp._n_inequalities:]:\n idx = sdp._constraint_to_block_index[constraint]\n sdp._constraint_to_block_index[constraint] = (idx[0],)\n else:\n raise Exception(\"Unkown solver: \" + solver)\n sdp.solution_time = time.time() - tstart\n sdp.primal = primal\n sdp.dual = dual\n sdp.x_mat = x_mat\n sdp.y_mat = y_mat\n sdp.status = status\n return primal, dual, x_mat, y_mat", - "docstring": "Call a solver on the SDP relaxation. Upon successful solution, it\n returns the primal and dual objective values along with the solution\n matrices.\n\n :param sdpRelaxation: The SDP relaxation to be solved.\n :type sdpRelaxation: :class:`ncpol2sdpa.SdpRelaxation`.\n :param solver: The solver to be called, either `None`, \"sdpa\", \"mosek\",\n \"cvxpy\", \"scs\", or \"cvxopt\". The default is `None`,\n which triggers autodetect.\n :type solver: str.\n :param solverparameters: Parameters to be passed to the solver. Actual\n options depend on the solver:\n\n SDPA:\n\n - `\"executable\"`:\n Specify the executable for SDPA. E.g.,\n `\"executable\":\"/usr/local/bin/sdpa\"`, or\n `\"executable\":\"sdpa_gmp\"`\n - `\"paramsfile\"`: Specify the parameter file\n\n Mosek:\n Refer to the Mosek documentation. All\n arguments are passed on.\n\n Cvxopt:\n Refer to the PICOS documentation. All\n arguments are passed on.\n\n Cvxpy:\n Refer to the Cvxpy documentation. All\n arguments are passed on.\n\n SCS:\n Refer to the Cvxpy documentation. All\n arguments are passed on.\n :type solverparameters: dict of str.\n :returns: tuple of the primal and dual optimum, and the solutions for the\n primal and dual.\n :rtype: (float, float, list of `numpy.array`, list of `numpy.array`)" - }, - { - "code": "def loads(self, string):\n \"Decompress the passed-in compact script and return the result.\"\n script_class = self.get_script_class()\n script = self._load(BytesIO(string), self._protocol, self._version)\n return script_class(script)", - "docstring": "Decompress the passed-in compact script and return the result." - }, - { - "code": "def text_length(elem):\n if not elem:\n return 0\n value = elem.value()\n try:\n value = len(value)\n except:\n value = 0\n try:\n for a in elem.elements:\n value += len(a.value())\n except:\n pass\n return value", - "docstring": "Returns length of the content in this element.\n\n Return value is not correct but it is **good enough***." 
- }, - { - "code": "def _clrgen(n, h0, hr):\n n0 = n if n == 1 else n-1\n clst = ['%f,%f,%f' % (h0 + hr*hi/n0, 0.35, 0.85) for\n hi in range(n)]\n return clst", - "docstring": "Default colour generating function\n\n Parameters\n ----------\n\n n : int\n Number of colours to generate\n h0 : float\n Initial H value in HSV colour specification\n hr : float\n Size of H value range to use for colour generation\n (final H value is h0 + hr)\n\n Returns\n -------\n clst : list of strings\n List of HSV format colour specification strings" - }, - { - "code": "def with_organisation(self, organisation):\n if organisation is None:\n organisation = ''\n organisation = slugify(organisation)\n self._validate_organisation(organisation)\n self.organisation = organisation\n return self", - "docstring": "Add an organisation segment.\n\n Args:\n organisation (str): Official name of an administrative body\n holding an election.\n\n Returns:\n IdBuilder\n\n Raises:\n ValueError" - }, - { - "code": "def distance_to_angle(distance, units='metric'):\n if units in ('km', 'metric'):\n pass\n elif units in ('sm', 'imperial', 'US customary'):\n distance *= STATUTE_MILE\n elif units in ('nm', 'nautical'):\n distance *= NAUTICAL_MILE\n else:\n raise ValueError('Unknown units type %r' % units)\n return math.degrees(distance / BODY_RADIUS)", - "docstring": "Convert a distance in to an angle along a great circle.\n\n Args:\n distance (float): Distance to convert to degrees\n units (str): Unit type to be used for distances\n\n Returns:\n float: Angle in degrees\n\n Raises:\n ValueError: Unknown value for ``units``" - }, - { - "code": "def _dfromtimestamp(timestamp):\n try:\n return datetime.date.fromtimestamp(timestamp)\n except OSError:\n timestamp -= time.timezone\n d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)\n if _isdst(d):\n timestamp += 3600\n d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp)\n return d", - "docstring": "Custom date timestamp constructor. 
ditto" - }, - { - "code": "def create_option_from_value(tag, value):\n dhcp_option.parser()\n fake_opt = dhcp_option(tag = tag)\n for c in dhcp_option.subclasses:\n if c.criteria(fake_opt):\n if hasattr(c, '_parse_from_value'):\n return c(tag = tag, value = c._parse_from_value(value))\n else:\n raise ValueError('Invalid DHCP option ' + str(tag) + \": \" + repr(value))\n else:\n fake_opt._setextra(_tobytes(value))\n return fake_opt", - "docstring": "Set DHCP option with human friendly value" - }, - { - "code": "def compare_motifs(self, m1, m2, match=\"total\", metric=\"wic\", combine=\"mean\", pval=False):\n if metric == \"seqcor\":\n return seqcor(m1, m2)\n elif match == \"partial\":\n if pval:\n return self.pvalue(m1, m2, \"total\", metric, combine, self.max_partial(m1.pwm, m2.pwm, metric, combine))\n elif metric in [\"pcc\", \"ed\", \"distance\", \"wic\", \"chisq\", \"ssd\"]:\n return self.max_partial(m1.pwm, m2.pwm, metric, combine)\n else:\n return self.max_partial(m1.pfm, m2.pfm, metric, combine)\n elif match == \"total\":\n if pval:\n return self.pvalue(m1, m2, match, metric, combine, self.max_total(m1.pwm, m2.pwm, metric, combine))\n elif metric in [\"pcc\", 'akl']:\n return self.max_total(m1.wiggle_pwm(), m2.wiggle_pwm(), metric, combine)\n elif metric in [\"ed\", \"distance\", \"wic\", \"chisq\", \"pcc\", \"ssd\"]:\n return self.max_total(m1.pwm, m2.pwm, metric, combine)\n else:\n return self.max_total(m1.pfm, m2.pfm, metric, combine)\n elif match == \"subtotal\":\n if metric in [\"pcc\", \"ed\", \"distance\", \"wic\", \"chisq\", \"ssd\"]:\n return self.max_subtotal(m1.pwm, m2.pwm, metric, combine)\n else:\n return self.max_subtotal(m1.pfm, m2.pfm, metric, combine)", - "docstring": "Compare two motifs.\n \n The similarity metric can be any of seqcor, pcc, ed, distance, wic, \n chisq, akl or ssd. If match is 'total' the similarity score is \n calculated for the whole match, including positions that are not \n present in both motifs. If match is partial or subtotal, only the\n matching psotiions are used to calculate the score. The score of\n individual position is combined using either the mean or the sum.\n\n Note that the match and combine parameters have no effect on the seqcor\n similarity metric. \n\n Parameters\n ----------\n m1 : Motif instance\n Motif instance 1.\n\n m2 : Motif instance\n Motif instance 2.\n\n match : str, optional\n Match can be \"partial\", \"subtotal\" or \"total\". Not all metrics use \n this.\n\n metric : str, optional\n Distance metric.\n\n combine : str, optional\n Combine positional scores using \"mean\" or \"sum\". Not all metrics\n use this.\n\n pval : bool, optional\n Calculate p-vale of match.\n \n Returns\n -------\n score, position, strand" - }, - { - "code": "def get_parent_element(self):\n return {AUDIT_REF_STATE: self.context.audit_record,\n SIGNATURE_REF_STATE: self.context.signature}[self.ref_state]", - "docstring": "Signatures and Audit elements share sub-elements, we need to know which to set attributes on" - }, - { - "code": "def _signal_handler_init(self):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n signal.signal(signal.SIGINT, self._signal_handler)\n signal.signal(signal.SIGTERM, self._signal_handler)", - "docstring": "Catch interupt signals." 
- }, - { - "code": "def pages_to_show(paginator, page, page_labels=None):\n show_pages = {}\n if page_labels is None:\n page_labels = {}\n def get_page_label(index):\n if index in page_labels:\n return page_labels[index]\n else:\n return unicode(index)\n if page != 1:\n before = 3\n if page >= (paginator.num_pages - 3):\n before += (3 - (paginator.num_pages - page))\n for i in range(before, 0, -1):\n if (page - i) >= 1:\n show_pages[page - i] = get_page_label(page - i)\n for i in range(7 - len(show_pages)):\n if (page + i) <= paginator.num_pages:\n show_pages[page + i] = get_page_label(page + i)\n return show_pages", - "docstring": "Generate a dictionary of pages to show around the current page. Show\n 3 numbers on either side of the specified page, or more if close to end or\n beginning of available pages.\n\n :param paginator: django :class:`~django.core.paginator.Paginator`,\n populated with objects\n :param page: number of the current page\n :param page_labels: optional dictionary of page labels, keyed on page number\n :rtype: dictionary; keys are page numbers, values are page labels" - }, - { - "code": "def _get_argument_list_from_toolkit_function_name(fn):\n unity = _get_unity()\n fnprops = unity.describe_toolkit_function(fn)\n argnames = fnprops['arguments']\n return argnames", - "docstring": "Given a toolkit function name, return the argument list" - }, - { - "code": "def eglGetDisplay(display=EGL_DEFAULT_DISPLAY):\n res = _lib.eglGetDisplay(display)\n if not res or res == EGL_NO_DISPLAY:\n raise RuntimeError('Could not create display')\n return res", - "docstring": "Connect to the EGL display server." - }, - { - "code": "def queue_bind(self, queue, exchange, routing_key='',\n nowait=False, arguments=None, ticket=None):\n if arguments is None:\n arguments = {}\n args = AMQPWriter()\n if ticket is not None:\n args.write_short(ticket)\n else:\n args.write_short(self.default_ticket)\n args.write_shortstr(queue)\n args.write_shortstr(exchange)\n args.write_shortstr(routing_key)\n args.write_bit(nowait)\n args.write_table(arguments)\n self._send_method((50, 20), args)\n if not nowait:\n return self.wait(allowed_methods=[\n (50, 21),\n ])", - "docstring": "bind queue to an exchange\n\n This method binds a queue to an exchange. Until a queue is\n bound it will not receive any messages. In a classic\n messaging model, store-and-forward queues are bound to a dest\n exchange and subscription queues are bound to a dest_wild\n exchange.\n\n RULE:\n\n A server MUST allow ignore duplicate bindings - that is,\n two or more bind methods for a specific queue, with\n identical arguments - without treating these as an error.\n\n RULE:\n\n If a bind fails, the server MUST raise a connection\n exception.\n\n RULE:\n\n The server MUST NOT allow a durable queue to bind to a\n transient exchange. If the client attempts this the server\n MUST raise a channel exception.\n\n RULE:\n\n Bindings for durable queues are automatically durable and\n the server SHOULD restore such bindings after a server\n restart.\n\n RULE:\n\n If the client attempts to an exchange that was declared as\n internal, the server MUST raise a connection exception\n with reply code 530 (not allowed).\n\n RULE:\n\n The server SHOULD support at least 4 bindings per queue,\n and ideally, impose no limit except as defined by\n available resources.\n\n PARAMETERS:\n queue: shortstr\n\n Specifies the name of the queue to bind. 
If the queue\n name is empty, refers to the current queue for the\n channel, which is the last declared queue.\n\n RULE:\n\n If the client did not previously declare a queue,\n and the queue name in this method is empty, the\n server MUST raise a connection exception with\n reply code 530 (not allowed).\n\n RULE:\n\n If the queue does not exist the server MUST raise\n a channel exception with reply code 404 (not\n found).\n\n exchange: shortstr\n\n The name of the exchange to bind to.\n\n RULE:\n\n If the exchange does not exist the server MUST\n raise a channel exception with reply code 404 (not\n found).\n\n routing_key: shortstr\n\n message routing key\n\n Specifies the routing key for the binding. The\n routing key is used for routing messages depending on\n the exchange configuration. Not all exchanges use a\n routing key - refer to the specific exchange\n documentation. If the routing key is empty and the\n queue name is empty, the routing key will be the\n current queue for the channel, which is the last\n declared queue.\n\n nowait: boolean\n\n do not send a reply method\n\n If set, the server will not respond to the method. The\n client should not wait for a reply method. If the\n server could not complete the method it will raise a\n channel or connection exception.\n\n arguments: table\n\n arguments for binding\n\n A set of arguments for the binding. The syntax and\n semantics of these arguments depends on the exchange\n class.\n\n ticket: short\n\n The client provides a valid access ticket giving\n \"active\" access rights to the queue's access realm." - }, - { - "code": "def save(self):\n import pickle\n if self._faces is not None:\n names = ['_vertices', '_faces']\n else:\n names = ['_vertices_indexed_by_faces']\n if self._vertex_colors is not None:\n names.append('_vertex_colors')\n elif self._vertex_colors_indexed_by_faces is not None:\n names.append('_vertex_colors_indexed_by_faces')\n if self._face_colors is not None:\n names.append('_face_colors')\n elif self._face_colors_indexed_by_faces is not None:\n names.append('_face_colors_indexed_by_faces')\n state = dict([(n, getattr(self, n)) for n in names])\n return pickle.dumps(state)", - "docstring": "Serialize this mesh to a string appropriate for disk storage\n\n Returns\n -------\n state : dict\n The state." - }, - { - "code": "def db_exists(name, **connection_args):\n dbc = _connect(**connection_args)\n if dbc is None:\n return False\n cur = dbc.cursor()\n args = {\"dbname\": name}\n qry = \"SHOW DATABASES LIKE %(dbname)s;\"\n try:\n _execute(cur, qry, args)\n except MySQLdb.OperationalError as exc:\n err = 'MySQL Error {0}: {1}'.format(*exc.args)\n __context__['mysql.error'] = err\n log.error(err)\n return False\n cur.fetchall()\n return cur.rowcount == 1", - "docstring": "Checks if a database exists on the MySQL server.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' mysql.db_exists 'dbname'" - }, - { - "code": "def _to_java(self):\n sc = SparkContext._active_spark_context\n java_models = [model._to_java() for model in self.models]\n java_models_array = JavaWrapper._new_java_array(\n java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)\n metadata = JavaParams._new_java_obj(\"org.apache.spark.sql.types.Metadata\")\n _java_obj = JavaParams._new_java_obj(\"org.apache.spark.ml.classification.OneVsRestModel\",\n self.uid, metadata.empty(), java_models_array)\n _java_obj.set(\"classifier\", self.getClassifier()._to_java())\n _java_obj.set(\"featuresCol\", self.getFeaturesCol())\n _java_obj.set(\"labelCol\", self.getLabelCol())\n _java_obj.set(\"predictionCol\", self.getPredictionCol())\n return _java_obj", - "docstring": "Transfer this instance to a Java OneVsRestModel. Used for ML persistence.\n\n :return: Java object equivalent to this instance." - }, - { - "code": "def subnet_group_present(name, subnet_ids=None, subnet_names=None,\n description=None, tags=None, region=None,\n key=None, keyid=None, profile=None):\n ret = {'name': name,\n 'result': True,\n 'comment': '',\n 'changes': {}\n }\n exists = __salt__['boto_elasticache.subnet_group_exists'](name=name, tags=tags, region=region, key=key,\n keyid=keyid, profile=profile)\n if not exists:\n if __opts__['test']:\n ret['comment'] = 'Subnet group {0} is set to be created.'.format(name)\n ret['result'] = None\n return ret\n created = __salt__['boto_elasticache.create_subnet_group'](name=name, subnet_ids=subnet_ids,\n subnet_names=subnet_names,\n description=description, tags=tags,\n region=region, key=key, keyid=keyid,\n profile=profile)\n if not created:\n ret['result'] = False\n ret['comment'] = 'Failed to create {0} subnet group.'.format(name)\n return ret\n ret['changes']['old'] = None\n ret['changes']['new'] = name\n ret['comment'] = 'Subnet group {0} created.'.format(name)\n return ret\n ret['comment'] = 'Subnet group present.'\n return ret", - "docstring": "Ensure ElastiCache subnet group exists.\n\n .. versionadded:: 2015.8.0\n\n name\n The name for the ElastiCache subnet group. This value is stored as a lowercase string.\n\n subnet_ids\n A list of VPC subnet IDs for the cache subnet group. Exclusive with subnet_names.\n\n subnet_names\n A list of VPC subnet names for the cache subnet group. Exclusive with subnet_ids.\n\n description\n Subnet group description.\n\n tags\n A list of tags.\n\n region\n Region to connect to.\n\n key\n Secret key to be used.\n\n keyid\n Access key to be used.\n\n profile\n A dict with region, key and keyid, or a pillar key (string) that\n contains a dict with region, key and keyid." - }, - { - "code": "def get_image_tar(image_path):\n bot.debug('Generate file system tar...') \n file_obj = Client.image.export(image_path=image_path)\n if file_obj is None:\n bot.error(\"Error generating tar, exiting.\")\n sys.exit(1)\n tar = tarfile.open(file_obj)\n return file_obj, tar", - "docstring": "get an image tar, either written in memory or to\n the file system. file_obj will either be the file object,\n or the file itself." - }, - { - "code": "def setup_standalone_signals(instance):\n window = instance.get_widget('config-window')\n window.connect('delete-event', Gtk.main_quit)\n button = instance.get_widget('button1')\n button.handler_block_by_func(instance.gtk_widget_destroy)\n button.connect('clicked', Gtk.main_quit)\n return instance", - "docstring": "Called when prefs dialog is running in standalone mode. 
It\n makes the delete event of dialog and click on close button finish\n the application." - }, - { - "code": "def get_equivalent_atoms(self, tolerance=0.3):\n PA = self._get_point_group_analyzer(tolerance=tolerance)\n eq = PA.get_equivalent_atoms()\n self._convert_eq(eq)\n return eq", - "docstring": "Returns sets of equivalent atoms with symmetry operations\n\n Args:\n tolerance (float): Tolerance to generate the full set of symmetry\n operations.\n\n Returns:\n dict: The returned dictionary has two possible keys:\n\n ``eq_sets``:\n A dictionary of indices mapping to sets of indices,\n each key maps to indices of all equivalent atoms.\n The keys are guaranteed to be not equivalent.\n\n ``sym_ops``:\n Twofold nested dictionary.\n ``operations[i][j]`` gives the symmetry operation\n that maps atom ``i`` unto ``j``." - }, - { - "code": "def list_objects(self, bucket_name, prefix='', recursive=False):\n is_valid_bucket_name(bucket_name)\n if prefix is None:\n prefix = ''\n method = 'GET'\n query = {\n 'max-keys': '1000',\n 'prefix': prefix\n }\n if not recursive:\n query['delimiter'] = '/'\n marker = ''\n is_truncated = True\n while is_truncated:\n if marker:\n query['marker'] = marker\n headers = {}\n response = self._url_open(method,\n bucket_name=bucket_name,\n query=query,\n headers=headers)\n objects, is_truncated, marker = parse_list_objects(response.data,\n bucket_name=bucket_name)\n for obj in objects:\n yield obj", - "docstring": "List objects in the given bucket.\n\n Examples:\n objects = minio.list_objects('foo')\n for current_object in objects:\n print(current_object)\n # hello\n # hello/\n # hello/\n # world/\n\n objects = minio.list_objects('foo', prefix='hello/')\n for current_object in objects:\n print(current_object)\n # hello/world/\n\n objects = minio.list_objects('foo', recursive=True)\n for current_object in objects:\n print(current_object)\n # hello/world/1\n # world/world/2\n # ...\n\n objects = minio.list_objects('foo', prefix='hello/',\n recursive=True)\n for current_object in objects:\n print(current_object)\n # hello/world/1\n # hello/world/2\n\n :param bucket_name: Bucket to list objects from\n :param prefix: String specifying objects returned must begin with\n :param recursive: If yes, returns all objects for a specified prefix\n :return: An iterator of objects in alphabetical order." - }, - { - "code": "def purge_content(self, account_id, urls):\n if isinstance(urls, six.string_types):\n urls = [urls]\n content_list = []\n for i in range(0, len(urls), MAX_URLS_PER_PURGE):\n content = self.account.purgeCache(urls[i:i + MAX_URLS_PER_PURGE], id=account_id)\n content_list.extend(content)\n return content_list", - "docstring": "Purges one or more URLs from the CDN edge nodes.\n\n :param int account_id: the CDN account ID from which content should\n be purged.\n :param urls: a string or a list of strings representing the CDN URLs\n that should be purged.\n :returns: a list of SoftLayer_Container_Network_ContentDelivery_PurgeService_Response objects\n which indicates if the purge for each url was SUCCESS, FAILED or INVALID_URL." 
- }, - { - "code": "def _format_evidence_text(stmt):\n def get_role(ag_ix):\n if isinstance(stmt, Complex) or \\\n isinstance(stmt, SelfModification) or \\\n isinstance(stmt, ActiveForm) or isinstance(stmt, Conversion) or\\\n isinstance(stmt, Translocation):\n return 'other'\n else:\n assert len(stmt.agent_list()) == 2, (len(stmt.agent_list()),\n type(stmt))\n return 'subject' if ag_ix == 0 else 'object'\n ev_list = []\n for ix, ev in enumerate(stmt.evidence):\n if ev.source_api == 'biopax' and \\\n 'source_sub_id' in ev.annotations and \\\n ev.annotations['source_sub_id']:\n source_api = '%s:%s' % (ev.source_api,\n ev.annotations['source_sub_id'])\n else:\n source_api = ev.source_api\n if ev.text is None:\n format_text = None\n else:\n indices = []\n for ix, ag in enumerate(stmt.agent_list()):\n if ag is None:\n continue\n try:\n ag_text = ev.annotations['agents']['raw_text'][ix]\n if ag_text is None:\n raise KeyError\n except KeyError:\n ag_text = ag.db_refs.get('TEXT')\n if ag_text is None:\n continue\n role = get_role(ix)\n tag_start = '' % role\n tag_close = ''\n indices += [(m.start(), m.start() + len(ag_text),\n ag_text, tag_start, tag_close)\n for m in re.finditer(re.escape(ag_text),\n ev.text)]\n format_text = tag_text(ev.text, indices)\n ev_list.append({'source_api': source_api,\n 'pmid': ev.pmid,\n 'text_refs': ev.text_refs,\n 'text': format_text,\n 'source_hash': ev.source_hash })\n return ev_list", - "docstring": "Returns evidence metadata with highlighted evidence text.\n\n Parameters\n ----------\n stmt : indra.Statement\n The Statement with Evidence to be formatted.\n\n Returns\n -------\n list of dicts\n List of dictionaries corresponding to each Evidence object in the\n Statement's evidence list. Each dictionary has keys 'source_api',\n 'pmid' and 'text', drawn from the corresponding fields in the\n Evidence objects. The text entry of the dict includes\n `` tags identifying the agents referenced by the Statement." - }, - { - "code": "def _create_event(instance, action):\n user = None\n user_repr = repr(user)\n if CUSER:\n user = CuserMiddleware.get_user()\n user_repr = repr(user)\n if user is not None and user.is_anonymous:\n user = None\n return TrackingEvent.objects.create(\n action=action,\n object=instance,\n object_repr=repr(instance),\n user=user,\n user_repr=user_repr,\n )", - "docstring": "Create a new event, getting the use if django-cuser is available." - }, - { - "code": "def write_frames(self, input, nframes = -1):\n if nframes == -1:\n if input.ndim == 1:\n nframes = input.size\n elif input.ndim == 2:\n nframes = input.shape[0]\n else:\n raise ValueError(\"Input has to be rank 1 (mono) or rank 2 \"\\\n \"(multi-channels)\")\n return self._sndfile.write_frames(input[:nframes,...])", - "docstring": "write data to file.\n\n :Parameters:\n input : ndarray\n array containing data to write.\n nframes : int\n number of frames to write.\n\n Notes\n -----\n\n - One column is one channel (one row per channel after 0.9)\n - updates the write pointer.\n - if float are given when the file contains integer data, you should\n put normalized data (that is the range [-1..1] will be written as the\n maximum range allowed by the integer bitwidth)." 
- }, - { - "code": "def from_string(cls, string):\n if string in units.UNITS_BY_ALL:\n return cls(description=string, unit=units.Unit(string))\n else:\n return cls(description=string)", - "docstring": "Convert a string into a Dimension" - }, - { - "code": "def _prepare_executor(self, data, executor):\n logger.debug(__(\"Preparing executor for Data with id {}\", data.id))\n import resolwe.flow.executors as executor_package\n exec_dir = os.path.dirname(inspect.getsourcefile(executor_package))\n dest_dir = self._get_per_data_dir('RUNTIME_DIR', data.location.subpath)\n dest_package_dir = os.path.join(dest_dir, 'executors')\n shutil.copytree(exec_dir, dest_package_dir)\n dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('RUNTIME_DIR_MODE', 0o755)\n os.chmod(dest_dir, dir_mode)\n class_name = executor.rpartition('.executors.')[-1]\n return '.{}'.format(class_name), dest_dir", - "docstring": "Copy executor sources into the destination directory.\n\n :param data: The :class:`~resolwe.flow.models.Data` object being\n prepared for.\n :param executor: The fully qualified name of the executor that\n is to be used for this data object.\n :return: Tuple containing the relative fully qualified name of\n the executor class ('relative' to how the executor will be\n run) and the path to the directory where the executor will\n be deployed.\n :rtype: (str, str)" - }, - { - "code": "def after_retract(reference_analysis):\n reference = reference_analysis.getSample()\n service = reference_analysis.getAnalysisService()\n worksheet = reference_analysis.getWorksheet()\n instrument = reference_analysis.getInstrument()\n if worksheet:\n slot = worksheet.get_slot_position_for(reference_analysis)\n refgid = reference_analysis.getReferenceAnalysesGroupID()\n ref = worksheet.add_reference_analysis(reference, service, slot, refgid)\n if not ref:\n logger.warn(\"Cannot add a retest for reference analysis {} into {}\"\n .format(reference_analysis.getId(), worksheet.getId()))\n return\n ref.setRetestOf(reference_analysis)\n ref.setResult(reference_analysis.getResult())\n if instrument:\n ref.setInstrument(instrument)\n instrument.reindexObject()\n wf.doActionFor(worksheet, \"rollback_to_open\")\n elif instrument:\n instrument.addReferences(reference, [api.get_uid(service)])\n instrument.reindexObject()", - "docstring": "Function triggered after a 'retract' transition for the reference\n analysis passed in is performed. The reference analysis transitions to\n \"retracted\" state and a new copy of the reference analysis is created" - }, - { - "code": "def long_name(self) -> str:\n if self.chat_alias:\n return \"{0} ({1})\".format(self.chat_alias, self.chat_name)\n else:\n return self.chat_name", - "docstring": "Shortcut property, if alias exists, this will provide the alias with name\n in parenthesis. 
Otherwise, this will return the name" - }, - { - "code": "def validate_examples(self, fail_fn):\n for collection, doc in self.examples():\n _log.debug(\"validating example in collection {}\".format(collection))\n sch = schema.get_schema(collection)\n result = sch.validate(doc)\n _log.debug(\"validation result: {}\".format(\"OK\" if result is None else result))\n if result is not None:\n fail_fn(\"Failed to validate sample document: {}\".format(result))", - "docstring": "Check the examples against the schema.\n\n :param fail_fn: Pass failure messages to this function\n :type fail_fn: function(str)" - }, - { - "code": "def batch(ctx, path, recursive, watch):\n batch = Batch(ctx.obj['config'], ctx.obj['cucco'])\n if os.path.exists(path):\n if watch:\n batch.watch(path, recursive)\n elif os.path.isfile(path):\n batch.process_file(path)\n else:\n batch.process_files(path, recursive)\n else:\n click.echo('Error: Specified path doesn\\'t exists', err=True)\n sys.exit(-1)", - "docstring": "Normalize files in a path.\n\n Apply normalizations over all files found in a given path.\n The normalizations applied will be those defined in the config\n file. If no config is specified, the default normalizations will\n be used." - }, - { - "code": "def kallisto_general_stats_table(self):\n headers = OrderedDict()\n headers['fragment_length'] = {\n 'title': 'Frag Length',\n 'description': 'Estimated average fragment length',\n 'min': 0,\n 'suffix': 'bp',\n 'scale': 'RdYlGn'\n }\n headers['percent_aligned'] = {\n 'title': '% Aligned',\n 'description': '% processed reads that were pseudoaligned',\n 'max': 100,\n 'min': 0,\n 'suffix': '%',\n 'scale': 'YlGn'\n }\n headers['pseudoaligned_reads'] = {\n 'title': '{} Aligned'.format(config.read_count_prefix),\n 'description': 'Pseudoaligned reads ({})'.format(config.read_count_desc),\n 'min': 0,\n 'scale': 'PuRd',\n 'modify': lambda x: x * config.read_count_multiplier,\n 'shared_key': 'read_count'\n }\n self.general_stats_addcols(self.kallisto_data, headers)", - "docstring": "Take the parsed stats from the Kallisto report and add it to the\n basic stats table at the top of the report" - }, - { - "code": "def _convert_types(self, a):\n if a.dtype != 'object':\n return a, None\n if len(a) == 0:\n return a.astype('U1'), None\n mask = pd.isnull(a)\n if mask.sum() > 0:\n a = a.copy()\n np.putmask(a, mask, '')\n else:\n mask = None\n if infer_dtype(a, skipna=False) == 'mixed':\n try:\n a = np.array([s.encode('ascii') for s in a])\n a = a.astype('O')\n except:\n raise ValueError(\"Column of type 'mixed' cannot be converted to string\")\n type_ = infer_dtype(a, skipna=False)\n if type_ in ['unicode', 'string']:\n max_len = max_len_string_array(a)\n return a.astype('U{:d}'.format(max_len)), mask\n else:\n raise ValueError('Cannot store arrays with {} dtype'.format(type_))", - "docstring": "Converts object arrays of strings to numpy string arrays" - }, - { - "code": "def generate_pseudo(strain_states, order=3):\n s = sp.Symbol('s')\n nstates = len(strain_states)\n ni = np.array(strain_states)*s\n mis, absent_syms = [], []\n for degree in range(2, order + 1):\n cvec, carr = get_symbol_list(degree)\n sarr = np.zeros((nstates, 6), dtype=object)\n for n, strain_v in enumerate(ni):\n exps = carr.copy()\n for i in range(degree - 1):\n exps = np.dot(exps, strain_v)\n exps /= np.math.factorial(degree - 1)\n sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]\n svec = sarr.ravel()\n present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])\n absent_syms += [set(cvec) - 
present_syms]\n m = np.zeros((6*nstates, len(cvec)))\n for n, c in enumerate(cvec):\n m[:, n] = v_diff(svec, c)\n mis.append(np.linalg.pinv(m))\n return mis, absent_syms", - "docstring": "Generates the pseudoinverse for a given set of strains.\n\n Args:\n strain_states (6xN array like): a list of voigt-notation\n \"strain-states\", i. e. perturbed indices of the strain\n as a function of the smallest strain e. g. (0, 1, 0, 0, 1, 0)\n order (int): order of pseudoinverse to calculate\n\n Returns:\n mis: pseudo inverses for each order tensor, these can\n be multiplied by the central difference derivative\n of the stress with respect to the strain state\n absent_syms: symbols of the tensor absent from the PI\n expression" - }, - { - "code": "def pretty_render(data, format='text', indent=0):\n if format == 'json':\n return render_json(data)\n elif format == 'html':\n return render_html(data)\n elif format == 'xml':\n return render_xml(data)\n else:\n return dict_to_plaintext(data, indent=indent)", - "docstring": "Render a dict based on a format" - }, - { - "code": "def get_normalized_bdew_profile(self):\n self.df['temperature'] = self.temperature.values\n self.df['temperature_geo'] = self.weighted_temperature(\n how='geometric_series')\n sf = self.get_sf_values()\n [a, b, c, d] = self.get_sigmoid_parameters()\n f = self.get_weekday_parameters()\n h = (a / (1 + (b / (self.df['temperature_geo'] - 40)) ** c) + d)\n kw = 1.0 / (sum(h * f) / 24)\n heat_profile_normalized = (kw * h * f * sf)\n return heat_profile_normalized", - "docstring": "Calculation of the normalized hourly heat demand" - }, - { - "code": "def push_firq_registers(self):\n self.cycles += 1\n self.push_word(self.system_stack_pointer, self.program_counter.value)\n self.push_byte(self.system_stack_pointer, self.get_cc_value())", - "docstring": "FIRQ - Fast Interrupt Request\n push PC and CC on System stack pointer" - }, - { - "code": "def iddtxt2groups(txt):\n try:\n txt = txt.decode('ISO-8859-2')\n except AttributeError as e:\n pass\n txt = nocomment(txt, '!')\n txt = txt.replace(\"\\\\group\", \"!-group\")\n txt = nocomment(txt, '\\\\')\n lines = txt.splitlines()\n lines = [line.strip() for line in lines]\n lines = [line for line in lines if line != '']\n txt = '\\n'.join(lines)\n gsplits = txt.split('!')\n gsplits = [gsplit.splitlines() for gsplit in gsplits]\n gsplits[0].insert(0, None)\n gdict = {}\n for gsplit in gsplits:\n gdict.update({gsplit[0]:gsplit[1:]})\n gdict = {k:'\\n'.join(v) for k, v in gdict.items()}\n gdict = {k:v.split(';') for k, v in gdict.items()}\n gdict = {k:[i.strip() for i in v] for k, v in gdict.items()}\n gdict = {k:[i.splitlines() for i in v] for k, v in gdict.items()}\n gdict = {k:[i for i in v if len(i) > 0] for k, v in gdict.items()}\n gdict = {k:[i[0] for i in v] for k, v in gdict.items()}\n gdict = {k:[i.split(',')[0] for i in v] for k, v in gdict.items()}\n nvalue = gdict.pop(None)\n gdict = {k[len('-group '):]:v for k, v in gdict.items()}\n gdict.update({None:nvalue})\n return gdict", - "docstring": "extract the groups from the idd file" - }, - { - "code": "def insert_record_by_fieldspecs_with_values(\n self,\n table: str,\n fieldspeclist: FIELDSPECLIST_TYPE) -> int:\n fields = []\n values = []\n for fs in fieldspeclist:\n fields.append(fs[\"name\"])\n values.append(fs[\"value\"])\n return self.insert_record(table, fields, values)", - "docstring": "Inserts a record into the database using a list of fieldspecs having\n their value stored under the 'value' key." 
- }, - { - "code": "def _parse_text(self, element_name, namespace=''):\n try:\n text = self._channel.find('.//' + namespace + element_name).text\n except AttributeError:\n raise Exception(\n 'Element, {0} not found in RSS feed'.format(element_name)\n )\n return text", - "docstring": "Returns the text, as a string, of the specified element in the specified\n namespace of the RSS feed.\n\n Takes element_name and namespace as strings." - }, - { - "code": "def getWorkingCollisionBoundsInfo(self):\n fn = self.function_table.getWorkingCollisionBoundsInfo\n pQuadsBuffer = HmdQuad_t()\n punQuadsCount = c_uint32()\n result = fn(byref(pQuadsBuffer), byref(punQuadsCount))\n return result, pQuadsBuffer, punQuadsCount.value", - "docstring": "Returns the number of Quads if the buffer points to null. Otherwise it returns Quads \n into the buffer up to the max specified from the working copy." - }, - { - "code": "def watch_dir(path: str) -> None:\n _compile_exclude_patterns()\n if config.autoreload or config.debug:\n p = pathlib.Path(path)\n p.resolve()\n _add_watch_path(p)", - "docstring": "Add ``path`` to watch for autoreload." - }, - { - "code": "def _realValue_to_float(value_str):\n if REAL_VALUE.match(value_str):\n value = float(value_str)\n else:\n value = None\n return value", - "docstring": "Convert a value string that conforms to DSP0004 `realValue`, into\n the corresponding float and return it.\n\n The special values 'INF', '-INF', and 'NAN' are supported.\n\n Note that the Python `float()` function supports a superset of input\n formats compared to the `realValue` definition in DSP0004. For example,\n \"1.\" is allowed for `float()` but not for `realValue`. In addition, it\n has the same support for Unicode decimal digits as `int()`.\n Therefore, the match patterns explicitly check for US-ASCII digits, and\n the `float()` function should never raise `ValueError`.\n\n Returns None if the value string does not conform to `realValue`." - }, - { - "code": "def combine(args, part=None):\n args = [cleanup(arg) for arg in args]\n if part is not None:\n parts, orders = part\n if numpy.array(orders).size == 1:\n orders = [int(numpy.array(orders).item())]*len(args)\n parts = numpy.array(parts).flatten()\n for i, arg in enumerate(args):\n m, n = float(parts[i]), float(orders[i])\n l = len(arg)\n args[i] = arg[int(m/n*l):int((m+1)/n*l)]\n shapes = [arg.shape for arg in args]\n size = numpy.prod(shapes, 0)[0]*numpy.sum(shapes, 0)[1]\n if size > 10**9:\n raise MemoryError(\"Too large sets\")\n if len(args) == 1:\n out = args[0]\n elif len(args) == 2:\n out = combine_two(*args)\n else:\n arg1 = combine_two(*args[:2])\n out = combine([arg1,]+args[2:])\n return out", - "docstring": "All linear combination of a list of list.\n\n Args:\n args (numpy.ndarray) : List of input arrays. Components to take linear\n combination of with `args[i].shape=(N[i], M[i])` where N is to be\n taken linear combination of and M is static. M[i] is set to 1 if\n missing.\n\n Returns:\n (numpy.array) : matrix of combinations with shape (numpy.prod(N),\n numpy.sum(M)).\n\n Examples:\n >>> A, B = [1,2], [[4,4],[5,6]]\n >>> print(chaospy.quad.combine([A, B]))\n [[1. 4. 4.]\n [1. 5. 6.]\n [2. 4. 4.]\n [2. 5. 
6.]]" - }, - { - "code": "def identifier(self):\n if len(self.polygons_full) != 1:\n raise TypeError('Identifier only valid for single body')\n return polygons.polygon_hash(self.polygons_full[0])", - "docstring": "A unique identifier for the path.\n\n Returns\n ---------\n identifier: (5,) float, unique identifier" - }, - { - "code": "def _add_task(self, tile_address, coroutine):\n self.verify_calling_thread(True, \"_add_task is not thread safe\")\n if tile_address not in self._tasks:\n self._tasks[tile_address] = []\n task = self._loop.create_task(coroutine)\n self._tasks[tile_address].append(task)", - "docstring": "Add a task from within the event loop.\n\n All tasks are associated with a tile so that they can be cleanly\n stopped when that tile is reset." - }, - { - "code": "def save(self, fname):\n element = _transform.SVGFigure(self.width, self.height)\n element.append(self)\n element.save(os.path.join(CONFIG['figure.save_path'], fname))", - "docstring": "Save figure to SVG file.\n\n Parameters\n ----------\n fname : str\n Full path to file." - }, - { - "code": "def cmd_slow_requests(self):\n slow_requests = [\n line.time_wait_response\n for line in self._valid_lines\n if line.time_wait_response > 1000\n ]\n return slow_requests", - "docstring": "List all requests that took a certain amount of time to be\n processed.\n\n .. warning::\n By now hardcoded to 1 second (1000 milliseconds), improve the\n command line interface to allow to send parameters to each command\n or globally." - }, - { - "code": "def send_vdp_query_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,\n vsiid, filter_frmt, gid, mac, vlan, oui_id,\n oui_data):\n if not self.is_ncb:\n LOG.error(\"EVB cannot be set on NB\")\n return\n vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,\n typeid_ver, vsiid_frmt, vsiid,\n filter_frmt, gid, mac, vlan,\n None, None)\n if len(vdp_key_str) == 0:\n LOG.error(\"NULL List\")\n return\n reply = self.run_vdptool([\"-t\", \"-i\", self.port_name, \"-R\", \"-V\", mode,\n \"-c\", vdp_key_str['mode'],\n \"-c\", vdp_key_str['mgrid'],\n \"-c\", vdp_key_str['typeid'],\n \"-c\", vdp_key_str['typeid_ver'],\n \"-c\", vdp_key_str['vsiid']])\n return reply", - "docstring": "Constructs and Sends the VDP Query Message.\n\n Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP\n Section for more detailed information\n :param mode: Associate or De-associate\n :param mgrid: MGR ID\n :param typeid: Type ID\n :param typeid_ver: Version of the Type ID\n :param vsiid_frmt: Format of the following VSI argument\n :param vsiid: VSI value\n :param filter_frmt: Filter Format\n :param gid: Group ID the vNIC belongs to\n :param mac: MAC Address of the vNIC\n :param vlan: VLAN of the vNIC\n :param oui_id: OUI Type\n :param oui_data: OUI Data\n :param sw_resp: Flag indicating if response is required from the daemon\n :return reply: Reply from vdptool" - }, - { - "code": "def is_me(self):\n logger.info(\"And arbiter is launched with the hostname:%s \"\n \"from an arbiter point of view of addr:%s\", self.host_name, socket.getfqdn())\n return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()", - "docstring": "Check if parameter name if same than name of this object\n\n TODO: is it useful?\n\n :return: true if parameter name if same than this name\n :rtype: bool" - }, - { - "code": "def return_file_objects(connection, container, prefix='database'):\n options = []\n meta_data = objectstore.get_full_container_list(\n connection, container, prefix='database')\n env = 
ENV.upper()\n for o_info in meta_data:\n expected_file = f'database.{ENV}'\n if o_info['name'].startswith(expected_file):\n dt = dateparser.parse(o_info['last_modified'])\n now = datetime.datetime.now()\n delta = now - dt\n LOG.debug('AGE: %d %s', delta.days, expected_file)\n options.append((dt, o_info))\n options.sort()\n return options", - "docstring": "Given connecton and container find database dumps" - }, - { - "code": "def signup(request):\n if request.method == 'GET':\n return render(request, 'user_signup.html', {}, help_text=signup.__doc__)\n elif request.method == 'POST':\n if request.user.is_authenticated() and hasattr(request.user, \"userprofile\"):\n return render_json(request, {\n 'error': _('User already logged in'),\n 'error_type': 'username_logged'\n }, template='user_json.html', status=400)\n credentials = json_body(request.body.decode(\"utf-8\"))\n error = _save_user(request, credentials, new=True)\n if error is not None:\n return render_json(request, error, template='user_json.html', status=400)\n else:\n auth.login(request, request.user)\n request.method = \"GET\"\n return profile(request, status=201)\n else:\n return HttpResponseBadRequest(\"method %s is not allowed\".format(request.method))", - "docstring": "Create a new user with the given credentials.\n\n GET parameters:\n html\n turn on the HTML version of the API\n\n POST parameters (JSON):\n username:\n user's name\n email:\n user's e-mail\n password:\n user's password\n password_check:\n user's password again to check it\n first_name (optional):\n user's first name\n last_name (optional):\n user's last name" - }, - { - "code": "def message_handler(self, target):\n def register(handler):\n self._register_message_handler_internal(target, handler)\n handler.unregister_mpv_messages = lambda: self.unregister_message_handler(handler)\n return handler\n return register", - "docstring": "Decorator to register a mpv script message handler.\n\n WARNING: Only one handler can be registered at a time for any given target.\n\n To unregister the message handler, call its ``unregister_mpv_messages`` function::\n\n player = mpv.MPV()\n @player.message_handler('foo')\n def my_handler(some, args):\n print(args)\n\n my_handler.unregister_mpv_messages()" - }, - { - "code": "def _do_digest(data, func):\n func = FuncReg.get(func)\n hash = FuncReg.hash_from_func(func)\n if not hash:\n raise ValueError(\"no available hash function for hash\", func)\n hash.update(data)\n return bytes(hash.digest())", - "docstring": "Return the binary digest of `data` with the given `func`." - }, - { - "code": "def coroutine(func):\n def wrapper(*args, **kwargs):\n gen = func(*args, **kwargs)\n val = next(gen)\n if val != None:\n raise TypeError('Unexpected value from start of coroutine')\n return gen\n wrapper.__name__ = func.__name__\n wrapper.__doc__ = func.__doc__\n return wrapper", - "docstring": "Wraps a PEP-342 enhanced generator in a way that avoids boilerplate of the \"priming\" call to ``next``.\n\n Args:\n func (Callable): The function constructing a generator to decorate.\n\n Returns:\n Callable: The decorated generator." 
- }, - { - "code": "def get_connector(database_name=None):\n from django.db import connections, DEFAULT_DB_ALIAS\n database_name = database_name or DEFAULT_DB_ALIAS\n connection = connections[database_name]\n engine = connection.settings_dict['ENGINE']\n connector_settings = settings.CONNECTORS.get(database_name, {})\n connector_path = connector_settings.get('CONNECTOR', CONNECTOR_MAPPING[engine])\n connector_module_path = '.'.join(connector_path.split('.')[:-1])\n module = import_module(connector_module_path)\n connector_name = connector_path.split('.')[-1]\n connector = getattr(module, connector_name)\n return connector(database_name, **connector_settings)", - "docstring": "Get a connector from its database key in setttings." - }, - { - "code": "def relpath(dataset_uri, item_identifier):\n dataset = dtoolcore.DataSet.from_uri(dataset_uri)\n try:\n props = dataset.item_properties(item_identifier)\n except KeyError:\n click.secho(\n \"No such item in dataset: {}\".format(item_identifier),\n fg=\"red\",\n err=True\n )\n sys.exit(21)\n click.secho(props[\"relpath\"])", - "docstring": "Return relpath associated with the item." - }, - { - "code": "def experiments_fmri_get(self, resource_url):\n obj_dir, obj_json, is_active, cache_id = self.get_object(resource_url)\n fmri_data = FunctionalDataHandle(obj_json, obj_dir)\n if not cache_id in self.cache:\n self.cache_add(resource_url, cache_id)\n return fmri_data", - "docstring": "Get handle for functional fMRI resource at given Url.\n\n Parameters\n ----------\n resource_url : string\n Url for fMRI resource at SCO-API\n\n Returns\n -------\n scoserv.FunctionalDataHandle\n Handle for funcrional MRI data resource" - }, - { - "code": "def tangent(f):\n node = annotate.resolve_calls(f)\n RemoveWith().visit(node)\n wrapped = functools.wraps(f)(compile_.compile_function(node))\n wrapped.tangent = f\n return wrapped", - "docstring": "A decorator which removes the `with insert_grad_of` statement.\n\n This allows the function to be called as usual.\n\n Args:\n f: A function\n\n Returns:\n A function with any `with insert_grad_of` context managers removed." - }, - { - "code": "def set_xylims(self, lims, axes=None, panel='top', **kws):\n panel = self.get_panel(panel)\n panel.set_xylims(lims, axes=axes, **kws)", - "docstring": "set xy limits" - }, - { - "code": "def preserve_cwd(func: Callable) -> Callable:\n def decorator(*args_, **kwargs) -> Any:\n cwd = os.getcwd()\n result = func(*args_, **kwargs)\n os.chdir(cwd)\n return result\n return decorator", - "docstring": "Decorator to preserve the current working directory in calls to the\n decorated function.\n\n Example:\n\n .. code-block:: python\n\n @preserve_cwd\n def myfunc():\n os.chdir(\"/faraway\")\n\n os.chdir(\"/home\")\n myfunc()\n assert os.getcwd() == \"/home\"" - }, - { - "code": "def compare_modules(file_, imports):\n modules = parse_requirements(file_)\n imports = [imports[i][\"name\"] for i in range(len(imports))]\n modules = [modules[i][\"name\"] for i in range(len(modules))]\n modules_not_imported = set(modules) - set(imports)\n return modules_not_imported", - "docstring": "Compare modules in a file to imported modules in a project.\n\n Args:\n file_ (str): File to parse for modules to be compared.\n imports (tuple): Modules being imported in the project.\n\n Returns:\n tuple: The modules not imported in the project, but do exist in the\n specified file." 
- }, - { - "code": "def auto_select_categorical_features(X, threshold=10):\n feature_mask = []\n for column in range(X.shape[1]):\n if sparse.issparse(X):\n indptr_start = X.indptr[column]\n indptr_end = X.indptr[column + 1]\n unique = np.unique(X.data[indptr_start:indptr_end])\n else:\n unique = np.unique(X[:, column])\n feature_mask.append(len(unique) <= threshold)\n return feature_mask", - "docstring": "Make a feature mask of categorical features in X.\n\n Features with less than 10 unique values are considered categorical.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Dense array or sparse matrix.\n\n threshold : int\n Maximum number of unique values per feature to consider the feature\n to be categorical.\n\n Returns\n -------\n feature_mask : array of booleans of size {n_features, }" - }, - { - "code": "def setMaxDaysBack(self, maxDaysBack):\n assert isinstance(maxDaysBack, int), \"maxDaysBack value has to be a positive integer\"\n assert maxDaysBack >= 1\n self.topicPage[\"maxDaysBack\"] = maxDaysBack", - "docstring": "what is the maximum allowed age of the results?" - }, - { - "code": "def save_attr(self, entity, value):\n if self.datatype == self.TYPE_MANY:\n self._save_m2m_attr(entity, value)\n else:\n self._save_single_attr(entity, value)", - "docstring": "Saves given EAV attribute with given value for given entity.\n\n If schema is not many-to-one, the value is saved to the corresponding\n Attr instance (which is created or updated).\n\n If schema is many-to-one, the value is processed thusly:\n\n * if value is iterable, all Attr instances for corresponding managed m2m\n schemata are updated (those with names from the value list are set to\n True, others to False). If a list item is not in available choices,\n ValueError is raised;\n * if the value is None, all corresponding Attr instances are reset to False;\n * if the value is neither a list nor None, it is wrapped into a list and\n processed as above (i.e. \"foo\" --> [\"foo\"])." - }, - { - "code": "def create_record_task(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None) -> RecordTask:\n return RecordTask(self.__hardware_source, frame_parameters, channels_enabled)", - "docstring": "Create a record task for this hardware source.\n\n .. versionadded:: 1.0\n\n :param frame_parameters: The frame parameters for the record. Pass None for defaults.\n :type frame_parameters: :py:class:`FrameParameters`\n :param channels_enabled: The enabled channels for the record. Pass None for defaults.\n :type channels_enabled: List of booleans.\n :return: The :py:class:`RecordTask` object.\n :rtype: :py:class:`RecordTask`\n\n Callers should call close on the returned task when finished.\n\n See :py:class:`RecordTask` for examples of how to use." - }, - { - "code": "def parse_reaction_equation_string(equation, default_compartment):\n def _translate_compartments(reaction, compartment):\n left = (((c.in_compartment(compartment), v)\n if c.compartment is None else (c, v))\n for c, v in reaction.left)\n right = (((c.in_compartment(compartment), v)\n if c.compartment is None else (c, v))\n for c, v in reaction.right)\n return Reaction(reaction.direction, left, right)\n eq = _REACTION_PARSER.parse(equation).normalized()\n return _translate_compartments(eq, default_compartment)", - "docstring": "Parse a string representation of a reaction equation.\n\n Converts undefined compartments to the default compartment." 
- }, - { - "code": "def ned2aer(n: float, e: float, d: float,\n deg: bool = True) -> Tuple[float, float, float]:\n return enu2aer(e, n, -d, deg=deg)", - "docstring": "converts North, East, Down to azimuth, elevation, range\n\n Parameters\n ----------\n\n n : float or numpy.ndarray of float\n North NED coordinate (meters)\n e : float or numpy.ndarray of float\n East NED coordinate (meters)\n d : float or numpy.ndarray of float\n Down NED coordinate (meters)\n deg : bool, optional\n degrees input/output (False: radians in/out)\n\n Results\n -------\n\n az : float or numpy.ndarray of float\n azimuth\n elev : float or numpy.ndarray of float\n elevation\n slantRange : float or numpy.ndarray of float\n slant range [meters]" - }, - { - "code": "def getBoneCount(self, action):\n fn = self.function_table.getBoneCount\n pBoneCount = c_uint32()\n result = fn(action, byref(pBoneCount))\n return result, pBoneCount.value", - "docstring": "Reads the number of bones in skeleton associated with the given action" - }, - { - "code": "def malloc(self, key, shape, dtype):\n if key not in self._memory or self._memory[key].shape != shape or self._memory[key].dtype != dtype:\n self._memory[key] = Shmem(key, shape, dtype, self._uuid)\n return self._memory[key].np_array", - "docstring": "Allocates a block of shared memory, and returns a numpy array whose data corresponds with that block.\n\n Args:\n key (str): The key to identify the block.\n shape (list of int): The shape of the numpy array to allocate.\n dtype (type): The numpy data type (e.g. np.float32).\n\n Returns:\n np.ndarray: The numpy array that is positioned on the shared memory." - }, - { - "code": "def get_task(client, task_id):\n endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])\n response = client.authenticated_request(endpoint)\n return response.json()", - "docstring": "Gets task information for the given ID" - }, - { - "code": "def inv(self):\n if self.det == 0:\n raise ValueError(\"SquareTensor is non-invertible\")\n return SquareTensor(np.linalg.inv(self))", - "docstring": "shorthand for matrix inverse on SquareTensor" - }, - { - "code": "def find_string_ids(self, substring, suffix_tree_id, limit=None):\n edge, ln = self.find_substring_edge(substring=substring, suffix_tree_id=suffix_tree_id)\n if edge is None:\n return set()\n string_ids = get_string_ids(\n node_id=edge.dest_node_id,\n node_repo=self.node_repo,\n node_child_collection_repo=self.node_child_collection_repo,\n stringid_collection_repo=self.stringid_collection_repo,\n length_until_end=edge.length + 1 - ln,\n limit=limit\n )\n return set(string_ids)", - "docstring": "Returns a set of IDs for strings that contain the given substring." - }, - { - "code": "def add(self, label):\n label.label_list = self\n self.label_tree.addi(label.start, label.end, label)", - "docstring": "Add a label to the end of the list.\n\n Args:\n label (Label): The label to add." - }, - { - "code": "def print_experiments(experiments):\n headers = [\"JOB NAME\", \"CREATED\", \"STATUS\", \"DURATION(s)\", \"INSTANCE\", \"DESCRIPTION\", \"METRICS\"]\n expt_list = []\n for experiment in experiments:\n expt_list.append([normalize_job_name(experiment.name),\n experiment.created_pretty, experiment.state,\n experiment.duration_rounded, experiment.instance_type_trimmed,\n experiment.description, format_metrics(experiment.latest_metrics)])\n floyd_logger.info(tabulate(expt_list, headers=headers))", - "docstring": "Prints job details in a table. 
Includes urls and mode parameters" - }, - { - "code": "def point_to_polygon_distance(polygon, pxx, pyy):\n pxx = numpy.array(pxx)\n pyy = numpy.array(pyy)\n assert pxx.shape == pyy.shape\n if pxx.ndim == 0:\n pxx = pxx.reshape((1, ))\n pyy = pyy.reshape((1, ))\n result = numpy.array([\n polygon.distance(shapely.geometry.Point(pxx.item(i), pyy.item(i)))\n for i in range(pxx.size)\n ])\n return result.reshape(pxx.shape)", - "docstring": "Calculate the distance to polygon for each point of the collection\n on the 2d Cartesian plane.\n\n :param polygon:\n Shapely \"Polygon\" geometry object.\n :param pxx:\n List or numpy array of abscissae values of points to calculate\n the distance from.\n :param pyy:\n Same structure as ``pxx``, but with ordinate values.\n :returns:\n Numpy array of distances in units of coordinate system. Points\n that lie inside the polygon have zero distance." - }, - { - "code": "def _gen_condition(cls, initial, new_public_keys):\n try:\n threshold = len(new_public_keys)\n except TypeError:\n threshold = None\n if isinstance(new_public_keys, list) and len(new_public_keys) > 1:\n ffill = ThresholdSha256(threshold=threshold)\n reduce(cls._gen_condition, new_public_keys, ffill)\n elif isinstance(new_public_keys, list) and len(new_public_keys) <= 1:\n raise ValueError('Sublist cannot contain single owner')\n else:\n try:\n new_public_keys = new_public_keys.pop()\n except AttributeError:\n pass\n if isinstance(new_public_keys, Fulfillment):\n ffill = new_public_keys\n else:\n ffill = Ed25519Sha256(\n public_key=base58.b58decode(new_public_keys))\n initial.add_subfulfillment(ffill)\n return initial", - "docstring": "Generates ThresholdSha256 conditions from a list of new owners.\n\n Note:\n This method is intended only to be used with a reduce function.\n For a description on how to use this method, see\n :meth:`~.Output.generate`.\n\n Args:\n initial (:class:`cryptoconditions.ThresholdSha256`):\n A Condition representing the overall root.\n new_public_keys (:obj:`list` of :obj:`str`|str): A list of new\n owners or a single new owner.\n\n Returns:\n :class:`cryptoconditions.ThresholdSha256`:" - }, - { - "code": "def restore_sampler_state(self):\n state = self.db.getstate() or {}\n sampler_state = state.get('sampler', {})\n self.__dict__.update(sampler_state)\n stoch_state = state.get('stochastics', {})\n for sm in self.stochastics:\n try:\n sm.value = stoch_state[sm.__name__]\n except:\n warnings.warn(\n 'Failed to restore state of stochastic %s from %s backend' %\n (sm.__name__, self.db.__name__))", - "docstring": "Restore the state of the sampler and to\n the state stored in the database." 
- }, - { - "code": "def add_fileline_to_docstring(module, incursive=True):\n def _add_fileline(obj):\n if obj.__doc__ is None or 'From:' in obj.__doc__:\n return\n fname = inspect.getsourcefile(obj)\n if fname is None:\n return\n try:\n line = inspect.getsourcelines(obj)[-1]\n except IOError:\n return\n obj.__doc__ += '\\n\\nFrom:%s:%d' % (fname, line)\n if isinstance(module, str):\n module = sys.modules[module]\n for _, obj in inspect.getmembers(module):\n if inspect.isbuiltin(obj):\n continue\n if inspect.isfunction(obj):\n _add_fileline(obj)\n if inspect.ismethod(obj):\n _add_fileline(obj.__func__)\n if inspect.isclass(obj) and incursive:\n add_fileline_to_docstring(obj, False)", - "docstring": "Append the definition position to each function contained in module.\n\n Examples\n --------\n # Put the following codes at the end of a file\n add_fileline_to_docstring(__name__)" - }, - { - "code": "def T(self):\n mycrs = matrix.to_list(self)\n trans_crs = []\n for cr in mycrs:\n trans_crs.append(_np.transpose(cr, [0, 2, 1, 3]))\n return matrix.from_list(trans_crs)", - "docstring": "Transposed TT-matrix" - }, - { - "code": "def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):\n if allele is not None:\n try:\n transform = self.allele_to_percent_rank_transform[allele]\n return transform.transform(affinities)\n except KeyError:\n msg = \"Allele %s has no percentile rank information\" % allele\n if throw:\n raise ValueError(msg)\n else:\n warnings.warn(msg)\n return numpy.ones(len(affinities)) * numpy.nan\n if alleles is None:\n raise ValueError(\"Specify allele or alleles\")\n df = pandas.DataFrame({\"affinity\": affinities})\n df[\"allele\"] = alleles\n df[\"result\"] = numpy.nan\n for (allele, sub_df) in df.groupby(\"allele\"):\n df.loc[sub_df.index, \"result\"] = self.percentile_ranks(\n sub_df.affinity, allele=allele, throw=throw)\n return df.result.values", - "docstring": "Return percentile ranks for the given ic50 affinities and alleles.\n\n The 'allele' and 'alleles' argument are as in the `predict` method.\n Specify one of these.\n\n Parameters\n ----------\n affinities : sequence of float\n nM affinities\n allele : string\n alleles : sequence of string\n throw : boolean\n If True, a ValueError will be raised in the case of unsupported\n alleles. 
If False, a warning will be logged and NaN will be returned\n for those percentile ranks.\n\n Returns\n -------\n numpy.array of float" - }, - { - "code": "def find_numeration(line):\n patterns = (\n re_numeration_vol_page_yr,\n re_numeration_vol_nucphys_page_yr,\n re_numeration_nucphys_vol_page_yr,\n re_numeration_vol_subvol_nucphys_yr_page,\n re_numeration_vol_nucphys_yr_subvol_page,\n re_numeration_vol_yr_page,\n re_numeration_nucphys_vol_yr_page,\n re_numeration_vol_nucphys_series_yr_page,\n re_numeration_vol_series_nucphys_page_yr,\n re_numeration_vol_nucphys_series_page_yr,\n re_numeration_yr_vol_page,\n )\n for pattern in patterns:\n match = pattern.match(line)\n if match:\n info = match.groupdict()\n series = info.get('series', None)\n if not series:\n series = extract_series_from_volume(info['vol'])\n if not info['vol_num']:\n info['vol_num'] = info['vol_num_alt']\n if not info['vol_num']:\n info['vol_num'] = info['vol_num_alt2']\n return {'year': info.get('year', None),\n 'series': series,\n 'volume': info['vol_num'],\n 'page': info['page'] or info['jinst_page'],\n 'page_end': info['page_end'],\n 'len': match.end()}\n return None", - "docstring": "Given a reference line, attempt to locate instances of citation\n 'numeration' in the line.\n @param line: (string) the reference line.\n @return: (string) the reference line after numeration has been checked\n and possibly recognized/marked-up." - }, - { - "code": "def encode_int(cls, obj):\n if not isinstance(obj, int):\n raise TypeError(\"cannot encode non-integer object in encode_int(): object was {} (type '{}').\"\n .format(obj, type(obj)))\n return str(obj).encode('ascii')", - "docstring": "Verify the object is an int, and ASCII-encode it.\n\n :param int obj: An integer to be encoded.\n :raises: :class:`TypeError` if `obj` is not an integer.\n :return: A binary representation of the int `obj` suitable to pass as the `payload` to\n send_exit()." 
- }, - { - "code": "def dafac(handle, buffer):\n handle = ctypes.c_int(handle)\n lenvals = ctypes.c_int(len(max(buffer, key=len)) + 1)\n n = ctypes.c_int(len(buffer))\n buffer = stypes.listToCharArrayPtr(buffer)\n libspice.dafac_c(handle, n, lenvals, buffer)", - "docstring": "Add comments from a buffer of character strings to the comment\n area of a binary DAF file, appending them to any comments which\n are already present in the file's comment area.\n\n http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafac_c.html\n\n :param handle: handle of a DAF opened with write access.\n :type handle: int\n :param buffer: Buffer of comments to put into the comment area.\n :type buffer: list[str]" - }, - { - "code": "def center_str(txt, font_name, font_size, offset=0):\n return -(text_width(txt, font_name, font_size) / 2.0) + offset", - "docstring": "Center a string on the x axis of a reportslab canvas" - }, - { - "code": "def _validate_precinct_size(self, cparams):\n code_block_specified = False\n if cparams.cblockw_init != 0 and cparams.cblockh_init != 0:\n code_block_specified = True\n if cparams.res_spec != 0:\n for j in range(cparams.res_spec):\n prch = cparams.prch_init[j]\n prcw = cparams.prcw_init[j]\n if j == 0 and code_block_specified:\n height, width = cparams.cblockh_init, cparams.cblockw_init\n if prch < height * 2 or prcw < width * 2:\n msg = (\"The highest resolution precinct size \"\n \"({prch} x {prcw}) must be at least twice that \"\n \"of the code block size \"\n \"({cbh} x {cbw}).\")\n msg = msg.format(prch=prch, prcw=prcw,\n cbh=height, cbw=width)\n raise IOError(msg)\n if ((math.log(prch, 2) != math.floor(math.log(prch, 2)) or\n math.log(prcw, 2) != math.floor(math.log(prcw, 2)))):\n msg = (\"Bad precinct size ({height} x {width}). \"\n \"Precinct dimensions must be powers of 2.\")\n msg = msg.format(height=prch, width=prcw)\n raise IOError(msg)", - "docstring": "Precinct dimensions must satisfy certain restrictions if specified.\n\n They must both be a power of 2 and must both be at least twice the\n size of their codeblock size counterparts." - }, - { - "code": "def subscribe(self, _func=None, needs=(), returns=(), modifies=(),\n **conditions):\n modifies = set(modifies)\n parameters = set(needs) | modifies\n needs = parameters | set(conditions)\n if not needs:\n raise ValueError(\"tried to hook nothing\")\n returns = set(returns) | modifies\n def deco(func):\n self.hooks.append(_Hook(\n func, needs, parameters, returns, conditions))\n return func\n if _func is not None:\n deco(_func)\n else:\n return deco", - "docstring": "Add a hook to a pangler.\n\n This method can either be used as a decorator for a function or method,\n or standalone on a callable.\n\n * `needs` is an iterable of parameters that this hook operates on.\n * `returns` is an iterable of parameters that this hook will modify\n and then pass back to the pangler.\n * `modifies` is a convenient way to specify that this hook both needs\n and returns a parameter.\n * The rest of the keyword arguments are parameter predicates." 
- }, - { - "code": "def shell_config_for(shell, vexrc, environ):\n here = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(here, 'shell_configs', shell)\n try:\n with open(path, 'rb') as inp:\n data = inp.read()\n except FileNotFoundError as error:\n if error.errno != 2:\n raise\n return b''\n ve_base = vexrc.get_ve_base(environ).encode('ascii')\n if ve_base and not scary_path(ve_base) and os.path.exists(ve_base):\n data = data.replace(b'$WORKON_HOME', ve_base)\n return data", - "docstring": "return completion config for the named shell." - }, - { - "code": "def insert(self, bs, pos=None):\n bs = Bits(bs)\n if not bs.len:\n return self\n if bs is self:\n bs = self.__copy__()\n if pos is None:\n try:\n pos = self._pos\n except AttributeError:\n raise TypeError(\"insert require a bit position for this type.\")\n if pos < 0:\n pos += self.len\n if not 0 <= pos <= self.len:\n raise ValueError(\"Invalid insert position.\")\n self._insert(bs, pos)", - "docstring": "Insert bs at bit position pos.\n\n bs -- The bitstring to insert.\n pos -- The bit position to insert at.\n\n Raises ValueError if pos < 0 or pos > self.len." - }, - { - "code": "def build_dummies_dict(data):\n unique_val_list = unique(data)\n output = {}\n for val in unique_val_list:\n output[val] = (data == val)\n return output", - "docstring": "Return a dict with unique values as keys and vectors as values" - }, - { - "code": "def check_managed(\n name,\n source,\n source_hash,\n source_hash_name,\n user,\n group,\n mode,\n attrs,\n template,\n context,\n defaults,\n saltenv,\n contents=None,\n skip_verify=False,\n seuser=None,\n serole=None,\n setype=None,\n serange=None,\n **kwargs):\n source, source_hash = source_list(source,\n source_hash,\n saltenv)\n sfn = ''\n source_sum = None\n if contents is None:\n sfn, source_sum, comments = get_managed(\n name,\n template,\n source,\n source_hash,\n source_hash_name,\n user,\n group,\n mode,\n attrs,\n saltenv,\n context,\n defaults,\n skip_verify,\n **kwargs)\n if comments:\n __clean_tmp(sfn)\n return False, comments\n changes = check_file_meta(name, sfn, source, source_sum, user,\n group, mode, attrs, saltenv, contents,\n seuser=seuser, serole=serole, setype=setype,\n serange=serange)\n if name.startswith(tempfile.gettempdir()):\n for key in ['user', 'group', 'mode']:\n changes.pop(key, None)\n __clean_tmp(sfn)\n if changes:\n log.info(changes)\n comments = ['The following values are set to be changed:\\n']\n comments.extend('{0}: {1}\\n'.format(key, val)\n for key, val in six.iteritems(changes))\n return None, ''.join(comments)\n return True, 'The file {0} is in the correct state'.format(name)", - "docstring": "Check to see what changes need to be made for a file\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' file.check_managed /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': }' root, root, '755' jinja True None None base" - }, - { - "code": "def find_author(self):\n return Author(name=self.context.capture('git', 'config', 'user.name', check=False, silent=True),\n email=self.context.capture('git', 'config', 'user.email', check=False, silent=True))", - "docstring": "Get the author information from the version control system." - }, - { - "code": "def image_url(self):\n return construct_api_url(self.input, 'image', self.resolvers, False, self.get3d, False, **self.kwargs)", - "docstring": "URL of a GIF image." 
- }, - { - "code": "def write_nowait(self, item):\n self._queue.put_nowait(item)\n self._can_read.set()\n if self._queue.full():\n self._can_write.clear()", - "docstring": "Write in the box in a non-blocking manner.\n\n If the box is full, an exception is thrown. You should always check\n for fullness with `full` or `wait_not_full` before calling this method.\n\n :param item: An item." - }, - { - "code": "def _file_write(path, content):\n with salt.utils.files.fopen(path, 'w+') as fp_:\n fp_.write(salt.utils.stringutils.to_str(content))\n fp_.close()", - "docstring": "Write content to a file" - }, - { - "code": "def refresh_db(failhard=False, **kwargs):\n salt.utils.pkg.clear_rtag(__opts__)\n ret = {}\n error_repos = []\n cmd = ['opkg', 'update']\n call = __salt__['cmd.run_all'](cmd,\n output_loglevel='trace',\n python_shell=False,\n ignore_retcode=True,\n redirect_stderr=True)\n out = call['stdout']\n prev_line = ''\n for line in salt.utils.itertools.split(out, '\\n'):\n if 'Inflating' in line:\n key = line.strip().split()[1][:-1]\n ret[key] = True\n elif 'Updated source' in line:\n key = prev_line.strip().split()[1][:-1]\n ret[key] = True\n elif 'Failed to download' in line:\n key = line.strip().split()[5].split(',')[0]\n ret[key] = False\n error_repos.append(key)\n prev_line = line\n if failhard and error_repos:\n raise CommandExecutionError(\n 'Error getting repos: {0}'.format(', '.join(error_repos))\n )\n if call['retcode'] != 0 and not error_repos:\n raise CommandExecutionError(out)\n return ret", - "docstring": "Updates the opkg database to latest packages based upon repositories\n\n Returns a dict, with the keys being package databases and the values being\n the result of the update attempt. Values can be one of the following:\n\n - ``True``: Database updated successfully\n - ``False``: Problem updating database\n\n failhard\n If False, return results of failed lines as ``False`` for the package\n database that encountered the error.\n If True, raise an error with a list of the package databases that\n encountered errors.\n\n .. versionadded:: 2018.3.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.refresh_db" - }, - { - "code": "def flushall(host=None, port=None, db=None, password=None):\n server = _connect(host, port, db, password)\n return server.flushall()", - "docstring": "Remove all keys from all databases\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' redis.flushall" - }, - { - "code": "def tooltip_query(self, widget, x, y, keyboard_mode, tooltip):\n tooltip.set_text(subprocess.getoutput(\"acpi\"))\n return True", - "docstring": "Set tooltip which appears when you hover mouse curson onto icon in system panel." - }, - { - "code": "def _create_exprs_using_func(self, f, columns):\n expressions = map(lambda c: f(c).alias(c),\n self._columns)\n return expressions", - "docstring": "Create aggregate expressions using the provided function\n with the result coming back as the original column name." - }, - { - "code": "def get_arg_or_attr(self, name, default=None):\n if name in self.flow_args:\n return self.flow_args[name]\n try:\n return getattr(self, name)\n except AttributeError:\n return default", - "docstring": "Returns flow argument, as provided with sitegate decorators\n or attribute set as a flow class attribute or default." 
- }, - { - "code": "def save(self, data, dtype_out_time, dtype_out_vert=False,\n save_files=True, write_to_tar=False):\n self._update_data_out(data, dtype_out_time)\n if save_files:\n self._save_files(data, dtype_out_time)\n if write_to_tar and self.proj.tar_direc_out:\n self._write_to_tar(dtype_out_time)\n logging.info('\\t{}'.format(self.path_out[dtype_out_time]))", - "docstring": "Save aospy data to data_out attr and to an external file." - }, - { - "code": "def deploy():\n _require_root()\n if not confirm(\"This will apply any available migrations to the database. Has the database been backed up?\"):\n abort(\"Aborted.\")\n if not confirm(\"Are you sure you want to deploy?\"):\n abort(\"Aborted.\")\n with lcd(PRODUCTION_DOCUMENT_ROOT):\n with shell_env(PRODUCTION=\"TRUE\"):\n local(\"git pull\")\n with open(\"requirements.txt\", \"r\") as req_file:\n requirements = req_file.read().strip().split()\n try:\n pkg_resources.require(requirements)\n except pkg_resources.DistributionNotFound:\n local(\"pip install -r requirements.txt\")\n except Exception:\n traceback.format_exc()\n local(\"pip install -r requirements.txt\")\n else:\n puts(\"Python requirements already satisfied.\")\n with prefix(\"source /usr/local/virtualenvs/ion/bin/activate\"):\n local(\"./manage.py collectstatic --noinput\", shell=\"/bin/bash\")\n local(\"./manage.py migrate\", shell=\"/bin/bash\")\n restart_production_gunicorn(skip=True)\n puts(\"Deploy complete.\")", - "docstring": "Deploy to production." - }, - { - "code": "def remove_lambda_functions():\n logger.debug(\"[\n client = boto3.client('lambda', region_name=PRIMARY_REGION)\n responses = list()\n for label in LAMBDA_FUNCTIONS:\n try:\n response = client.delete_function(\n FunctionName=label,\n )\n except client.exceptions.ResourceNotFoundException:\n logger.info(\"[!] Function %s already removed\" % (label))\n continue\n responses.append(response)\n logger.debug(\"[*] Removed %s function\" % (label))\n logger.info(\"[\n return responses", - "docstring": "Remove the Blockade Lambda functions." - }, - { - "code": "def IsPipe(self):\n if self._stat_object is None:\n self._stat_object = self._GetStat()\n if self._stat_object is not None:\n self.entry_type = self._stat_object.type\n return self.entry_type == definitions.FILE_ENTRY_TYPE_PIPE", - "docstring": "Determines if the file entry is a pipe.\n\n Returns:\n bool: True if the file entry is a pipe." - }, - { - "code": "def _get_tough_method(self, method):\n def tough_method(*args, **kwargs):\n transaction = self._transaction\n if not transaction:\n try:\n if not self._con.db.status:\n raise AttributeError\n if self._maxusage:\n if self._usage >= self._maxusage:\n raise AttributeError\n except Exception:\n self.reset()\n try:\n result = method(*args, **kwargs)\n except Exception:\n if transaction:\n self._transaction = False\n raise\n elif self._con.db.status:\n raise\n else:\n self.reset()\n result = method(*args, **kwargs)\n self._usage += 1\n return result\n return tough_method", - "docstring": "Return a \"tough\" version of a connection class method.\n\n The tough version checks whether the connection is bad (lost)\n and automatically and transparently tries to reset the connection\n if this is the case (for instance, the database has been restarted)." 
- }, - { - "code": "def read_all(self) -> str:\n try:\n buffered_bytes = self.bytes_buffer.getvalue()\n if buffered_bytes is None:\n return ''\n return buffered_bytes.decode(self.source_encoding)\n except Exception as err:\n return 'Redirect Buffer Error: {}'.format(err)", - "docstring": "Reads the current state of the buffer and returns a string those\n contents\n\n :return:\n A string for the current state of the print buffer contents" - }, - { - "code": "def search(self, title=None, libtype=None, **kwargs):\n args = {}\n if title:\n args['title'] = title\n if libtype:\n args['type'] = utils.searchType(libtype)\n for attr, value in kwargs.items():\n args[attr] = value\n key = '/library/all%s' % utils.joinArgs(args)\n return self.fetchItems(key)", - "docstring": "Searching within a library section is much more powerful. It seems certain\n attributes on the media objects can be targeted to filter this search down\n a bit, but I havent found the documentation for it.\n\n Example: \"studio=Comedy%20Central\" or \"year=1999\" \"title=Kung Fu\" all work. Other items\n such as actor= seem to work, but require you already know the id of the actor.\n TLDR: This is untested but seems to work. Use library section search when you can." - }, - { - "code": "def grab_server_args():\n workbench_conf = ConfigParser.ConfigParser()\n config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')\n workbench_conf.read(config_path)\n server = workbench_conf.get('workbench', 'server_uri')\n port = workbench_conf.get('workbench', 'server_port')\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--server', type=str, default=server, help='location of workbench server')\n parser.add_argument('-p', '--port', type=int, default=port, help='port used by workbench server')\n args, commands = parser.parse_known_args()\n server = str(args.server)\n port = str(args.port)\n return {'server':server, 'port':port, 'commands': commands}", - "docstring": "Grab server info from configuration file" - }, - { - "code": "def read_metadata_by_name(self, name, metadata_key, caster=None):\n file_path = self._metadata_file_path(name, metadata_key)\n try:\n metadata = read_file(file_path).strip()\n return self._maybe_cast(metadata, caster)\n except (IOError, OSError):\n return None", - "docstring": "Read process metadata using a named identity.\n\n :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').\n :param string metadata_key: The metadata key (e.g. 'pid').\n :param func caster: A casting callable to apply to the read value (e.g. `int`)." 
- }, - { - "code": "def get_queryset(self):\n args = (self.model._mptt_meta.tree_id_attr,\n self.model._mptt_meta.left_attr)\n method = 'get_query_set' if django.VERSION < (1, 6) else 'get_queryset'\n return getattr(super(SectionManager, self), method)().order_by(*args)", - "docstring": "Use the same ordering as TreeManager" - }, - { - "code": "def clone(self):\n clone_copy = copy.deepcopy(self)\n clone_copy.state_ = EntityState()\n return clone_copy", - "docstring": "Deepclone the entity, but reset state" - }, - { - "code": "def gc(args):\n p = OptionParser(gc.__doc__)\n p.add_option(\"--binsize\", default=500, type=\"int\",\n help=\"Bin size to use\")\n opts, args = p.parse_args(args)\n if len(args) != 1:\n sys.exit(not p.print_help())\n fastafile, = args\n binsize = opts.binsize\n allbins = []\n for name, seq in parse_fasta(fastafile):\n for i in range(len(seq) / binsize):\n atcnt = gccnt = 0\n for c in seq[i * binsize: (i + 1) * binsize].upper():\n if c in \"AT\":\n atcnt += 1\n elif c in \"GC\":\n gccnt += 1\n totalcnt = atcnt + gccnt\n if totalcnt == 0:\n continue\n gcpct = gccnt * 100 / totalcnt\n allbins.append(gcpct)\n from jcvi.graphics.base import asciiplot\n from collections import Counter\n title = \"Total number of bins={}\".format(len(allbins))\n c = Counter(allbins)\n x, y = zip(*sorted(c.items()))\n asciiplot(x, y, title=title)", - "docstring": "%prog gc fastafile\n\n Plot G+C content distribution." - }, - { - "code": "def get_ts_stats_significance(self, x, ts, stat_ts_func, null_ts_func, B=1000, permute_fast=False, label_ts=''):\n stats_ts, pvals, nums = ts_stats_significance(\n ts, stat_ts_func, null_ts_func, B=B, permute_fast=permute_fast)\n return stats_ts, pvals, nums", - "docstring": "Returns the statistics, pvalues and the actual number of bootstrap\n samples." - }, - { - "code": "def dump_database(self, file_path, database=None, tables=None):\n if database:\n self.change_db(database)\n if not tables:\n tables = self.tables\n statements = [self.dump_table(table) for table in tqdm(tables, total=len(tables), desc='Generating dump files')]\n dump = 'SET FOREIGN_KEY_CHECKS=0;' + '\\n'.join(statements) + '\\nSET FOREIGN_KEY_CHECKS=1;'\n file_path = file_path if file_path.endswith('.sql') else file_path + '.sql'\n write_text(dump, file_path)\n return file_path", - "docstring": "Export the table structure and data for tables in a database.\n\n If not database is specified, it is assumed the currently connected database\n is the source. If no tables are provided, all tables will be dumped." - }, - { - "code": "def delete(self, service):\n url = self._url_format(service)\n return self.rest_action(\n self._session.delete, url\n )", - "docstring": "Generic DELETE operation for Learning Modules API.\n\n Args:\n service (str): The endpoint service to use, i.e. gradebook\n\n Raises:\n requests.RequestException: Exception connection error\n ValueError: Unable to decode response content\n\n Returns:\n list: the json-encoded content of the response" - }, - { - "code": "def isobaric_to_ambient_temperature(self, data):\n P = data['pressure'] / 100.0\n Tiso = data['temperature_iso']\n Td = data['temperature_dew_iso'] - 273.15\n e = 6.11 * 10**((7.5 * Td) / (Td + 273.3))\n w = 0.622 * (e / (P - e))\n temperature = Tiso - ((2.501 * 10.**6) / 1005.7) * w\n return temperature", - "docstring": "Calculates temperature from isobaric temperature.\n\n Parameters\n ----------\n data: DataFrame\n Must contain columns pressure, temperature_iso,\n temperature_dew_iso. 
Input temperature in K.\n\n Returns\n -------\n temperature : Series\n Temperature in K" - }, - { - "code": "def option(self, section, option):\n if self.config.has_section(section):\n if self.config.has_option(section, option):\n return (True, self.config.get(section, option))\n return (False, 'Option: ' + option + ' does not exist')\n return (False, 'Section: ' + section + ' does not exist')", - "docstring": "Returns the value of the option" - }, - { - "code": "def _loadConfiguration(self):\n configPath = os.path.join(self.path, \"config\")\n if not os.path.isdir(configPath):\n return\n config = Config(configPath)\n Config.mergeDictionaries(config.getData(), self.application.config)", - "docstring": "Load module configuration files.\n\n :return: " - }, - { - "code": "def resolve_path(target, start=os.path.curdir):\n\tr\n\treturn os.path.normpath(join(start, target))", - "docstring": "r\"\"\"\n\tFind a path from start to target where target is relative to start.\n\n\t>>> tmp = str(getfixture('tmpdir_as_cwd'))\n\n\t>>> findpath('d:\\\\')\n\t'd:\\\\'\n\n\t>>> findpath('d:\\\\', tmp)\n\t'd:\\\\'\n\n\t>>> findpath('\\\\bar', 'd:\\\\')\n\t'd:\\\\bar'\n\n\t>>> findpath('\\\\bar', 'd:\\\\foo') # fails with '\\\\bar'\n\t'd:\\\\bar'\n\n\t>>> findpath('bar', 'd:\\\\foo')\n\t'd:\\\\foo\\\\bar'\n\n\t>>> findpath('\\\\baz', 'd:\\\\foo\\\\bar') # fails with '\\\\baz'\n\t'd:\\\\baz'\n\n\t>>> os.path.abspath(findpath('\\\\bar')).lower()\n\t'c:\\\\bar'\n\n\t>>> os.path.abspath(findpath('bar'))\n\t'...\\\\bar'\n\n\t>>> findpath('..', 'd:\\\\foo\\\\bar')\n\t'd:\\\\foo'\n\n\tThe parent of the root directory is the root directory.\n\t>>> findpath('..', 'd:\\\\')\n\t'd:\\\\'" - }, - { - "code": "def _add_rid_to_vrf_list(self, ri):\n if ri.ex_gw_port or ri.router.get('gw_port'):\n driver = self.driver_manager.get_driver(ri.id)\n vrf_name = driver._get_vrf_name(ri)\n if not vrf_name:\n return\n if not self._router_ids_by_vrf.get(vrf_name):\n LOG.debug(\"++ CREATING VRF %s\" % vrf_name)\n driver._do_create_vrf(vrf_name)\n self._router_ids_by_vrf.setdefault(vrf_name, set()).add(\n ri.router['id'])", - "docstring": "Add router ID to a VRF list.\n\n In order to properly manage VRFs in the ASR, their\n usage has to be tracked. VRFs are provided with neutron\n router objects in their hosting_info fields of the gateway ports.\n This means that the VRF is only available when the gateway port\n of the router is set. VRFs can span routers, and even OpenStack\n tenants, so lists of routers that belong to the same VRF are\n kept in a dictionary, with the VRF name as the key." - }, - { - "code": "def tab_stop(editor, value):\n if value is None:\n editor.show_message('tabstop=%i' % editor.tabstop)\n else:\n try:\n value = int(value)\n if value > 0:\n editor.tabstop = value\n else:\n editor.show_message('Argument must be positive')\n except ValueError:\n editor.show_message('Number required after =')", - "docstring": "Set tabstop." - }, - { - "code": "def stability_of_timeseries(returns):\n if len(returns) < 2:\n return np.nan\n returns = np.asanyarray(returns)\n returns = returns[~np.isnan(returns)]\n cum_log_returns = np.log1p(returns).cumsum()\n rhat = stats.linregress(np.arange(len(cum_log_returns)),\n cum_log_returns)[2]\n return rhat ** 2", - "docstring": "Determines R-squared of a linear fit to the cumulative\n log returns. 
Computes an ordinary least squares linear fit,\n and returns R-squared.\n\n Parameters\n ----------\n returns : pd.Series or np.ndarray\n Daily returns of the strategy, noncumulative.\n - See full explanation in :func:`~empyrical.stats.cum_returns`.\n\n Returns\n -------\n float\n R-squared." - }, - { - "code": "def hash64(key, seed):\n hash_val = mmh3.hash64(key, seed)[0]\n return struct.unpack('>Q', struct.pack('q', hash_val))[0]", - "docstring": "Wrapper around mmh3.hash64 to get us single 64-bit value.\n\n This also does the extra work of ensuring that we always treat the\n returned values as big-endian unsigned long, like smhasher used to\n do." - }, - { - "code": "def _store_meta_info(self):\n if not self.meta_info['precursor_mz'] and self.meta_info['precursor_type'] and \\\n self.compound_info['exact_mass']:\n self.meta_info['precursor_mz'] = get_precursor_mz(float(self.compound_info['exact_mass']),\n self.meta_info['precursor_type'])\n if not self.meta_info['polarity']:\n m = re.search('^\\[.*\\](\\-|\\+)', self.meta_info['precursor_type'], re.IGNORECASE)\n if m:\n polarity = m.group(1).strip()\n if polarity == '+':\n self.meta_info['polarity'] = 'positive'\n elif polarity == '-':\n self.meta_info['polarity'] = 'negative'\n if not self.meta_info['accession']:\n self.meta_info['accession'] = 'unknown accession'\n self.meta_info_all.append(\n (str(self.current_id_meta),) +\n tuple(self.meta_info.values()) +\n (str(self.current_id_origin), self.compound_info['inchikey_id'],)\n )", - "docstring": "Update the meta dictionary with the current chunk of meta data details" - }, - { - "code": "def attributes_section(thing, doc, header_level):\n if not inspect.isclass(thing):\n return []\n props, class_doc = _split_props(thing, doc[\"Attributes\"])\n tl = type_list(inspect.signature(thing), class_doc, \"\\n\n if len(tl) == 0 and len(props) > 0:\n tl.append(\"\\n\n for prop in props:\n tl.append(f\"- [`{prop}`](\n return tl", - "docstring": "Generate an attributes section for classes.\n\n Prefers type annotations, if they are present.\n\n Parameters\n ----------\n thing : class\n Class to document\n doc : dict\n Numpydoc output\n header_level : int\n Number of `#`s to use for header\n\n Returns\n -------\n list of str\n Markdown formatted attribute list" - }, - { - "code": "def _compare_frame_rankings(ref, est, transitive=False):\n idx = np.argsort(ref)\n ref_sorted = ref[idx]\n est_sorted = est[idx]\n levels, positions, counts = np.unique(ref_sorted,\n return_index=True,\n return_counts=True)\n positions = list(positions)\n positions.append(len(ref_sorted))\n index = collections.defaultdict(lambda: slice(0))\n ref_map = collections.defaultdict(lambda: 0)\n for level, cnt, start, end in zip(levels, counts,\n positions[:-1], positions[1:]):\n index[level] = slice(start, end)\n ref_map[level] = cnt\n if transitive:\n level_pairs = itertools.combinations(levels, 2)\n else:\n level_pairs = [(i, i+1) for i in levels]\n level_pairs, lcounter = itertools.tee(level_pairs)\n normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))\n if normalizer == 0:\n return 0, 0.0\n inversions = 0\n for level_1, level_2 in level_pairs:\n inversions += _count_inversions(est_sorted[index[level_1]],\n est_sorted[index[level_2]])\n return inversions, float(normalizer)", - "docstring": "Compute the number of ranking disagreements in two lists.\n\n Parameters\n ----------\n ref : np.ndarray, shape=(n,)\n est : np.ndarray, shape=(n,)\n Reference and estimate ranked lists.\n `ref[i]` is the relevance score 
for point `i`.\n\n transitive : bool\n If true, all pairs of reference levels are compared.\n If false, only adjacent pairs of reference levels are compared.\n\n Returns\n -------\n inversions : int\n The number of pairs of indices `i, j` where\n `ref[i] < ref[j]` but `est[i] >= est[j]`.\n\n normalizer : float\n The total number of pairs (i, j) under consideration.\n If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|\n If transitive=False, then this is |{i,j) : ref[i] +1 = ref[j]}|" - }, - { - "code": "def rename_with_num(self, prefix=\"\", new_path=None, remove_desc=True):\n if new_path is None: numbered = self.__class__(new_temp_path())\n else: numbered = self.__class__(new_path)\n def numbered_iterator():\n for i,read in enumerate(self):\n read.id = prefix + str(i)\n read.seq = read.seq.upper()\n if remove_desc: read.description = \"\"\n yield read\n numbered.write(numbered_iterator())\n numbered.close()\n if new_path is None:\n os.remove(self.path)\n shutil.move(numbered, self.path)\n return numbered", - "docstring": "Rename every sequence based on a prefix and a number." - }, - { - "code": "def convert_to_layout_rules(x):\n if isinstance(x, LayoutRules):\n return x\n if isinstance(x, str):\n x = _parse_string_to_list_of_pairs(x)\n return LayoutRules(x)", - "docstring": "Converts input to a LayoutRules.\n\n Args:\n x: LayoutRules, str, or set-like of string pairs.\n\n Returns:\n LayoutRules." - }, - { - "code": "def get_representation(self, prefix=\"\", suffix=\"\\n\"):\n res = prefix + \"Section \" + self.get_section_name().upper() + suffix\n return res", - "docstring": "return the string representation of the current object." - }, - { - "code": "def set_value(cache, key, value):\n with cache as redis_connection:\n return redis_connection.set(key, value)", - "docstring": "Set a value by key.\n\n Arguments:\n cache:\n instance of Cache\n\n key:\n 'user:342:username'," - }, - { - "code": "def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2):\n cand1_f = tf.to_float(cand1)\n cand2_f = tf.to_float(cand2)\n step_size = cand2_f - cand1_f\n fpart = (x - cand1_f) / step_size\n ret = tf.where(tf.greater(fpart, noise), cand2, cand1)\n return ret", - "docstring": "Round-off x to cand1 or to cand2 in an unbiased way.\n\n Cand1 and cand2 are the same shape as x.\n For every element of x, the corresponding elements of cand1 and cand2 should\n be the two closest bfloat16 values to x. Order does not matter.\n cand1 and cand2 must differ from each other.\n\n Args:\n x: A float32 Tensor.\n noise: A Tensor broadcastable to the shape of x containing\n random uniform values in [0.0, 1.0].\n cand1: A bfloat16 Tensor the same shape as x.\n cand2: A bfloat16 Tensor the same shape as x.\n\n Returns:\n A bfloat16 Tensor." 
- }, - { - "code": "def removeSubscriber(self, email):\n headers, raw_data = self._perform_subscribe()\n missing_flag, raw_data = self._remove_subscriber(email, raw_data)\n if missing_flag:\n return\n self._update_subscribe(headers, raw_data)\n self.log.info(\"Successfully remove a subscriber: %s for \",\n email, self)", - "docstring": "Remove a subscriber from this workitem\n\n If the subscriber has not been added, no more actions will be\n performed.\n\n :param email: the subscriber's email" - }, - { - "code": "def pythonize(self, val):\n __boolean_states__ = {'1': True, 'yes': True, 'true': True, 'on': True,\n '0': False, 'no': False, 'false': False, 'off': False}\n if isinstance(val, bool):\n return val\n val = unique_value(val).lower()\n if val in list(__boolean_states__.keys()):\n return __boolean_states__[val]\n raise PythonizeError(\"Cannot convert '%s' to a boolean value\" % val)", - "docstring": "Convert value into a boolean\n\n :param val: value to convert\n :type val: bool, int, str\n :return: boolean corresponding to value ::\n\n {'1': True, 'yes': True, 'true': True, 'on': True,\n '0': False, 'no': False, 'false': False, 'off': False}\n\n :rtype: bool" - }, - { - "code": "def findPkt(pkt):\n\tret = []\n\twhile len(pkt)-10 >= 0:\n\t\tif pkt[0:4] != [0xFF, 0xFF, 0xFD, 0x00]:\n\t\t\tpkt.pop(0)\n\t\t\tcontinue\n\t\tlength = (pkt[6] << 8) + pkt[5]\n\t\tcrc_pos = 5 + length\n\t\tpkt_crc = pkt[crc_pos:crc_pos + 2]\n\t\tcrc = le(crc16(pkt[:crc_pos]))\n\t\tif pkt_crc == crc:\n\t\t\tpkt_end = crc_pos+2\n\t\t\tret.append(pkt[:pkt_end])\n\t\t\tdel pkt[:pkt_end]\n\t\telse:\n\t\t\tpkt_end = crc_pos+2\n\t\t\tdel pkt[:pkt_end]\n\treturn ret", - "docstring": "Search through a string of binary for a valid xl320 package.\n\n\tin: buffer to search through\n\tout: a list of valid data packet" - }, - { - "code": "def get_veto_segs(workflow, ifo, category, start_time, end_time, out_dir,\n veto_gen_job, tags=None, execute_now=False):\n if tags is None:\n tags = []\n seg_valid_seg = segments.segment([start_time,end_time])\n node = Node(veto_gen_job)\n node.add_opt('--veto-categories', str(category))\n node.add_opt('--ifo-list', ifo)\n node.add_opt('--gps-start-time', str(start_time))\n node.add_opt('--gps-end-time', str(end_time))\n if tags:\n veto_xml_file_name = \"%s-VETOTIME_CAT%d_%s-%d-%d.xml\" \\\n %(ifo, category, '_'.join(tags), start_time,\n end_time-start_time)\n else:\n veto_xml_file_name = \"%s-VETOTIME_CAT%d-%d-%d.xml\" \\\n %(ifo, category, start_time, end_time-start_time)\n veto_xml_file_path = os.path.abspath(os.path.join(out_dir,\n veto_xml_file_name))\n curr_url = urlparse.urlunparse(['file', 'localhost',\n veto_xml_file_path, None, None, None])\n if tags:\n curr_tags = tags + ['VETO_CAT%d' %(category)]\n else:\n curr_tags = ['VETO_CAT%d' %(category)]\n if file_needs_generating(veto_xml_file_path, workflow.cp, tags=tags):\n if execute_now:\n workflow.execute_node(node, verbatim_exe = True)\n veto_xml_file = SegFile.from_segment_xml(veto_xml_file_path,\n tags=curr_tags,\n valid_segment=seg_valid_seg)\n else:\n veto_xml_file = SegFile(ifo, 'SEGMENTS', seg_valid_seg,\n file_url=curr_url, tags=curr_tags)\n node._add_output(veto_xml_file)\n workflow.add_node(node)\n else:\n node.executed = True\n for fil in node._outputs:\n fil.node = None\n veto_xml_file = SegFile.from_segment_xml(veto_xml_file_path,\n tags=curr_tags,\n valid_segment=seg_valid_seg)\n return veto_xml_file", - "docstring": "Obtain veto segments for the selected ifo and veto category and add the job\n to generate this to 
the workflow.\n\n Parameters\n -----------\n workflow: pycbc.workflow.core.Workflow\n An instance of the Workflow class that manages the workflow.\n ifo : string\n The string describing the ifo to generate vetoes for.\n category : int\n The veto category to generate vetoes for.\n start_time : gps time (either int/LIGOTimeGPS)\n The time at which to begin searching for segments.\n end_time : gps time (either int/LIGOTimeGPS)\n The time at which to stop searching for segments.\n out_dir : path\n The directory in which output will be stored.\n vetoGenJob : Job\n The veto generation Job class that will be used to create the Node.\n tag : string, optional (default=None)\n Use this to specify a tag. This can be used if this module is being\n called more than once to give call specific configuration (by setting\n options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).\n This is also used to tag the Files returned by the class to uniqueify\n the Files and uniqueify the actual filename.\n FIXME: Filenames may not be unique with current codes!\n execute_now : boolean, optional\n If true, jobs are executed immediately. If false, they are added to the\n workflow to be run later.\n\n Returns\n --------\n veto_def_file : pycbc.workflow.core.SegFile\n The workflow File object corresponding to this DQ veto file." - }, - { - "code": "def extract(self):\n return np.vstack([self[r] for r in self.dtype.names]).T.squeeze()", - "docstring": "Creates a copy of this tabarray in the form of a numpy ndarray.\n\n Useful if you want to do math on array elements, e.g. if you have a \n subset of the columns that are all numerical, you can construct a \n numerical matrix and do matrix operations." - }, - { - "code": "def _get_xml(self, *args, **kwargs):\n req = self.session_xml.get(*args, **kwargs)\n return req", - "docstring": "Wrapper around Requests for GET XML requests\n\n Returns:\n Response:\n A Requests Response object" - }, - { - "code": "def set_category(self, category):\n if isinstance(category, Category):\n name = category.name\n else:\n name = category\n self.find(\"category\").text = name", - "docstring": "Set package category\n\n Args:\n category: String of an existing category's name, or a\n Category object." - }, - { - "code": "def change_password(self, password, new_password, email_code):\n message = Msg(EMsg.ClientPasswordChange3, extended=True)\n message.body.password = password\n message.body.new_password = new_password\n message.body.code = email_code\n resp = self.send_job_and_wait(message, timeout=10)\n if resp is None:\n return EResult.Timeout\n else:\n return EResult(resp.eresult)", - "docstring": "Change account's password\n\n :param password: current account password\n :type password: :class:`str`\n :param new_password: new account password\n :type new_password: :class:`str`\n :param email_code: confirmation code from email\n :type email_code: :class:`str`\n :return: result\n :rtype: :class:`.EResult`\n\n .. note::\n First request change mail via :meth:`request_password_change_mail()`\n to get the email code" - }, - { - "code": "def set_vector_catch(self, flags):\n res = self._dll.JLINKARM_WriteVectorCatch(flags)\n if res < 0:\n raise errors.JLinkException(res)\n return None", - "docstring": "Sets vector catch bits of the processor.\n\n The CPU will jump to a vector if the given vector catch is active, and\n will enter a debug state. 
This has the effect of halting the CPU as\n well, meaning the CPU must be explicitly restarted.\n\n Args:\n self (JLink): the ``JLink`` instance\n\n Returns:\n ``None``\n\n Raises:\n JLinkException: on error." - }, - { - "code": "def avail_sizes(call=None):\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The avail_sizes function must be called with '\n '-f or --function, or with the --list-sizes option'\n )\n conn = get_conn()\n data = conn.list_role_sizes()\n ret = {}\n for item in data.role_sizes:\n ret[item.name] = object_to_dict(item)\n return ret", - "docstring": "Return a list of sizes from Azure" - }, - { - "code": "def fill_subparser(subparser):\n subparser.add_argument(\n '--youtube-id', type=str, required=True,\n help=(\"The YouTube ID of the video from which to extract audio, \"\n \"usually an 11-character string.\")\n )\n subparser.add_argument(\n '--channels', type=int, default=1,\n help=(\"The number of audio channels to convert to. The default of 1\"\n \"means audio is converted to mono.\")\n )\n subparser.add_argument(\n '--sample', type=int, default=16000,\n help=(\"The sampling rate in Hz. The default of 16000 is \"\n \"significantly downsampled compared to normal WAVE files; \"\n \"pass 44100 for the usual sampling rate.\")\n )\n return convert_youtube_audio", - "docstring": "Sets up a subparser to convert YouTube audio files.\n\n Adds the compulsory `--youtube-id` flag as well as the optional\n `sample` and `channels` flags.\n\n Parameters\n ----------\n subparser : :class:`argparse.ArgumentParser`\n Subparser handling the `youtube_audio` command." - }, - { - "code": "def resource_name(self, resource):\n if not(resource in self.contents['files']):\n raise ValueError(\"No such resource %r in refpkg\" % (resource,))\n return self.contents['files'][resource]", - "docstring": "Return the name of the file within the reference package for a\n particular named resource." - }, - { - "code": "def multi_analysis(event_collection, analyses, timeframe=None, interval=None, timezone=None,\n filters=None, group_by=None, order_by=None, max_age=None, limit=None):\n _initialize_client_from_environment()\n return _client.multi_analysis(event_collection=event_collection, timeframe=timeframe,\n interval=interval, timezone=timezone, filters=filters,\n group_by=group_by, order_by=order_by, analyses=analyses,\n max_age=max_age, limit=limit)", - "docstring": "Performs a multi-analysis query\n\n Returns a dictionary of analysis results.\n\n :param event_collection: string, the name of the collection to query\n :param analyses: dict, the types of analyses you'd like to run. example:\n {\"total money made\":{\"analysis_type\":\"sum\",\"target_property\":\"purchase.price\",\n \"average price\":{\"analysis_type\":\"average\",\"target_property\":\"purchase.price\"}\n :param timeframe: string or dict, the timeframe in which the events\n happened example: \"previous_7_days\"\n :param interval: string, the time interval used for measuring data over\n time example: \"daily\"\n :param timezone: int, the timezone you'd like to use for the timeframe\n and interval in seconds\n :param filters: array of dict, contains the filters you'd like to apply to the data\n example: [{\"property_name\":\"device\", \"operator\":\"eq\", \"property_value\":\"iPhone\"}]\n :param group_by: string or array of strings, the name(s) of the properties you would\n like to group you results by. 
example: \"customer.id\" or [\"browser\",\"operating_system\"]\n :param order_by: dictionary or list of dictionary objects containing the property_name(s)\n to order by and the desired direction(s) of sorting.\n Example: {\"property_name\":\"result\", \"direction\":keen.direction.DESCENDING}\n May not be used without a group_by specified.\n :param limit: positive integer limiting the displayed results of a query using order_by\n :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're\n willing to trade for increased query performance, in seconds" - }, - { - "code": "def _read_repos(conf_file, repos, filename, regex):\n for line in conf_file:\n line = salt.utils.stringutils.to_unicode(line)\n if not regex.search(line):\n continue\n repo = _create_repo(line, filename)\n if repo['uri'] not in repos:\n repos[repo['uri']] = [repo]", - "docstring": "Read repos from configuration file" - }, - { - "code": "def to_ufo_family_user_data(self, ufo):\n if not self.use_designspace:\n ufo.lib[FONT_USER_DATA_KEY] = dict(self.font.userData)", - "docstring": "Set family-wide user data as Glyphs does." - }, - { - "code": "def rows(self):\n return map(\n lambda o: self._meta.row_class(self, o),\n self.paginator.page(self._meta.page).object_list )", - "docstring": "Return the list of object on the active page." - }, - { - "code": "def _download_linkode(self):\n linkode_id = self.url.split(\"/\")[-1]\n if linkode_id.startswith(\"\n linkode_id = linkode_id[1:]\n url = \"https://linkode.org/api/1/linkodes/\" + linkode_id\n req = request.Request(url, headers=self.HEADERS_JSON)\n resp = request.urlopen(req)\n raw = resp.read()\n data = json.loads(raw.decode(\"utf8\"))\n content = data['content']\n return content", - "docstring": "Download content from Linkode pastebin." - }, - { - "code": "def type_with_ranges(self, tchain, p_elem, rangekw, gen_data):\n ranges = self.get_ranges(tchain, rangekw)\n if not ranges: return p_elem.subnode(gen_data())\n if len(ranges) > 1:\n p_elem = SchemaNode.choice(p_elem)\n p_elem.occur = 2\n for r in ranges:\n d_elem = gen_data()\n for p in self.range_params(r, rangekw):\n d_elem.subnode(p)\n p_elem.subnode(d_elem)", - "docstring": "Handle types with 'range' or 'length' restrictions.\n\n `tchain` is the chain of type definitions from which the\n ranges may need to be extracted. `rangekw` is the statement\n keyword determining the range type (either 'range' or\n 'length'). `gen_data` is a function that generates the\n output schema node (a RELAX NG pattern)." - }, - { - "code": "def contains(self, data):\n bfo = BitFieldOperation(self.database, self.key)\n for bit_index in self._get_seeds(data):\n bfo.get('u1', bit_index)\n return all(bfo.execute())", - "docstring": "Check if an item has been added to the bloomfilter.\n\n :param bytes data: a bytestring representing the item to check.\n :returns: a boolean indicating whether or not the item is present in\n the bloomfilter. False-positives are possible, but a negative\n return value is definitive." 
- }, - { - "code": "def get_file(profile, branch, file_path):\n branch_sha = get_branch_sha(profile, branch)\n tree = get_files_in_branch(profile, branch_sha)\n match = None\n for item in tree:\n if item.get(\"path\") == file_path:\n match = item\n break\n file_sha = match.get(\"sha\")\n blob = blobs.get_blob(profile, file_sha)\n content = blob.get(\"content\")\n decoded_content = b64decode(content)\n return decoded_content.decode(\"utf-8\")", - "docstring": "Get a file from a branch.\n\n Args:\n\n profile\n A profile generated from ``simplygithub.authentication.profile``.\n Such profiles tell this module (i) the ``repo`` to connect to,\n and (ii) the ``token`` to connect with.\n\n branch\n The name of a branch.\n\n file_path\n The path of the file to fetch.\n\n Returns:\n The (UTF-8 encoded) content of the file, as a string." - }, - { - "code": "def flip_coords(X,loop):\n if(loop[0]==1):\n return np.array(map(lambda i: np.array([i[2],i[1],i[0],i[5],i[4],i[3]]),X))\n else:\n return X", - "docstring": "Align circulation with z-axis" - }, - { - "code": "def _required_idiom(tag_name, index, notfoundmsg):\n cond = \"\"\n if index > 0:\n cond = \" or len(el) - 1 < %d\" % index\n tag_name = str(tag_name)\n output = IND + \"if not el%s:\\n\" % cond\n output += IND + IND + \"raise UserWarning(\\n\"\n output += IND + IND + IND + \"%s +\\n\" % repr(notfoundmsg.strip() + \"\\n\")\n output += IND + IND + IND + repr(\"Tag name: \" + tag_name) + \" + '\\\\n' +\\n\"\n output += IND + IND + IND + \"'El:' + str(el) + '\\\\n' +\\n\"\n output += IND + IND + IND + \"'Dom:' + str(dom)\\n\"\n output += IND + IND + \")\\n\\n\"\n return output + IND + \"el = el[%d]\\n\\n\" % index", - "docstring": "Generate code, which make sure that `tag_name` has enoug items.\n\n Args:\n tag_name (str): Name of the container.\n index (int): Index of the item you want to obtain from container.\n notfoundmsg (str): Raise :class:`.UserWarning` with debug data and\n following message.\n\n Returns:\n str: Python code." - }, - { - "code": "def marvcli_user_rm(ctx, username):\n app = create_app()\n try:\n app.um.user_rm(username)\n except ValueError as e:\n ctx.fail(e.args[0])", - "docstring": "Remove a user" - }, - { - "code": "def cpf(self, with_mask: bool = True) -> str:\n def get_verifying_digit_cpf(cpf, peso):\n soma = 0\n for index, digit in enumerate(cpf):\n soma += digit * (peso - index)\n resto = soma % 11\n if resto == 0 or resto == 1 or resto >= 11:\n return 0\n return 11 - resto\n cpf_without_dv = [self.random.randint(0, 9) for _ in range(9)]\n first_dv = get_verifying_digit_cpf(cpf_without_dv, 10)\n cpf_without_dv.append(first_dv)\n second_dv = get_verifying_digit_cpf(cpf_without_dv, 11)\n cpf_without_dv.append(second_dv)\n cpf = ''.join([str(i) for i in cpf_without_dv])\n if with_mask:\n return cpf[:3] + '.' + cpf[3:6] + '.' 
+ cpf[6:9] + '-' + cpf[9:]\n return cpf", - "docstring": "Get a random CPF.\n\n :param with_mask: Use CPF mask (###.###.###-##).\n :returns: Random CPF.\n\n :Example:\n 001.137.297-40" - }, - { - "code": "def recv_exactly(self, n, timeout='default'):\n self._print_recv_header(\n '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)\n return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)", - "docstring": "Recieve exactly n bytes\n\n Aliases: read_exactly, readexactly, recvexactly" - }, - { - "code": "async def node(self, node_id: str) -> dict:\n return await self._request('get', node_id, base_url=NODE_URL_SCAFFOLD)", - "docstring": "Return data from a node by its ID." - }, - { - "code": "def set_default_backend(name: str):\n global _default_backend\n if name == \"bokeh\":\n raise RuntimeError(\"Support for bokeh has been discontinued. At some point, we may return to support holoviews.\")\n if not name in backends:\n raise RuntimeError(\"Backend {0} is not supported and cannot be set as default.\".format(name))\n _default_backend = name", - "docstring": "Choose a default backend." - }, - { - "code": "def wait_for_at_least_one_message(self, channel):\n unpacker = msgpack.Unpacker(encoding='utf-8')\n while True:\n try:\n start = time.time()\n chunk = self.ssh_channel[channel].recv(1024)\n end = time.time()\n self.read_speeds.append( len(chunk) / (end-start) )\n if len(self.read_speeds) > 20:\n self.read_speeds = self.read_speeds[10:]\n if chunk == b'':\n self.connection_error(channel, 'Connection broken w')\n return False\n except Exception as error:\n self.connection_error(channel, error)\n raise\n unpacker.feed(chunk)\n messages = [m for m in unpacker]\n if messages:\n return messages", - "docstring": "Reads until we receive at least one message we can unpack. Return all found messages." - }, - { - "code": "def trigger(self, event: str, *args: T.Any, **kw: T.Any) -> bool:\n callbacks = list(self._events.get(event, []))\n if not callbacks:\n return False\n for callback in callbacks:\n callback(*args, **kw)\n return True", - "docstring": "Triggers all handlers which are subscribed to an event.\n Returns True when there were callbacks to execute, False otherwise." - }, - { - "code": "def compute_cheby_coeff(f, m=30, N=None, *args, **kwargs):\n r\n G = f.G\n i = kwargs.pop('i', 0)\n if not N:\n N = m + 1\n a_arange = [0, G.lmax]\n a1 = (a_arange[1] - a_arange[0]) / 2\n a2 = (a_arange[1] + a_arange[0]) / 2\n c = np.zeros(m + 1)\n tmpN = np.arange(N)\n num = np.cos(np.pi * (tmpN + 0.5) / N)\n for o in range(m + 1):\n c[o] = 2. 
/ N * np.dot(f._kernels[i](a1 * num + a2),\n np.cos(np.pi * o * (tmpN + 0.5) / N))\n return c", - "docstring": "r\"\"\"\n Compute Chebyshev coefficients for a Filterbank.\n\n Parameters\n ----------\n f : Filter\n Filterbank with at least 1 filter\n m : int\n Maximum order of Chebyshev coeff to compute\n (default = 30)\n N : int\n Grid order used to compute quadrature\n (default = m + 1)\n i : int\n Index of the Filterbank element to compute\n (default = 0)\n\n Returns\n -------\n c : ndarray\n Matrix of Chebyshev coefficients" - }, - { - "code": "def sendRequest(self, name, args):\n (respEvt, id) = self.newResponseEvent()\n self.sendMessage({\"id\":id, \"method\":name, \"params\": args})\n return respEvt", - "docstring": "sends a request to the peer" - }, - { - "code": "def create(gandi, private_key, certificate, certificate_id):\n if not certificate and not certificate_id:\n gandi.echo('One of --certificate or --certificate-id is needed.')\n return\n if certificate and certificate_id:\n gandi.echo('Only one of --certificate or --certificate-id is needed.')\n if os.path.isfile(private_key):\n with open(private_key) as fhandle:\n private_key = fhandle.read()\n if certificate:\n if os.path.isfile(certificate):\n with open(certificate) as fhandle:\n certificate = fhandle.read()\n else:\n cert = gandi.certificate.info(certificate_id)\n certificate = gandi.certificate.pretty_format_cert(cert)\n result = gandi.hostedcert.create(private_key, certificate)\n output_keys = ['id', 'subject', 'date_created', 'date_expire',\n 'fqdns', 'vhosts']\n output_hostedcert(gandi, result, output_keys)\n return result", - "docstring": "Create a new hosted certificate." - }, - { - "code": "def pretty_print_model(devicemodel):\n PRETTY_PRINT_MODEL =\n logging.info(PRETTY_PRINT_MODEL % devicemodel)\n if 'traits' in devicemodel:\n for trait in devicemodel['traits']:\n logging.info(' Trait %s' % trait)\n else:\n logging.info('No traits')\n logging.info('')", - "docstring": "Prints out a device model in the terminal by parsing dict." - }, - { - "code": "def load_objects(self, dirs=[], callwith={}):\n for d in dirs:\n contents = ls(d)\n for t in contents:\n first = join(d, t)\n if t.startswith('.') or os.path.isfile(first):\n continue\n t_l = t.lower()\n for fl in ls(first):\n full = join(first, fl)\n if fl.startswith('.') or os.path.isdir(full):\n continue\n fl_n = fl.lower().rsplit('.', 1)[0]\n ty = guess_type(full)\n if ty == T_IMAGE:\n self.load_image(full, fl_n, t)\n elif ty == T_SOUND:\n self.load_sound(full, fl_n, t)\n elif ty == T_MUSIC:\n self.load_music(full, fl_n, t)\n elif ty == T_CODE and fl_n == '__init__':\n self.load_code(d, t, callwith)\n elif ty == T_PLAYLIST:\n self.load_playlist(full, fl_n)", - "docstring": "Call this to load resources from each dir in @dirs. Code resources will \n receive @callwith as an argument." 
- }, - { - "code": "def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None,\n numPartitions=None, filterFunc=None):\n self._validate_window_param(windowDuration, slideDuration)\n if numPartitions is None:\n numPartitions = self._sc.defaultParallelism\n reduced = self.reduceByKey(func, numPartitions)\n if invFunc:\n def reduceFunc(t, a, b):\n b = b.reduceByKey(func, numPartitions)\n r = a.union(b).reduceByKey(func, numPartitions) if a else b\n if filterFunc:\n r = r.filter(filterFunc)\n return r\n def invReduceFunc(t, a, b):\n b = b.reduceByKey(func, numPartitions)\n joined = a.leftOuterJoin(b, numPartitions)\n return joined.mapValues(lambda kv: invFunc(kv[0], kv[1])\n if kv[1] is not None else kv[0])\n jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer)\n jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer)\n if slideDuration is None:\n slideDuration = self._slideDuration\n dstream = self._sc._jvm.PythonReducedWindowedDStream(\n reduced._jdstream.dstream(),\n jreduceFunc, jinvReduceFunc,\n self._ssc._jduration(windowDuration),\n self._ssc._jduration(slideDuration))\n return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)\n else:\n return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions)", - "docstring": "Return a new DStream by applying incremental `reduceByKey` over a sliding window.\n\n The reduced value of over a new window is calculated using the old window's reduce value :\n 1. reduce the new values that entered the window (e.g., adding new counts)\n 2. \"inverse reduce\" the old values that left the window (e.g., subtracting old counts)\n\n `invFunc` can be None, then it will reduce all the RDDs in window, could be slower\n than having `invFunc`.\n\n @param func: associative and commutative reduce function\n @param invFunc: inverse function of `reduceFunc`\n @param windowDuration: width of the window; must be a multiple of this DStream's\n batching interval\n @param slideDuration: sliding interval of the window (i.e., the interval after which\n the new DStream will generate RDDs); must be a multiple of this\n DStream's batching interval\n @param numPartitions: number of partitions of each RDD in the new DStream.\n @param filterFunc: function to filter expired key-value pairs;\n only pairs that satisfy the function are retained\n set this to null if you do not want to filter" - }, - { - "code": "def dict_filter_update(base, updates):\n base.update((k, v) for k, v in updates.items() if v is not None)", - "docstring": "Update dict with None values filtered out." 
- }, - { - "code": "def getLabel(self):\n if self._state & self.ST_CLEAN:\n return self._label\n else:\n raise SmiError(\n '%s object not fully initialized' % self.__class__.__name__)", - "docstring": "Returns symbolic path to this MIB variable.\n\n Meaning a sequence of symbolic identifications for each of parent\n MIB objects in MIB tree.\n\n Returns\n -------\n tuple\n sequence of names of nodes in a MIB tree from the top of the tree\n towards this MIB variable.\n\n Raises\n ------\n SmiError\n If MIB variable conversion has not been performed.\n\n Notes\n -----\n Returned sequence may not contain full path to this MIB variable\n if some symbols are now known at the moment of MIB look up.\n\n Examples\n --------\n >>> objectIdentity = ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)\n >>> objectIdentity.resolveWithMib(mibViewController)\n >>> objectIdentity.getOid()\n ('iso', 'org', 'dod', 'internet', 'mgmt', 'mib-2', 'system', 'sysDescr')\n >>>" - }, - { - "code": "def _assign_field(self, assigned_bits, identifier, field_values):\n field = self.fields.get_field(identifier, field_values)\n length = field.length\n if length is None:\n length = int(log(field.max_value, 2)) + 1\n start_at = field.start_at\n if start_at is None:\n start_at = self.length\n for bit in range(0, self.length - length):\n field_bits = ((1 << length) - 1) << bit\n if not (assigned_bits & field_bits):\n start_at = bit\n assigned_bits |= field_bits\n break\n else:\n field_bits = ((1 << length) - 1) << start_at\n if assigned_bits & field_bits:\n raise ValueError(\n \"{}-bit field {} with fixed position does not fit in \"\n \"{}.\".format(\n field.length,\n self.fields.get_field_human_readable(identifier,\n field_values),\n type(self).__name__\n )\n )\n assigned_bits |= field_bits\n if start_at + length <= self.length:\n field.length = length\n field.start_at = start_at\n else:\n raise ValueError(\n \"{}-bit field {} does not fit in {}.\".format(\n field.length,\n self.fields.get_field_human_readable(identifier,\n field_values),\n type(self).__name__\n )\n )\n return assigned_bits", - "docstring": "For internal use only. Assign a length and position to a field\n which may have either one of these values missing.\n\n Parameters\n ----------\n assigned_bits : int\n A bit mask of bits already in use by other fields\n identifier : str\n The identifier of the field to assign\n field_values : {identifier: value, ...}\n The values held by various fields (used to access the correct\n identifier)\n\n Returns\n -------\n int\n Mask of which bits which are assigned to fields after this field\n has been assigned." - }, - { - "code": "def follow_topic_from_config():\n config = get_config()['ResultTopicBolt']\n kafka_class = import_name(config['kafka_class'])\n return follow_topic(kafka_class, config['topic'], **config['kafka_init'])", - "docstring": "Read kafka config, then dispatch to `follow_topic`." 
- }, - { - "code": "def customer_source_webhook_handler(event):\n\tcustomer_data = event.data.get(\"object\", {})\n\tsource_type = customer_data.get(\"object\", {})\n\tif source_type == SourceType.card:\n\t\tif event.verb.endswith(\"deleted\") and customer_data:\n\t\t\tmodels.Card.objects.filter(id=customer_data.get(\"id\", \"\")).delete()\n\t\t\tmodels.DjstripePaymentMethod.objects.filter(id=customer_data.get(\"id\", \"\")).delete()\n\t\telse:\n\t\t\t_handle_crud_like_event(target_cls=models.Card, event=event)", - "docstring": "Handle updates to customer payment-source objects.\n\n\tDocs: https://stripe.com/docs/api#customer_object-sources." - }, - { - "code": "def prelu(inp, base_axis=1, shared=True, fix_parameters=False):\n shape = tuple() if shared else (inp.shape[base_axis],)\n w = get_parameter_or_create(\"slope\", shape,\n ConstantInitializer(-1), True, not fix_parameters)\n return F.prelu(inp, w, base_axis)", - "docstring": "Parametrized Rectified Linear Unit function defined as\n\n .. math::\n y_i = \\max(0, x_i) + w_i \\min(0, -x_i)\n\n where negative slope :math:`w` is learned and can vary across channels (an\n axis specified with base_axis). Weights are initialized with :math:`-1`.\n\n Args:\n x(~nnabla.Variable): N-D array as input\n base_axis(int): Dimensions up to base_axis is treated as sample dimension.\n shared(bool): Use shared weight value or not\n fix_parameters (bool): When set to `True`, the negative slope values\n will not be updated.\n\n Returns:\n ~nnabla.Variable: N-D array." - }, - { - "code": "def to_dict_formatter(row, cursor):\n if not row:\n return row\n if cursor is None or cursor.description is None:\n raise RuntimeError(\"No DB-API cursor or description available.\")\n column_names = (d[0] for d in cursor.description)\n return {name: value for value, name in zip(row, column_names)}", - "docstring": "Take a row and use the column names from cursor to turn the row into a\n dictionary.\n\n Note: converts column names to lower-case!\n\n :param row: one database row, sequence of column values\n :type row: (value, ...)\n :param cursor: the cursor which was used to make the query\n :type cursor: DB-API cursor object" - }, - { - "code": "def insert(self, index, chord):\n self._chords.insert(index, as_chord(chord))", - "docstring": "Insert a chord to chord progressions\n\n :param int index: Index to insert a chord\n :type chord: str|pychord.Chord\n :param chord: A chord to insert\n :return:" - }, - { - "code": "def get_device_info(self):\n device_info = {}\n url = '%s/ISAPI/System/deviceInfo' % self.root_url\n using_digest = False\n try:\n response = self.hik_request.get(url, timeout=CONNECT_TIMEOUT)\n if response.status_code == requests.codes.unauthorized:\n _LOGGING.debug('Basic authentication failed. Using digest.')\n self.hik_request.auth = HTTPDigestAuth(self.usr, self.pwd)\n using_digest = True\n response = self.hik_request.get(url)\n if response.status_code == requests.codes.not_found:\n _LOGGING.debug('Using alternate deviceInfo URL.')\n url = '%s/System/deviceInfo' % self.root_url\n response = self.hik_request.get(url)\n if not using_digest and response.status_code == requests.codes.unauthorized:\n _LOGGING.debug('Basic authentication failed. 
Using digest.')\n self.hik_request.auth = HTTPDigestAuth(self.usr, self.pwd)\n using_digest = True\n response = self.hik_request.get(url)\n except (requests.exceptions.RequestException,\n requests.exceptions.ConnectionError) as err:\n _LOGGING.error('Unable to fetch deviceInfo, error: %s', err)\n return None\n if response.status_code == requests.codes.unauthorized:\n _LOGGING.error('Authentication failed')\n return None\n if response.status_code != requests.codes.ok:\n _LOGGING.debug('Unable to fetch device info.')\n return None\n try:\n tree = ET.fromstring(response.text)\n nmsp = tree.tag.split('}')[0].strip('{')\n self.namespace = nmsp if nmsp.startswith('http') else XML_NAMESPACE\n _LOGGING.debug('Using Namespace: %s', self.namespace)\n for item in tree:\n tag = item.tag.split('}')[1]\n device_info[tag] = item.text\n return device_info\n except AttributeError as err:\n _LOGGING.error('Entire response: %s', response.text)\n _LOGGING.error('There was a problem: %s', err)\n return None", - "docstring": "Parse deviceInfo into dictionary." - }, - { - "code": "def get_failures_fixed_by_commit():\n failures = defaultdict(list)\n option_collection_map = models.OptionCollection.objects.get_option_collection_map()\n fixed_by_commit_data_set = models.JobNote.objects.filter(\n failure_classification=2,\n created__gt=timezone.now() - timedelta(days=SETA_FIXED_BY_COMMIT_DAYS),\n text__isnull=False,\n job__repository__name__in=SETA_FIXED_BY_COMMIT_REPOS\n ).exclude(\n job__signature__build_platform__in=SETA_UNSUPPORTED_PLATFORMS\n ).exclude(\n text=\"\"\n ).select_related('job', 'job__signature', 'job__job_type')\n if not fixed_by_commit_data_set.exists():\n logger.warning(\"We couldn't find any fixed-by-commit jobs\")\n return failures\n for job_note in fixed_by_commit_data_set.iterator():\n revision_id = job_note.text.strip('/')\n revision_id = revision_id.split('/')[-1]\n if not revision_id or len(revision_id) < 12:\n continue\n try:\n if job_note.job.signature.build_system_type != 'buildbot':\n if not job_note.job.job_type.name.startswith(tuple(SETA_SUPPORTED_TC_JOBTYPES)):\n continue\n testtype = parse_testtype(\n build_system_type=job_note.job.signature.build_system_type,\n job_type_name=job_note.job.job_type.name,\n platform_option=job_note.job.get_platform_option(option_collection_map),\n ref_data_name=job_note.job.signature.name,\n )\n if testtype:\n if is_job_blacklisted(testtype):\n continue\n else:\n logger.warning('We were unable to parse %s/%s',\n job_note.job.job_type.name, job_note.job.signature.name)\n continue\n failures[revision_id].append(unique_key(\n testtype=testtype,\n buildtype=job_note.job.get_platform_option(option_collection_map),\n platform=job_note.job.signature.build_platform\n ))\n except models.Job.DoesNotExist:\n logger.warning('job_note %s has no job associated to it', job_note.id)\n continue\n logger.warning(\"Number of fixed_by_commit revisions: %s\", len(failures))\n return failures", - "docstring": "Return all job failures annotated with \"fixed by commit\" grouped by reason given for annotation.\n\n It returns a dictionary with a revision or bug ID as the key (bug ID is used for\n intermittent failures and the revision is used for real failures). 
For SETA's purposes\n we only care about revisions (real failures).\n The failures for *real failures* will contain all jobs that have been starred as \"fixed by commit\".\n\n Notice that the data does not tell you on which repository a root failure was fixed.\n\n For instance, in the raw data you might see a reference to 9fa614d8310d which is a back out\n and it is referenced by 12 starred jobs:\n https://treeherder.mozilla.org/#/jobs?repo=autoland&filter-searchStr=android%20debug%20cpp&tochange=9fa614d8310db9aabe85cc3c3cff6281fe1edb0c\n The raw data will show those 12 jobs.\n\n The returned data will look like this:\n {\n \"44d29bac3654\": [\n [\"android-4-0-armv7-api15\", \"opt\", \"android-lint\"],\n [\"android-4-0-armv7-api15\", \"opt\", \"android-api-15-gradle-dependencies\"],\n ]\n }" - }, - { - "code": "def value( self, node, parent=None ):\n try:\n return node['contribution']\n except KeyError, err:\n contribution = int(node.get('totsize',0)/float( len(node.get('parents',())) or 1))\n node['contribution'] = contribution\n return contribution", - "docstring": "Return value used to compare size of this node" - }, - { - "code": "def bs_progress_bar(*args, **kwargs):\n bars = []\n contexts = kwargs.get(\n 'contexts',\n ['', 'success', 'info', 'warning', 'danger']\n )\n for ndx, arg in enumerate(args):\n bars.append(\n dict(percent=arg,\n context=kwargs.get('context', contexts[ndx % len(contexts)]))\n )\n return {\n 'bars': bars,\n 'text': kwargs.pop('text', False),\n 'striped': kwargs.pop('striped', False),\n 'animated': kwargs.pop('animated', False),\n 'min_val': kwargs.pop('min_val', 0),\n 'max_val': kwargs.pop('max_val', 100),\n }", - "docstring": "A Standard Bootstrap Progress Bar.\n\n http://getbootstrap.com/components/#progress\n\n param args (Array of Numbers: 0-100): Percent of Progress Bars\n param context (String): Adds 'progress-bar-{context} to the class attribute\n param contexts (Array of Strings): Cycles through contexts for stacked bars\n param text (String): True: shows value within the bar, False: uses sr span\n param striped (Boolean): Adds 'progress-bar-striped' to the class attribute\n param animated (Boolean): Adds 'active' to the class attribute if striped\n param min_val (0): Used for the aria-min value\n param max_val (0): Used for the aria-max value" - }, - { - "code": "def browse_clicked(self, widget, data=None):\n text = self.gui_helper.create_file_chooser_dialog(\"Please select directory\", self.path_window)\n if text is not None:\n data.set_text(text)", - "docstring": "Function sets the directory to entry" - }, - { - "code": "def migrateDown(self):\n subStore = self.store.parent.getItemByID(self.store.idInParent)\n ssph = self.store.parent.findUnique(\n _SubSchedulerParentHook,\n _SubSchedulerParentHook.subStore == subStore,\n default=None)\n if ssph is not None:\n te = self.store.parent.findUnique(TimedEvent,\n TimedEvent.runnable == ssph,\n default=None)\n if te is not None:\n te.deleteFromStore()\n ssph.deleteFromStore()", - "docstring": "Remove the components in the site store for this SubScheduler." 
- }, - { - "code": "def expired(self):\n if self._expired_latch:\n return True\n self._check_time_backwards()\n if time.time() > self.end:\n self._expired_latch = True\n return True\n return False", - "docstring": "Boolean property if this timeout has expired" - }, - { - "code": "def add_email_address(self, email, hidden=None):\n existing_emails = get_value(self.obj, 'email_addresses', [])\n found_email = next(\n (existing_email for existing_email in existing_emails if existing_email.get('value') == email),\n None\n )\n if found_email is None:\n new_email = {'value': email}\n if hidden is not None:\n new_email['hidden'] = hidden\n self._append_to('email_addresses', new_email)\n elif hidden is not None:\n found_email['hidden'] = hidden", - "docstring": "Add email address.\n\n Args:\n :param email: email of the author.\n :type email: string\n\n :param hidden: if email is public or not.\n :type hidden: boolean" - }, - { - "code": "def process_tick(self, tick_tup):\n self._tick_counter += 1\n self.ack(tick_tup)\n if self._tick_counter > self.ticks_between_batches and self._batches:\n self.process_batches()\n self._tick_counter = 0", - "docstring": "Increment tick counter, and call ``process_batch`` for all current\n batches if tick counter exceeds ``ticks_between_batches``.\n\n See :class:`pystorm.component.Bolt` for more information.\n\n .. warning::\n This method should **not** be overriden. If you want to tweak\n how Tuples are grouped into batches, override ``group_key``." - }, - { - "code": "def color_lerp(\n c1: Tuple[int, int, int], c2: Tuple[int, int, int], a: float\n) -> Color:\n return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a))", - "docstring": "Return the linear interpolation between two colors.\n\n ``a`` is the interpolation value, with 0 returing ``c1``,\n 1 returning ``c2``, and 0.5 returing a color halfway between both.\n\n Args:\n c1 (Union[Tuple[int, int, int], Sequence[int]]):\n The first color. At a=0.\n c2 (Union[Tuple[int, int, int], Sequence[int]]):\n The second color. At a=1.\n a (float): The interpolation value,\n\n Returns:\n Color: The interpolated Color." - }, - { - "code": "def repeat(self, count=2):\n try:\n return self.__class__(''.join((\n str(self),\n self.last_code() * (count - 1),\n )))\n except TypeError as ex:\n raise TypeError(\n '`count` must be an integer. Got: {!r}'.format(count)\n ) from ex", - "docstring": "Repeat the last control code a number of times.\n Returns a new Control with this one's data and the repeated code." - }, - { - "code": "def get_nonrentrant_counter(self, key):\n prefix = NONRENTRANT_STORAGE_OFFSET\n if key in self._nonrentrant_keys:\n return self._nonrentrant_keys[key]\n else:\n counter = prefix + self._nonrentrant_counter\n self._nonrentrant_keys[key] = counter\n self._nonrentrant_counter += 1\n return counter", - "docstring": "Nonrentrant locks use a prefix with a counter to minimise deployment cost of a contract." - }, - { - "code": "def get_subscribers(obj):\n ctype = ContentType.objects.get_for_model(obj)\n return Subscription.objects.filter(content_type=ctype, object_id=obj.pk)", - "docstring": "Returns the subscribers for a given object.\n\n :param obj: Any object." - }, - { - "code": "def SetStorageProfiler(self, storage_profiler):\n self._storage_profiler = storage_profiler\n if self._storage_file:\n self._storage_file.SetStorageProfiler(storage_profiler)", - "docstring": "Sets the storage profiler.\n\n Args:\n storage_profiler (StorageProfiler): storage profiler." 
- }, - { - "code": "def update(self, client=None):\n client = self._require_client(client)\n query_params = self._query_params\n query_params[\"projection\"] = \"full\"\n api_response = client._connection.api_request(\n method=\"PUT\",\n path=self.path,\n data=self._properties,\n query_params=query_params,\n _target_object=self,\n )\n self._set_properties(api_response)", - "docstring": "Sends all properties in a PUT request.\n\n Updates the ``_properties`` with the response from the backend.\n\n If :attr:`user_project` is set, bills the API request to that project.\n\n :type client: :class:`~google.cloud.storage.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current object." - }, - { - "code": "def send(self, auth_header=None, callback=None, **data):\n message = self.encode(data)\n return self.send_encoded(message, auth_header=auth_header, callback=callback)", - "docstring": "Serializes the message and passes the payload onto ``send_encoded``." - }, - { - "code": "def get_prep_value(self, value):\n value = super(TimeZoneField, self).get_prep_value(value)\n if isinstance(value, tzinfo):\n return value.zone\n return value", - "docstring": "Converts timezone instances to strings for db storage." - }, - { - "code": "def read_config(filename=None):\n if not os.path.exists(filename):\n raise IOError('Impossibile trovare il filename %s' % filename)\n shares = []\n config = ConfigParser()\n config.read(filename)\n for share_items in [config.items(share_title) for share_title in\n config.sections()]:\n dict_share = {}\n for key, value in share_items:\n if key == 'hostname' and '@' in value:\n hostname, credentials = (item[::-1] for item\n in value[::-1].split('@', 1))\n dict_share.update({key: hostname})\n credentials = tuple(cred.lstrip('\"').rstrip('\"')\n for cred in credentials.split(':', 1))\n dict_share.update({'username': credentials[0]})\n if len(credentials) > 1:\n dict_share.update({'password': credentials[1]})\n continue\n dict_share.update({key: value})\n shares.append(dict_share)\n return shares", - "docstring": "Read a config filename into .ini format and return dict of shares.\n\n Keyword arguments:\n filename -- the path of config filename (default None)\n\n Return dict." - }, - { - "code": "def write_output(job_id, content, conn=None):\n output_job = get_job_by_id(job_id, conn)\n results = {}\n if output_job is not None:\n entry = {\n OUTPUTJOB_FIELD: output_job,\n CONTENT_FIELD: content\n }\n results = RBO.insert(entry, conflict=RDB_REPLACE).run(conn)\n return results", - "docstring": "writes output to the output table\n\n :param job_id: id of the job\n :param content: output to write\n :param conn:" - }, - { - "code": "def monkey_patch_flask_security():\n if utils.get_hmac != get_hmac:\n utils.get_hmac = get_hmac\n if utils.hash_password != hash_password:\n utils.hash_password = hash_password\n changeable.hash_password = hash_password\n recoverable.hash_password = hash_password\n registerable.hash_password = hash_password\n def patch_do_nothing(*args, **kwargs):\n pass\n LoginManager._set_cookie = patch_do_nothing\n def patch_reload_anonym(self, *args, **kwargs):\n self.reload_user()\n LoginManager._load_from_header = patch_reload_anonym\n LoginManager._load_from_request = patch_reload_anonym", - "docstring": "Monkey-patch Flask-Security." 
- }, - { - "code": "def vsound(H):\n T = temperature(H)\n a = np.sqrt(gamma * R * T)\n return a", - "docstring": "Speed of sound" - }, - { - "code": "def filter_pages(pages, pagenum, pagename):\n if pagenum:\n try:\n pages = [list(pages)[pagenum - 1]]\n except IndexError:\n raise IndexError('Invalid page number: %d' % pagenum)\n if pagename:\n pages = [page for page in pages if page.name == pagename]\n if pages == []:\n raise IndexError('Page not found: pagename=%s' % pagename)\n return pages", - "docstring": "Choices pages by pagenum and pagename" - }, - { - "code": "def guess_function_name(next_line, regexps=FUNCTION_REGEXPS):\n for regexp in regexps:\n match = re.search(regexp, next_line)\n if match:\n return match.group(1)\n return None", - "docstring": "Attempt to determine the function name from the first code line\n following the comment. The patterns recognized are described by\n `regexps`, which defaults to FUNCTION_REGEXPS. If a match is successful, \n returns the function name. Otherwise, returns None." - }, - { - "code": "def __dispatch_request(self, handler_input):\n request_handler_chain = None\n for mapper in self.request_mappers:\n request_handler_chain = mapper.get_request_handler_chain(\n handler_input)\n if request_handler_chain is not None:\n break\n if request_handler_chain is None:\n raise DispatchException(\n \"Unable to find a suitable request handler\")\n request_handler = request_handler_chain.request_handler\n supported_handler_adapter = None\n for adapter in self.handler_adapters:\n if adapter.supports(request_handler):\n supported_handler_adapter = adapter\n break\n if supported_handler_adapter is None:\n raise DispatchException(\n \"Unable to find a suitable request adapter\")\n local_request_interceptors = request_handler_chain.request_interceptors\n for interceptor in local_request_interceptors:\n interceptor.process(handler_input=handler_input)\n output = supported_handler_adapter.execute(\n handler_input=handler_input, handler=request_handler)\n local_response_interceptors = (\n request_handler_chain.response_interceptors)\n for response_interceptor in local_response_interceptors:\n response_interceptor.process(\n handler_input=handler_input, dispatch_output=output)\n return output", - "docstring": "Process the request and return handler output.\n\n When the method is invoked, using the registered list of\n :py:class:`RequestMapper`, a Handler Chain is found that can\n handle the request. The handler invocation is delegated to the\n supported :py:class:`HandlerAdapter`. The registered\n request interceptors in the handler chain are processed before\n executing the handler. 
The registered response interceptors in\n the handler chain are processed after executing the handler.\n\n :param handler_input: generic input to the dispatcher containing\n incoming request and other context.\n :type handler_input: Input\n :return: Output from the 'handle' method execution of the\n supporting handler.\n :rtype: Union[None, Output]\n :raises DispatchException if there is no supporting\n handler chain or adapter" - }, - { - "code": "def add(self, properties):\n new_nic = super(FakedNicManager, self).add(properties)\n partition = self.parent\n if 'virtual-switch-uri' in new_nic.properties:\n vswitch_uri = new_nic.properties['virtual-switch-uri']\n try:\n vswitch = self.hmc.lookup_by_uri(vswitch_uri)\n except KeyError:\n raise InputError(\"The virtual switch specified in the \"\n \"'virtual-switch-uri' property does not \"\n \"exist: {!r}\".format(vswitch_uri))\n connected_uris = vswitch.properties['connected-vnic-uris']\n if new_nic.uri not in connected_uris:\n connected_uris.append(new_nic.uri)\n if 'device-number' not in new_nic.properties:\n devno = partition.devno_alloc()\n new_nic.properties['device-number'] = devno\n assert 'nic-uris' in partition.properties\n partition.properties['nic-uris'].append(new_nic.uri)\n return new_nic", - "docstring": "Add a faked NIC resource.\n\n Parameters:\n\n properties (dict):\n Resource properties.\n\n Special handling and requirements for certain properties:\n\n * 'element-id' will be auto-generated with a unique value across\n all instances of this resource type, if not specified.\n * 'element-uri' will be auto-generated based upon the element ID,\n if not specified.\n * 'class' will be auto-generated to 'nic',\n if not specified.\n * Either 'network-adapter-port-uri' (for backing ROCE adapters) or\n 'virtual-switch-uri'(for backing OSA or Hipersockets adapters) is\n required to be specified.\n * 'device-number' will be auto-generated with a unique value\n within the partition in the range 0x8000 to 0xFFFF, if not\n specified.\n\n This method also updates the 'nic-uris' property in the parent\n faked Partition resource, by adding the URI for the faked NIC\n resource.\n\n This method also updates the 'connected-vnic-uris' property in the\n virtual switch referenced by 'virtual-switch-uri' property,\n and sets it to the URI of the faked NIC resource.\n\n Returns:\n :class:`zhmcclient_mock.FakedNic`: The faked NIC resource.\n\n Raises:\n :exc:`zhmcclient_mock.InputError`: Some issue with the input\n properties." 
- }, - { - "code": "def featureCounts_chart (self):\n config = {\n 'id': 'featureCounts_assignment_plot',\n 'title': 'featureCounts: Assignments',\n 'ylab': '\n 'cpswitch_counts_label': 'Number of Reads'\n }\n return bargraph.plot(self.featurecounts_data, self.featurecounts_keys, config)", - "docstring": "Make the featureCounts assignment rates plot" - }, - { - "code": "def find_kwupdate_location(self,hdr,keyword):\n kw_list = None\n last_kw = None\n for extn in self.fullhdrs:\n if keyword in extn:\n indx = extn.index(keyword)\n kw_list = list(extn.keys())[:indx]\n break\n if kw_list:\n for kw in kw_list[::-1]:\n if kw in hdr:\n last_kw = kw\n break\n if last_kw is None:\n hdrkeys = list(hdr.keys())\n i = -1\n last_kw = hdrkeys[i]\n while last_kw == 'HISTORY':\n i -= 1\n last_kw = hdrkeys[i]\n return last_kw", - "docstring": "Find the last keyword in the output header that comes before the new\n keyword in the original, full input headers.\n This will rely on the original ordering of keywords from the original input\n files in order to place the updated keyword in the correct location in case\n the keyword was removed from the output header prior to calling this method." - }, - { - "code": "def send(self, msg, timeout=None):\n log.debug(\"We've been asked to write a message to the bus\")\n logger_tx = log.getChild(\"tx\")\n logger_tx.debug(\"sending: %s\", msg)\n started = time.time()\n if timeout is None:\n timeout = 0\n time_left = timeout\n data = build_can_frame(msg)\n while time_left >= 0:\n ready = select.select([], [self.socket], [], time_left)[1]\n if not ready:\n break\n sent = self._send_once(data, msg.channel)\n if sent == len(data):\n return\n data = data[sent:]\n time_left = timeout - (time.time() - started)\n raise can.CanError(\"Transmit buffer full\")", - "docstring": "Transmit a message to the CAN bus.\n\n :param can.Message msg: A message object.\n :param float timeout:\n Wait up to this many seconds for the transmit queue to be ready.\n If not given, the call may fail immediately.\n\n :raises can.CanError:\n if the message could not be written." - }, - { - "code": "def get_file_extension(filepath):\n _ext = os.path.splitext(filepath)[-1]\n if _ext:\n return _ext[1:] if _ext.startswith('.') else _ext\n return ''", - "docstring": "Copy if anyconfig.utils.get_file_extension is not available.\n\n >>> get_file_extension(\"/a/b/c\")\n ''\n >>> get_file_extension(\"/a/b.txt\")\n 'txt'\n >>> get_file_extension(\"/a/b/c.tar.xz\")\n 'xz'" - }, - { - "code": "def _init_index(root_dir, schema, index_name):\n index_dir = os.path.join(root_dir, index_name)\n try:\n if not os.path.exists(index_dir):\n os.makedirs(index_dir)\n return create_in(index_dir, schema), index_dir\n else:\n return open_dir(index_dir), index_dir\n except Exception as e:\n logger.error(\"Init error: failed to open search index at: '{}': {} \".format(index_dir, e))\n raise", - "docstring": "Creates new index or opens existing.\n\n Args:\n root_dir (str): root dir where to find or create index.\n schema (whoosh.fields.Schema): schema of the index to create or open.\n index_name (str): name of the index.\n\n Returns:\n tuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory." 
- }, - { - "code": "def invalidate(self, cls, id_field, id_val):\n cache_key, flag_key = self.get_keys(cls, id_field, id_val)\n pipeline = self.redis.pipeline()\n pipeline.delete(cache_key)\n pipeline.delete(flag_key)\n pipeline.execute()", - "docstring": "Invalidate the cache for a given Mongo object by deleting the cached\n data and the cache flag." - }, - { - "code": "def country(self, value=None):\n if value is not None:\n try:\n value = str(value)\n except ValueError:\n raise ValueError('value {} need to be of type str '\n 'for field `country`'.format(value))\n if ',' in value:\n raise ValueError('value should not contain a comma '\n 'for field `country`')\n self._country = value", - "docstring": "Corresponds to IDD Field `country`\n\n Args:\n value (str): value for IDD Field `country`\n if `value` is None it will not be checked against the\n specification and is assumed to be a missing value\n\n Raises:\n ValueError: if `value` is not a valid value" - }, - { - "code": "def draw_residual(x, y, yerr, xerr,\n show_errbars=True, ax=None,\n zero_line=True, grid=True,\n **kwargs):\n from matplotlib import pyplot as plt\n ax = plt.gca() if ax is None else ax\n if show_errbars:\n plotopts = dict(fmt='b.', capsize=0)\n plotopts.update(kwargs)\n pp = ax.errorbar(x, y, yerr, xerr, zorder=0, **plotopts)\n else:\n plotopts = dict(color='k')\n plotopts.update(kwargs)\n pp = ax.bar(x - xerr, y, width=2*xerr, **plotopts)\n if zero_line:\n ax.plot([x[0] - xerr[0], x[-1] + xerr[-1]], [0, 0], 'r-', zorder=2)\n if grid:\n ax.grid(grid)\n return ax", - "docstring": "Draw a residual plot on the axis.\n\n By default, if show_errbars if True, residuals are drawn as blue points\n with errorbars with no endcaps. If show_errbars is False, residuals are\n drawn as a bar graph with black bars.\n\n **Arguments**\n\n - **x** array of numbers, x-coordinates\n\n - **y** array of numbers, y-coordinates\n\n - **yerr** array of numbers, the uncertainty on the y-values\n\n - **xerr** array of numbers, the uncertainty on the x-values\n\n - **show_errbars** If True, draw the data as a bar plot, else as an\n errorbar plot\n\n - **ax** Optional matplotlib axis instance on which to draw the plot\n\n - **zero_line** If True, draw a red line at :math:`y = 0` along the\n full extent in :math:`x`\n\n - **grid** If True, draw gridlines\n\n - **kwargs** passed to ``ax.errorbar`` (if ``show_errbars`` is True) or\n ``ax.bar`` (if ``show_errbars`` if False)\n\n **Returns**\n\n The matplotlib axis instance the plot was drawn on." - }, - { - "code": "def unwrap_json(self, type, json_data):\n try:\n data = json.loads(json_data)\n except ValueError:\n raise RedmineError(json_data)\n try:\n data = data[type]\n except KeyError:\n pass\n return data", - "docstring": "Decodes a json string, and unwraps any 'type' it finds within." - }, - { - "code": "def create_baseline(tag=\"baseline\", config='root'):\n return __salt__['snapper.create_snapshot'](config=config,\n snapshot_type='single',\n description=\"baseline snapshot\",\n cleanup_algorithm=\"number\",\n userdata={\"baseline_tag\": tag})", - "docstring": "Creates a snapshot marked as baseline\n\n tag\n Tag name for the baseline\n\n config\n Configuration name.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' snapper.create_baseline\n salt '*' snapper.create_baseline my_custom_baseline" - }, - { - "code": "def copycat(self,gridUnmapped=None,selectUnmapped=None,sourceColumn=None,\\\n\tsourceNetwork=None,targetColumn=None,targetNetwork=None,verbose=None):\n\t\tPARAMS=set_param(['gridUnmapped','selectUnmapped','sourceColumn',\\\n\t\t'sourceNetwork','targetColumn','targetNetwork'],[gridUnmapped,\\\n\t\tselectUnmapped,sourceColumn,sourceNetwork,targetColumn,targetNetwork])\n\t\tresponse=api(url=self.__url+\"/copycat\", PARAMS=PARAMS, method=\"POST\", verbose=verbose)\n\t\treturn response", - "docstring": "Sets the coordinates for each node in the target network to the coordinates\n\t\tof a matching node in the source network.\n\t\tOptional parameters such as gridUnmapped and selectUnmapped determine\n\t\tthe behavior of target network nodes that could not be matched.\n\n\t\t:param gridUnmapped (string, optional): If this is set to true, any nodes i\n\t\t\tn the target network that could not be matched to a node in the sour\n\t\t\tce network will be laid out in a grid\n\t\t:param selectUnmapped (string, optional): If this is set to true, any nodes\n\t\t\t\tin the target network that could not be matched to a node in the so\n\t\t\turce network will be selected in the target network\n\t\t:param sourceColumn (string): The name of column in the node table used to\n\t\t\tmatch nodes\n\t\t:param sourceNetwork (string): The name of network to get node coordinates\n\t\t\tfrom\n\t\t:param targetColumn (string): The name of column in the node table used to\n\t\t\tmatch nodes\n\t\t:param targetNetwork (string): The name of the network to apply coordinates\n\t\t\t\tto." - }, - { - "code": "def _infer_cell_type(self):\n _cell_type_dict = {'Multi-c-Si': 'multisi',\n 'Mono-c-Si': 'monosi',\n 'Thin Film': 'cigs',\n 'a-Si/nc': 'asi',\n 'CIS': 'cigs',\n 'CIGS': 'cigs',\n '1-a-Si': 'asi',\n 'CdTe': 'cdte',\n 'a-Si': 'asi',\n '2-a-Si': None,\n '3-a-Si': None,\n 'HIT-Si': 'monosi',\n 'mc-Si': 'multisi',\n 'c-Si': 'multisi',\n 'Si-Film': 'asi',\n 'EFG mc-Si': 'multisi',\n 'GaAs': None,\n 'a-Si / mono-Si': 'monosi'}\n if 'Technology' in self.module_parameters.keys():\n cell_type = _cell_type_dict[self.module_parameters['Technology']]\n elif 'Material' in self.module_parameters.keys():\n cell_type = _cell_type_dict[self.module_parameters['Material']]\n else:\n cell_type = None\n return cell_type", - "docstring": "Examines module_parameters and maps the Technology key for the CEC\n database and the Material key for the Sandia database to a common\n list of strings for cell type.\n\n Returns\n -------\n cell_type: str" - }, - { - "code": "def to_dict(self):\n return {\n 'hostname': self.hostname,\n 'port': self.port,\n 'transport': self.transport,\n 'virtual_host': self.virtual_host\n }", - "docstring": "Return a dictionary of the broker stats.\n\n Returns:\n dict: Dictionary of the stats." - }, - { - "code": "def _are_requirejs_deps_loaded(browser, deps):\n script = dedent(u\n).format(deps=json.dumps(list(deps)))\n browser.set_script_timeout(30)\n try:\n result = browser.execute_async_script(script)\n return result == 'Success'\n except TimeoutException:\n return False", - "docstring": "Return a boolean indicating whether all the RequireJS\n dependencies `deps` have loaded on the current page.\n\n `browser` is a WebDriver instance." 
- }, - { - "code": "def simple_merge(kls, skeletons):\n if len(skeletons) == 0:\n return PrecomputedSkeleton()\n if type(skeletons[0]) is np.ndarray:\n skeletons = [ skeletons ]\n ct = 0\n edges = []\n for skel in skeletons:\n edge = skel.edges + ct\n edges.append(edge)\n ct += skel.vertices.shape[0]\n return PrecomputedSkeleton(\n vertices=np.concatenate([ skel.vertices for skel in skeletons ], axis=0),\n edges=np.concatenate(edges, axis=0),\n radii=np.concatenate([ skel.radii for skel in skeletons ], axis=0),\n vertex_types=np.concatenate([ skel.vertex_types for skel in skeletons ], axis=0),\n segid=skeletons[0].id,\n )", - "docstring": "Simple concatenation of skeletons into one object \n without adding edges between them." - }, - { - "code": "def assembly(self, comments=False, symbolized=True):\n assembly = [ ]\n header = \"\\t.section\\t{section}\\n\\t.align\\t{alignment}\\n\".format(section=self.section,\n alignment=self.binary.section_alignment(self.section)\n )\n if self.addr is not None:\n procedure_name = \"%\n else:\n procedure_name = self._name\n header += \"\\t\n if self._output_function_label:\n if self.addr:\n function_label = self.binary.symbol_manager.new_label(self.addr)\n else:\n function_label = self.binary.symbol_manager.new_label(None, name=procedure_name, is_function=True)\n header += str(function_label) + \"\\n\"\n assembly.append((self.addr, header))\n if self.asm_code:\n s = self.asm_code\n assembly.append((self.addr, s))\n elif self.blocks:\n for b in sorted(self.blocks, key=lambda x:x.addr):\n s = b.assembly(comments=comments, symbolized=symbolized)\n assembly.append((b.addr, s))\n return assembly", - "docstring": "Get the assembly manifest of the procedure.\n\n :param comments:\n :param symbolized:\n :return: A list of tuples (address, basic block assembly), ordered by basic block addresses\n :rtype: list" - }, - { - "code": "def replace(self, key, val, time=0, min_compress_len=0):\n return self._set(\"replace\", key, val, time, min_compress_len)", - "docstring": "Replace existing key with value.\n\n Like L{set}, but only stores in memcache if the key already exists.\n The opposite of L{add}.\n\n @return: Nonzero on success.\n @rtype: int" - }, - { - "code": "def add_domain_name_list(list_name):\n payload = {\"jsonrpc\": \"2.0\",\n \"id\": \"ID0\",\n \"method\": \"add_policy_domain_names_list\",\n \"params\": [{\"list_name\": list_name}]}\n response = __proxy__['bluecoat_sslv.call'](payload, True)\n return _validate_change_result(response)", - "docstring": "Add a list of policy domain names.\n\n list_name(str): The name of the specific policy domain name list to add.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' bluecoat_sslv.add_domain_name_list MyDomainNameList" - }, - { - "code": "def render(self, sphinx_app: Sphinx, context):\n builder: StandaloneHTMLBuilder = sphinx_app.builder\n resource = sphinx_app.env.resources[self.docname]\n context['sphinx_app'] = sphinx_app\n context['widget'] = self\n context['resource'] = resource\n self.make_context(context, sphinx_app)\n template = self.template + '.html'\n html = builder.templates.render(template, context)\n return html", - "docstring": "Given a Sphinx builder and context with sphinx_app in it,\n generate HTML" - }, - { - "code": "def _mod_init(self, low):\n try:\n self.states['{0}.{1}'.format(low['state'], low['fun'])]\n except KeyError:\n return\n minit = '{0}.mod_init'.format(low['state'])\n if low['state'] not in self.mod_init:\n if minit in self.states._dict:\n mret = self.states[minit](low)\n if not mret:\n return\n self.mod_init.add(low['state'])", - "docstring": "Check the module initialization function, if this is the first run\n of a state package that has a mod_init function, then execute the\n mod_init function in the state module." - }, - { - "code": "def cmd_asterix(self, args):\n usage = \"usage: asterix \"\n if len(args) == 0:\n print(usage)\n return\n if args[0] == \"set\":\n self.asterix_settings.command(args[1:])\n elif args[0] == \"start\":\n self.start_listener()\n elif args[0] == \"stop\":\n self.stop_listener()\n elif args[0] == \"restart\":\n self.stop_listener()\n self.start_listener()\n elif args[0] == \"status\":\n self.print_status()\n else:\n print(usage)", - "docstring": "asterix command parser" - }, - { - "code": "def unique_event_labels(event_list):\n if isinstance(event_list, dcase_util.containers.MetaDataContainer):\n return event_list.unique_event_labels\n else:\n labels = []\n for event in event_list:\n if 'event_label' in event and event['event_label'] not in labels:\n labels.append(event['event_label'])\n labels.sort()\n return labels", - "docstring": "Find the unique event labels\n\n Parameters\n ----------\n event_list : list or dcase_util.containers.MetaDataContainer\n A list containing event dicts\n\n Returns\n -------\n list\n Unique labels in alphabetical order" - }, - { - "code": "def dict_keys_without_hyphens(a_dict):\n return dict(\n (key.replace('-', '_'), val) for key, val in a_dict.items())", - "docstring": "Return the a new dict with underscores instead of hyphens in keys." - }, - { - "code": "def update(self, other):\n acceptable_models = [ m for m in other._models if set(m.model.keys()) == self.variables ]\n self._models.update(acceptable_models)\n self._eval_exhausted.update(other._eval_exhausted)\n self._max_exhausted.update(other._max_exhausted)\n self._min_exhausted.update(other._min_exhausted)", - "docstring": "Updates this cache mixin with results discovered by the other split off one." - }, - { - "code": "def flag_field(self, move_x, move_y):\n field_status = self.info_map[move_y, move_x]\n if field_status != 9 and (field_status == 10 or field_status == 11):\n self.info_map[move_y, move_x] = 9", - "docstring": "Flag a grid by given position." - }, - { - "code": "def get_pause_time(self, speed=ETHER_SPEED_MBIT_1000):\n try:\n return self.pause_time * {\n ETHER_SPEED_MBIT_10: (0.0000001 * 512),\n ETHER_SPEED_MBIT_100: (0.00000001 * 512),\n ETHER_SPEED_MBIT_1000: (0.000000001 * 512 * 2)\n }[speed]\n except KeyError:\n raise MACControlInvalidSpeedException('Invalid speed selector given. 
'\n 'Must be one of ETHER_SPEED_MBIT_[10,100,1000]')", - "docstring": "get pause time for given link speed in seconds\n\n :param speed: select link speed to get the pause time for, must be ETHER_SPEED_MBIT_[10,100,1000] # noqa: E501\n :return: pause time in seconds\n :raises MACControlInvalidSpeedException: on invalid speed selector" - }, - { - "code": "def label_wrap(self, label):\n wrapped_label = r\"%s\\n%s\" % (label,\n self[label].name.replace(\",\", r\"\\n\"))\n return wrapped_label", - "docstring": "Label text for plot." - }, - { - "code": "def get_conf_attr(self, attr, default=None):\n if attr in self.conf:\n return self.conf[attr]\n else:\n return default", - "docstring": "Get the value of a attribute in the configuration\n\n :param attr: The attribute\n :param default: If the attribute doesn't appear in the configuration\n return this value\n :return: The value of attribute in the configuration or the default\n value" - }, - { - "code": "def decode_str(s, free=False):\n try:\n if s.len == 0:\n return u\"\"\n return ffi.unpack(s.data, s.len).decode(\"utf-8\", \"replace\")\n finally:\n if free:\n lib.semaphore_str_free(ffi.addressof(s))", - "docstring": "Decodes a SymbolicStr" - }, - { - "code": "def run_command(cmd_to_run):\n with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:\n popen = subprocess.Popen(cmd_to_run, stdout=stdout_file, stderr=stderr_file)\n popen.wait()\n stderr_file.seek(0)\n stdout_file.seek(0)\n stderr = stderr_file.read()\n stdout = stdout_file.read()\n if six.PY3:\n stderr = stderr.decode()\n stdout = stdout.decode()\n return stderr, stdout", - "docstring": "Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run`\n to temporary files. Using the temporary files gets around subprocess.PIPE's\n issues with handling large buffers.\n\n Note: this command will block the python process until `cmd_to_run` has completed.\n\n Returns a tuple, containing the stderr and stdout as strings." 
- }, - { - "code": "def set_stream_rates():\n if (not msg_period.trigger() and\n mpstate.status.last_streamrate1 == mpstate.settings.streamrate and\n mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):\n return\n mpstate.status.last_streamrate1 = mpstate.settings.streamrate\n mpstate.status.last_streamrate2 = mpstate.settings.streamrate2\n for master in mpstate.mav_master:\n if master.linknum == 0:\n rate = mpstate.settings.streamrate\n else:\n rate = mpstate.settings.streamrate2\n if rate != -1:\n master.mav.request_data_stream_send(mpstate.settings.target_system, mpstate.settings.target_component,\n mavutil.mavlink.MAV_DATA_STREAM_ALL,\n rate, 1)", - "docstring": "set mavlink stream rates" - }, - { - "code": "def levenshtein(str1, s2):\n N1 = len(str1)\n N2 = len(s2)\n stringRange = [range(N1 + 1)] * (N2 + 1)\n for i in range(N2 + 1):\n stringRange[i] = range(i,i + N1 + 1)\n for i in range(0,N2):\n for j in range(0,N1):\n if str1[j] == s2[i]:\n stringRange[i+1][j+1] = min(stringRange[i+1][j] + 1,\n stringRange[i][j+1] + 1,\n stringRange[i][j])\n else:\n stringRange[i+1][j+1] = min(stringRange[i+1][j] + 1,\n stringRange[i][j+1] + 1,\n stringRange[i][j] + 1)\n return stringRange[N2][N1]", - "docstring": "Distance between two strings" - }, - { - "code": "def _get_prev_next(self, urn):\n urn = URN(urn)\n subreference = None\n textId = urn.upTo(URN.NO_PASSAGE)\n if urn.reference is not None:\n subreference = str(urn.reference)\n previous, nextious = self.resolver.getSiblings(textId=textId, subreference=subreference)\n r = render_template(\n \"cts/GetPrevNext.xml\",\n prev_urn=previous,\n next_urn=nextious,\n urn=textId,\n request_urn=str(urn)\n )\n return r, 200, {\"content-type\": \"application/xml\"}", - "docstring": "Provisional route for GetPrevNext request\n\n :param urn: URN to filter the resource\n :param inv: Inventory Identifier\n :return: GetPrevNext response" - }, - { - "code": "def _readResponse(self):\n traps = []\n reply_word = None\n while reply_word != '!done':\n reply_word, words = self._readSentence()\n if reply_word == '!trap':\n traps.append(TrapError(**words))\n elif reply_word in ('!re', '!done') and words:\n yield words\n if len(traps) > 1:\n raise MultiTrapError(*traps)\n elif len(traps) == 1:\n raise traps[0]", - "docstring": "Yield each row of response untill !done is received.\n\n :throws TrapError: If one !trap is received.\n :throws MultiTrapError: If > 1 !trap is received." - }, - { - "code": "def target(self, project_module):\n assert isinstance(project_module, basestring)\n if project_module not in self.module2target:\n self.module2target[project_module] = \\\n b2.build.targets.ProjectTarget(project_module, project_module,\n self.attribute(project_module, \"requirements\"))\n return self.module2target[project_module]", - "docstring": "Returns the project target corresponding to the 'project-module'." 
- }, - { - "code": "def container_migrate(name,\n stop_and_start=False,\n remote_addr=None,\n cert=None,\n key=None,\n verify_cert=True,\n src_remote_addr=None,\n src_cert=None,\n src_key=None,\n src_verify_cert=None):\n if src_cert is None:\n src_cert = cert\n if src_key is None:\n src_key = key\n if src_verify_cert is None:\n src_verify_cert = verify_cert\n container = container_get(\n name, src_remote_addr, src_cert, src_key, src_verify_cert, _raw=True\n )\n dest_client = pylxd_client_get(\n remote_addr, cert, key, verify_cert\n )\n for pname in container.profiles:\n try:\n dest_client.profiles.get(pname)\n except pylxd.exceptions.LXDAPIException:\n raise SaltInvocationError(\n 'not all the profiles from the source exist on the target'\n )\n was_running = container.status_code == CONTAINER_STATUS_RUNNING\n if stop_and_start and was_running:\n container.stop(wait=True)\n try:\n dest_container = container.migrate(dest_client, wait=True)\n dest_container.profiles = container.profiles\n dest_container.save()\n except pylxd.exceptions.LXDAPIException as e:\n raise CommandExecutionError(six.text_type(e))\n container.delete(wait=True)\n if stop_and_start and was_running:\n dest_container.start(wait=True)\n return _pylxd_model_to_dict(dest_container)", - "docstring": "Migrate a container.\n\n If the container is running, it either must be shut down\n first (use stop_and_start=True) or criu must be installed\n on the source and destination machines.\n\n For this operation both certs need to be authenticated,\n use :mod:`lxd.authenticate ~/.config/lxc/client.crt ~/.config/lxc/client.key false\n salt '*' lxd.authenticate https://srv02:8443 ~/.config/lxc/client.crt ~/.config/lxc/client.key false\n\n # Migrate phpmyadmin from srv01 to srv02\n salt '*' lxd.container_migrate phpmyadmin stop_and_start=true remote_addr=https://srv02:8443 cert=~/.config/lxc/client.crt key=~/.config/lxc/client.key verify_cert=False src_remote_addr=https://srv01:8443" - }, - { - "code": "def parse_limit(limit_def):\n lower, upper = get_limits(limit_def)\n reaction = limit_def.get('reaction')\n return reaction, lower, upper", - "docstring": "Parse a structured flux limit definition as obtained from a YAML file\n\n Returns a tuple of reaction, lower and upper bound." - }, - { - "code": "def GetUsername(self, event, default_username='-'):\n username = getattr(event, 'username', None)\n if username and username != '-':\n return username\n session_identifier = event.GetSessionIdentifier()\n if session_identifier is None:\n return default_username\n user_sid = getattr(event, 'user_sid', None)\n username = self._knowledge_base.GetUsernameByIdentifier(\n user_sid, session_identifier=session_identifier)\n return username or default_username", - "docstring": "Retrieves the username related to the event.\n\n Args:\n event (EventObject): event.\n default_username (Optional[str]): default username.\n\n Returns:\n str: username." - }, - { - "code": "def get_api_keys(self, api_id, stage_name):\n response = self.apigateway_client.get_api_keys(limit=500)\n stage_key = '{}/{}'.format(api_id, stage_name)\n for api_key in response.get('items'):\n if stage_key in api_key.get('stageKeys'):\n yield api_key.get('id')", - "docstring": "Generator that allows to iterate per API keys associated to an api_id and a stage_name." 
- }, - { - "code": "def ensure_loopback_device(path, size):\n for d, f in six.iteritems(loopback_devices()):\n if f == path:\n return d\n if not os.path.exists(path):\n cmd = ['truncate', '--size', size, path]\n check_call(cmd)\n return create_loopback(path)", - "docstring": "Ensure a loopback device exists for a given backing file path and size.\n If it a loopback device is not mapped to file, a new one will be created.\n\n TODO: Confirm size of found loopback device.\n\n :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)" - }, - { - "code": "def geojson(self, feature_id):\n lat, lon = self.lat_lon\n return {\n 'type': 'Feature',\n 'id': feature_id,\n 'geometry': {\n 'type': 'Point',\n 'coordinates': (lon, lat),\n },\n }", - "docstring": "GeoJSON representation of the marker as a point." - }, - { - "code": "def ensure_container(self, name=None):\n if get_lxc_version() < pkg_resources.parse_version('2.0.0'):\n return\n if name is None:\n name = self.container_name\n args = [\n 'lxc-create',\n '--name', name,\n '--template', 'none',\n '>', '/dev/null', '2>&1',\n ]\n os.system(' '.join(args))", - "docstring": "Make sure container exists. It's only needed on newer\n versions of LXC." - }, - { - "code": "def ttl(self, key, **opts):\n key, store = self._expand_opts(key, opts)\n if hasattr(store, 'ttl'):\n return store.ttl(key)\n data = store.get(key)\n if data is None:\n return None\n expiry = data[EXPIRY_INDEX]\n if expiry is not None:\n return max(0, expiry - time()) or None", - "docstring": "Get the time-to-live of a given key; None if not set." - }, - { - "code": "def get_all_plus_and_delete(self):\n res = {}\n props = list(self.plus.keys())\n for prop in props:\n res[prop] = self.get_plus_and_delete(prop)\n return res", - "docstring": "Get all self.plus items of list. We copy it, delete the original and return the copy list\n\n :return: list of self.plus\n :rtype: list" - }, - { - "code": "def drop_duplicates(self, keep='first', inplace=False):\n return super().drop_duplicates(keep=keep, inplace=inplace)", - "docstring": "Return Series with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n inplace : bool, default ``False``\n If ``True``, performs operation inplace and returns None.\n\n Returns\n -------\n Series\n Series with duplicates dropped.\n\n See Also\n --------\n Index.drop_duplicates : Equivalent method on Index.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Series.duplicated : Related method on Series, indicating duplicate\n Series values.\n\n Examples\n --------\n Generate an Series with duplicated entries.\n\n >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],\n ... name='animal')\n >>> s\n 0 lama\n 1 cow\n 2 lama\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n With the 'keep' parameter, the selection behaviour of duplicated values\n can be changed. The value 'first' keeps the first occurrence for each\n set of duplicated entries. 
The default value of keep is 'first'.\n\n >>> s.drop_duplicates()\n 0 lama\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n\n The value 'last' for parameter 'keep' keeps the last occurrence for\n each set of duplicated entries.\n\n >>> s.drop_duplicates(keep='last')\n 1 cow\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n The value ``False`` for parameter 'keep' discards all sets of\n duplicated entries. Setting the value of 'inplace' to ``True`` performs\n the operation inplace and returns ``None``.\n\n >>> s.drop_duplicates(keep=False, inplace=True)\n >>> s\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object" - }, - { - "code": "def background_chart(chart, foreground, colors):\n def convert_background(entry):\n try:\n attr = urwid.AttrSpec(foreground, entry, colors)\n except urwid.AttrSpecError:\n return None\n if colors > 16 and attr.background_basic and \\\n attr.background_number >= 8:\n entry = 'h%d'%attr.background_number\n attr = urwid.AttrSpec(foreground, entry, colors)\n return attr, entry\n return parse_chart(chart, convert_background)", - "docstring": "Create text markup for a background colour chart\n\n chart -- palette chart as string\n foreground -- colour to use for foreground of chart\n colors -- number of colors (88 or 256)\n\n This will remap 8 <= colour < 16 to high-colour versions\n in the hopes of greater compatibility" - }, - { - "code": "def plot(self, sizescale=10, color=None, alpha=0.5, label=None, edgecolor='none', **kw):\n size = np.maximum(sizescale*(1 + self.magnitudelimit - self.magnitude), 1)\n scatter = plt.scatter(self.ra, self.dec,\n s=size,\n color=color or self.color,\n label=label or '{} ({:.1f})'.format(self.name, self.epoch),\n alpha=alpha,\n edgecolor=edgecolor,\n **kw)\n return scatter", - "docstring": "Plot the ra and dec of the coordinates,\n at a given epoch, scaled by their magnitude.\n\n (This does *not* create a new empty figure.)\n\n Parameters\n ----------\n sizescale : (optional) float\n The marker size for scatter for a star at the magnitudelimit.\n color : (optional) any valid color\n The color to plot (but there is a default for this catalog.)\n **kw : dict\n Additional keywords will be passed on to plt.scatter.\n\n Returns\n -------\n\n plotted : outputs from the plots" - }, - { - "code": "def from_object(self, instance: Union[object, str]) -> None:\n if isinstance(instance, str):\n try:\n path, config = instance.rsplit('.', 1)\n except ValueError:\n path = instance\n instance = importlib.import_module(path)\n else:\n module = importlib.import_module(path)\n instance = getattr(module, config)\n for key in dir(instance):\n if key.isupper():\n self[key] = getattr(instance, key)", - "docstring": "Load the configuration from a Python object.\n\n This can be used to reference modules or objects within\n modules for example,\n\n .. code-block:: python\n\n app.config.from_object('module')\n app.config.from_object('module.instance')\n from module import instance\n app.config.from_object(instance)\n\n are valid.\n\n Arguments:\n instance: Either a str referencing a python object or the\n object itself." 
- }, - { - "code": "async def scan_for_devices_multi_run(self, runs=2):\n run = 1\n master = {}\n while run < runs + 1:\n await self.scan_for_devices()\n await self.get_scan_result()\n if master is None:\n for device in self._devices:\n mac = device['mac_address']\n master[mac] = {}\n master[mac]['rssi'] = device['rssi']\n master[mac]['device_class'] = device['device_class']\n master[mac]['name'] = device['name']\n master[mac]['device_type'] = device['device_type']\n master[mac]['count'] = 1\n else:\n for device in self._devices:\n mac = device['mac_address']\n if master.get(mac, False):\n master[mac]['rssi'] = device['rssi']\n master[mac]['count'] = str(1 + 1)\n else:\n master[mac] = {}\n master[mac]['rssi'] = device['rssi']\n master[mac]['device_class'] = device['device_class']\n master[mac]['name'] = device['name']\n master[mac]['device_type'] = device['device_type']\n master[mac]['count'] = 1\n run = run + 1\n result = []\n for device in master:\n if int(master[device]['count']) > 1:\n result.append(master[device])\n self._devices = result", - "docstring": "Scan for devices multiple times." - }, - { - "code": "def validate_item_cmd(description, price, quantity, reference=None):\n return ValidateItemCmd(description=description, price=price, quantity=quantity, reference=reference)", - "docstring": "Create a commando to save items from the order.\n A list of items or commands must be created to save a order\n @param description: Item's description\n @param price: Item's price\n @param quantity: Item's quantity\n @param reference: a product reference for the item. Must be a Node\n @return: A Command that validate and save a item" - }, - { - "code": "def bunzip2(filename):\n log.debug(\"Uncompressing %s\", filename)\n tmpfile = \"%s.tmp\" % filename\n os.rename(filename, tmpfile)\n b = bz2.BZ2File(tmpfile)\n f = open(filename, \"wb\")\n while True:\n block = b.read(512 * 1024)\n if not block:\n break\n f.write(block)\n f.close()\n b.close()\n shutil.copystat(tmpfile, filename)\n shutil.copymode(tmpfile, filename)\n os.unlink(tmpfile)", - "docstring": "Uncompress `filename` in place" - }, - { - "code": "def _draw_single_connector(context, width, height):\n c = context\n arrow_height = height / 2.0\n c.rel_move_to(-width / 2., height / 2.)\n c.rel_line_to(width, 0)\n c.rel_line_to(0, -(height - arrow_height))\n c.rel_line_to(-width / 2., -arrow_height)\n c.rel_line_to(-width / 2., arrow_height)\n c.close_path()", - "docstring": "Draw the connector for execution states\n\n Connector for execution states can only be connected to the outside. 
Thus the connector fills the whole\n border of the state.\n\n :param context: Cairo context\n :param float port_size: The side length of the port" - }, - { - "code": "def comment_urlview(self):\n data = self.get_selected_item()\n comment = data.get('body') or data.get('text') or data.get('url_full')\n if comment:\n self.term.open_urlview(comment)\n else:\n self.term.flash()", - "docstring": "Open the selected comment with the URL viewer" - }, - { - "code": "def help(self, command=None):\n from spython.utils import check_install\n check_install()\n cmd = ['singularity','--help']\n if command != None:\n cmd.append(command)\n help = self._run_command(cmd)\n return help", - "docstring": "help prints the general function help, or help for a specific command\n\n Parameters\n ========== \n command: the command to get help for, if none, prints general help" - }, - { - "code": "def get_error(self):\n err_str = hidapi.hid_error(self._device)\n if err_str == ffi.NULL:\n return None\n else:\n return ffi.string(err_str)", - "docstring": "Get an error string from the device" - }, - { - "code": "def as_dtype(self):\n return np.dtype(dict(\n names=self.names,\n formats=[v.dtype for v in self.attributes.values()]))", - "docstring": "represent the heading as a numpy dtype" - }, - { - "code": "def convert(self, targetunits):\n nunits = units.Units(targetunits)\n self.waveunits = nunits", - "docstring": "Set new user unit, for wavelength only.\n\n This effectively converts the spectrum wavelength\n to given unit. Note that actual data are always kept in\n internal unit (Angstrom), and only converted\n to user unit by :meth:`GetWaveSet` during actual computation.\n User unit is stored in ``self.waveunits``.\n Throughput is unitless and cannot be converted.\n\n Parameters\n ----------\n targetunits : str\n New unit name, as accepted by `~pysynphot.units.Units`." 
- }, - { - "code": "def load_api_folder(api_folder_path):\n api_definition_mapping = {}\n api_items_mapping = load_folder_content(api_folder_path)\n for api_file_path, api_items in api_items_mapping.items():\n if isinstance(api_items, list):\n for api_item in api_items:\n key, api_dict = api_item.popitem()\n api_id = api_dict.get(\"id\") or api_dict.get(\"def\") or api_dict.get(\"name\")\n if key != \"api\" or not api_id:\n raise exceptions.ParamsError(\n \"Invalid API defined in {}\".format(api_file_path))\n if api_id in api_definition_mapping:\n raise exceptions.ParamsError(\n \"Duplicated API ({}) defined in {}\".format(api_id, api_file_path))\n else:\n api_definition_mapping[api_id] = api_dict\n elif isinstance(api_items, dict):\n if api_file_path in api_definition_mapping:\n raise exceptions.ParamsError(\n \"Duplicated API defined: {}\".format(api_file_path))\n else:\n api_definition_mapping[api_file_path] = api_items\n return api_definition_mapping", - "docstring": "load api definitions from api folder.\n\n Args:\n api_folder_path (str): api files folder.\n\n api file should be in the following format:\n [\n {\n \"api\": {\n \"def\": \"api_login\",\n \"request\": {},\n \"validate\": []\n }\n },\n {\n \"api\": {\n \"def\": \"api_logout\",\n \"request\": {},\n \"validate\": []\n }\n }\n ]\n\n Returns:\n dict: api definition mapping.\n\n {\n \"api_login\": {\n \"function_meta\": {\"func_name\": \"api_login\", \"args\": [], \"kwargs\": {}}\n \"request\": {}\n },\n \"api_logout\": {\n \"function_meta\": {\"func_name\": \"api_logout\", \"args\": [], \"kwargs\": {}}\n \"request\": {}\n }\n }" - }, - { - "code": "def get_image(cls, url):\r\n from PIL.ImageFile import Parser as PILParser\r\n length = 0\r\n raw_image = None\r\n with closing(request.get(url, stream=True)) as response:\r\n response.raise_for_status()\r\n response_url = response.url\r\n parser = PILParser()\r\n for chunk in response.iter_content(config.CHUNK_SIZE):\r\n length += len(chunk)\r\n if length > config.IMAGE_MAX_BYTESIZE:\r\n del parser\r\n raise cls.MaxBytesException\r\n parser.feed(chunk)\r\n if parser.image and parser.image.size:\r\n raw_image = parser.image\r\n del parser\n break\r\n if length == 0:\r\n raise cls.ZeroBytesException\r\n if not raw_image:\r\n raise cls.NoImageException\r\n image = Image(response_url, raw_image.size, raw_image.format)\r\n return image", - "docstring": "Returned Image instance has response url.\r\n This might be different than the url param because of redirects." - }, - { - "code": "def _metric_to_riemann_event(self, metric):\n path = '%s.%s.%s' % (\n metric.getPathPrefix(),\n metric.getCollectorPath(),\n metric.getMetricPath()\n )\n return self.client.create_event({\n 'host': metric.host,\n 'service': path,\n 'time': metric.timestamp,\n 'metric_f': float(metric.value),\n 'ttl': metric.ttl,\n })", - "docstring": "Convert a metric to a dictionary representing a Riemann event." - }, - { - "code": "def handle_events(self):\n for event in sys.stdin:\n if event.startswith('['):\n continue\n name = json.loads(event.lstrip(','))['name']\n for obj in self.loader.objects:\n if obj.output_options['name'] == name:\n obj.on_click(json.loads(event.lstrip(',')))", - "docstring": "An event handler that processes events from stdin and calls the on_click\n function of the respective object. This function is run in another\n thread, so as to not stall the main thread." 
- }, - { - "code": "def end_comma(self, value):\n if not isinstance(value, bool):\n raise TypeError('end_comma attribute must be a logical type.')\n self._end_comma = value", - "docstring": "Validate and set the comma termination flag." - }, - { - "code": "def open_stream(self, class_attr_name=None, fn=None):\n if fn:\n self.fout_fn = fn\n else:\n fd, self.fout_fn = tempfile.mkstemp()\n os.close(fd)\n self.fout = open(self.fout_fn, 'w')\n if class_attr_name:\n self.class_attr_name = class_attr_name\n self.write(fout=self.fout, schema_only=True)\n self.write(fout=self.fout, data_only=True)\n self.fout.flush()", - "docstring": "Save an arff structure to a file, leaving the file object\n open for writing of new data samples.\n This prevents you from directly accessing the data via Python,\n but when generating a huge file, this prevents all your data\n from being stored in memory." - }, - { - "code": "def mac():\n from uuid import getnode as get_mac\n return ':'.join((\"%012x\" % get_mac())[i:i+2] for i in range(0, 12, 2))", - "docstring": "Get MAC." - }, - { - "code": "def search(self, **kwargs):\n return super(ApiObjectGroupPermission, self).get(self.prepare_url('api/v3/object-group-perm/',\n kwargs))", - "docstring": "Method to search object group permissions based on extends search.\n\n :param search: Dict containing QuerySets to find object group permissions.\n :param include: Array containing fields to include on response.\n :param exclude: Array containing fields to exclude on response.\n :param fields: Array containing fields to override default fields.\n :param kind: Determine if result will be detailed ('detail') or basic ('basic').\n :return: Dict containing object group permissions" - }, - { - "code": "def dataset_to_stream(dataset, input_name, num_chunks=0, append_targets=False):\n for example in tfds.as_numpy(dataset):\n inp, out = example[0][input_name], example[1]\n if len(out.shape) > 1 and out.shape[-1] == 1:\n out = np.squeeze(out, axis=-1)\n if num_chunks > 0:\n inp = np.split(inp, num_chunks, axis=1)\n out = np.split(out, num_chunks, axis=1)\n if append_targets:\n inp = (inp, out)\n yield inp, out", - "docstring": "Takes a tf.Dataset and creates a numpy stream of ready batches." - }, - { - "code": "async def destroy(self):\n app_facade = client.ApplicationFacade.from_connection(self.connection)\n log.debug(\n 'Destroying %s', self.name)\n return await app_facade.Destroy(self.name)", - "docstring": "Remove this application from the model." 
- }, - { - "code": "def refresh_db(cache_valid_time=0, failhard=False, **kwargs):\n salt.utils.pkg.clear_rtag(__opts__)\n failhard = salt.utils.data.is_true(failhard)\n ret = {}\n error_repos = list()\n if cache_valid_time:\n try:\n latest_update = os.stat(APT_LISTS_PATH).st_mtime\n now = time.time()\n log.debug(\"now: %s, last update time: %s, expire after: %s seconds\", now, latest_update, cache_valid_time)\n if latest_update + cache_valid_time > now:\n return ret\n except TypeError as exp:\n log.warning(\"expected integer for cache_valid_time parameter, failed with: %s\", exp)\n except IOError as exp:\n log.warning(\"could not stat cache directory due to: %s\", exp)\n call = _call_apt(['apt-get', '-q', 'update'], scope=False)\n if call['retcode'] != 0:\n comment = ''\n if 'stderr' in call:\n comment += call['stderr']\n raise CommandExecutionError(comment)\n else:\n out = call['stdout']\n for line in out.splitlines():\n cols = line.split()\n if not cols:\n continue\n ident = ' '.join(cols[1:])\n if 'Get' in cols[0]:\n ident = re.sub(r' \\[.+B\\]$', '', ident)\n ret[ident] = True\n elif 'Ign' in cols[0]:\n ret[ident] = False\n elif 'Hit' in cols[0]:\n ret[ident] = None\n elif 'Err' in cols[0]:\n ret[ident] = False\n error_repos.append(ident)\n if failhard and error_repos:\n raise CommandExecutionError('Error getting repos: {0}'.format(', '.join(error_repos)))\n return ret", - "docstring": "Updates the APT database to latest packages based upon repositories\n\n Returns a dict, with the keys being package databases and the values being\n the result of the update attempt. Values can be one of the following:\n\n - ``True``: Database updated successfully\n - ``False``: Problem updating database\n - ``None``: Database already up-to-date\n\n cache_valid_time\n\n .. versionadded:: 2016.11.0\n\n Skip refreshing the package database if refresh has already occurred within\n seconds\n\n failhard\n\n If False, return results of Err lines as ``False`` for the package database that\n encountered the error.\n If True, raise an error with a list of the package databases that encountered\n errors.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.refresh_db" - }, - { - "code": "def sectionsWord(self,walkTrace=tuple(),case=None,element=None,doc=None):\n from docx.shared import Inches\n from io import BytesIO\n if case == 'sectionmain':\n if self.settings['clearpage']: doc.add_page_break()\n doc.add_heading(self.title, level = len(walkTrace))\n for p in renewliner(self.p).split('\\n'):\n doc.add_paragraph(p)\n if case == 'figure':\n bf=BytesIO()\n figtitle,fig = element\n width = fig.get_size_inches()[0]\n width = Inches(width if width < 6 else 6)\n fig.savefig(bf)\n doc.add_picture(bf, width=width)\n doc.add_heading('Figure {}: {}'.format(\n fig._leopardref,\n figtitle),level=6)\n if case == 'table':\n caption,t = element\n tableref = t._leopardref\n t = pdSeriesToFrame(t) if type(t) == pd.Series else t\n if self.settings['tablehead']:\n t = t.head(self.settings['tablehead'])\n if self.settings['tablecolumns']:\n t = t[self.settings['tablecolumns']]\n doc.add_heading('Table {}: {}'.format(\n tableref,\n caption),level=6)\n table = doc.add_table(t.shape[0]+1,t.shape[1]+1)\n for tcell,col in zip(table.rows[0].cells[1:],t.columns):\n tcell.text = str(col)\n for trow,rrow in zip(table.rows[1:],t.to_records()):\n for tcell,rcell in zip(trow.cells,rrow):\n tcell.text = str(rcell)", - "docstring": "Prepares section for word output." 
- }, - { - "code": "def _append_expectation(self, expectation_config):\n expectation_type = expectation_config['expectation_type']\n json.dumps(expectation_config)\n if 'column' in expectation_config['kwargs']:\n column = expectation_config['kwargs']['column']\n self._expectations_config.expectations = [f for f in filter(\n lambda exp: (exp['expectation_type'] != expectation_type) or (\n 'column' in exp['kwargs'] and exp['kwargs']['column'] != column),\n self._expectations_config.expectations\n )]\n else:\n self._expectations_config.expectations = [f for f in filter(\n lambda exp: exp['expectation_type'] != expectation_type,\n self._expectations_config.expectations\n )]\n self._expectations_config.expectations.append(expectation_config)", - "docstring": "Appends an expectation to `DataAsset._expectations_config` and drops existing expectations of the same type.\n\n If `expectation_config` is a column expectation, this drops existing expectations that are specific to \\\n that column and only if it is the same expectation type as `expectation_config`. Otherwise, if it's not a \\\n column expectation, this drops existing expectations of the same type as `expectation config`. \\\n After expectations of the same type are dropped, `expectation_config` is appended to `DataAsset._expectations_config`.\n\n Args:\n expectation_config (json): \\\n The JSON-serializable expectation to be added to the DataAsset expectations in `_expectations_config`.\n\n Notes:\n May raise future errors once json-serializable tests are implemented to check for correct arg formatting" - }, - { - "code": "def _syscal_write_quadpoles(fid, quadpoles):\n fid.write('\n for nr, quadpole in enumerate(quadpoles):\n fid.write(\n '{0} {1} {2} {3} {4}\\n'.format(\n nr, quadpole[0], quadpole[1], quadpole[2], quadpole[3]))", - "docstring": "helper function that writes the actual measurement configurations to a\n file descriptor.\n\n Parameters\n ----------\n fid: file descriptor\n data is written here\n quadpoles: numpy.ndarray\n measurement configurations" - }, - { - "code": "def get_form(self, request, obj=None, **kwargs):\n language = get_language_from_request(request)\n form = super(ArticleAdmin, self).get_form(\n request, obj,\n form=(obj and ArticleForm or ArticleCreateForm),\n **kwargs\n )\n for field in tuple(form.base_fields.keys()):\n form.base_fields[field] = copy.deepcopy(form.base_fields[field])\n if 'language' in form.base_fields:\n form.base_fields['language'].initial = language\n if obj:\n title_obj = obj.get_title_obj(language=language, fallback=False, force_reload=True)\n if hasattr(title_obj, 'id'):\n for name in ('title', 'description', 'page_title', 'menu_title', 'meta_description', 'image'):\n if name in form.base_fields:\n form.base_fields[name].initial = getattr(title_obj, name)\n try:\n slug = self.SLUG_REGEXP.search(title_obj.slug).groups()[settings.CMS_ARTICLES_SLUG_GROUP_INDEX]\n except AttributeError:\n warnings.warn('Failed to parse slug from CMS_ARTICLES_SLUG_REGEXP. '\n 'It probably doesn\\'t correspond to CMS_ARTICLES_SLUG_FORMAT.')\n slug = title_obj.slug\n form.base_fields['slug'].initial = slug\n return form", - "docstring": "Get ArticleForm for the Article model and modify its fields depending on\n the request." - }, - { - "code": "def get_account_from_name(self, name):\n for account in self.accounts:\n if account.get_name() == name:\n return account\n return None", - "docstring": "Returns the account with the given name.\n\n :type name: string\n :param name: The name of the account." 
- }, - { - "code": "def usable_id(cls, id, datacenter=None):\n try:\n qry_id = int(id)\n except Exception:\n qry_id = cls.from_sysdisk(id) or cls.from_label(id, datacenter)\n if not qry_id:\n msg = 'unknown identifier %s' % id\n cls.error(msg)\n return qry_id", - "docstring": "Retrieve id from input which can be label or id." - }, - { - "code": "def average(arr):\n if len(arr) == 0:\n sys.stderr.write(\"ERROR: no content in array to take average\\n\")\n sys.exit()\n if len(arr) == 1: return arr[0]\n return float(sum(arr))/float(len(arr))", - "docstring": "average of the values, must have more than 0 entries.\n\n :param arr: list of numbers\n :type arr: number[] a number array\n :return: average\n :rtype: float" - }, - { - "code": "def _get_pltdag_ancesters(self, hdrgo, usrgos, desc=\"\"):\n go_srcs = usrgos.union([hdrgo])\n gosubdag = GoSubDag(go_srcs,\n self.gosubdag.get_go2obj(go_srcs),\n relationships=self.gosubdag.relationships,\n rcntobj=self.gosubdag.rcntobj,\n go2nt=self.gosubdag.go2nt)\n tot_usrgos = len(set(gosubdag.go2obj.keys()).intersection(self.usrgos))\n return self.ntpltgo(\n hdrgo=hdrgo,\n gosubdag=gosubdag,\n tot_usrgos=tot_usrgos,\n parentcnt=False,\n desc=desc)", - "docstring": "Get GoSubDag containing hdrgo and all usrgos and their ancesters." - }, - { - "code": "def verify_gpg_version():\n existing_gpg = keyring.gpg_version().decode('ascii')\n required_gpg = '>=2.1.11'\n msg = 'Existing GnuPG has version \"{}\" ({} required)'.format(existing_gpg,\n required_gpg)\n if not semver.match(existing_gpg, required_gpg):\n log.error(msg)", - "docstring": "Make sure that the installed GnuPG is not too old." - }, - { - "code": "def bodc2s(code, lenout=_default_len_out):\n code = ctypes.c_int(code)\n name = stypes.stringToCharP(\" \" * lenout)\n lenout = ctypes.c_int(lenout)\n libspice.bodc2s_c(code, lenout, name)\n return stypes.toPythonString(name)", - "docstring": "Translate a body ID code to either the corresponding name or if no\n name to ID code mapping exists, the string representation of the\n body ID value.\n\n http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodc2s_c.html\n\n :param code: Integer ID code to translate to a string.\n :type code: int\n :param lenout: Maximum length of output name.\n :type lenout: int\n :return: String corresponding to 'code'.\n :rtype: str" - }, - { - "code": "def oauth_register(form):\n if form.validate():\n data = form.to_dict()\n if not data.get('password'):\n data['password'] = ''\n user = register_user(**data)\n if not data['password']:\n user.password = None\n _datastore.commit()\n return user", - "docstring": "Register user if possible.\n\n :param form: A form instance.\n :returns: A :class:`invenio_accounts.models.User` instance." - }, - { - "code": "def read_sample_name(line_iter, clean_fn):\n try:\n while True:\n new_line = next(line_iter)\n new_line = new_line.strip()\n if 'BaseDistributionByCycle' in new_line and 'INPUT' in new_line:\n fn_search = re.search(r\"INPUT=?\\s*(\\[?[^\\s]+\\]?)\", new_line, flags=re.IGNORECASE)\n if fn_search:\n s_name = os.path.basename(fn_search.group(1).strip('[]'))\n s_name = clean_fn(s_name)\n return s_name\n except StopIteration:\n return None", - "docstring": "Consumes lines from the provided line_iter and parses those lines\n as a header for the picard base distribution file. The header\n file is assumed to contain a line with both 'INPUT' and\n 'BaseDistributionByCycle'.\n\n If the header parses correctly, the sample name is returned. 
If\n the header does not parse correctly, None is returned." - }, - { - "code": "def markdown(self, text, gfm=False, project=None, **kwargs):\n post_data = {'text': text, 'gfm': gfm}\n if project is not None:\n post_data['project'] = project\n data = self.http_post('/markdown', post_data=post_data, **kwargs)\n return data['html']", - "docstring": "Render an arbitrary Markdown document.\n\n Args:\n text (str): The markdown text to render\n gfm (bool): Render text using GitLab Flavored Markdown. Default is\n False\n project (str): Full path of a project used a context when `gfm` is\n True\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabMarkdownError: If the server cannot perform the request\n\n Returns:\n str: The HTML rendering of the markdown text." - }, - { - "code": "def connect(self, creds):\n return BlockBlobService(account_name=creds.account_name,\n account_key=creds.account_key,\n sas_token=creds.access_token,\n protocol='https')", - "docstring": "Return an azure BlockBlobService instance." - }, - { - "code": "def est_payouts(self):\n if len(self.choices) < 1:\n print('slots: No trials run so far.')\n return None\n else:\n return self.wins/(self.pulls+0.1)", - "docstring": "Calculate current estimate of average payout for each bandit.\n\n Returns\n -------\n array of floats or None" - }, - { - "code": "def disassociate_address(self, public_ip=None, association_id=None):\n params = {}\n if public_ip is not None:\n params['PublicIp'] = public_ip\n elif association_id is not None:\n params['AssociationId'] = association_id\n return self.get_status('DisassociateAddress', params, verb='POST')", - "docstring": "Disassociate an Elastic IP address from a currently running instance.\n\n :type public_ip: string\n :param public_ip: The public IP address for EC2 elastic IPs.\n\n :type association_id: string\n :param association_id: The association ID for a VPC based elastic ip.\n\n :rtype: bool\n :return: True if successful" - }, - { - "code": "def md5sum(content):\n md5 = hashlib.md5()\n md5.update(force_bytes(content))\n return md5.hexdigest()", - "docstring": "Calculate and returns an MD5 checksum for the specified content.\n\n :param content: text content\n :returns: hex-digest formatted MD5 checksum as a string" - }, - { - "code": "def timestamp(self, message=\"\", checkpoint=None,\n finished=False, raise_error=True):\n if self.halt_on_next:\n self.halt(checkpoint, finished, raise_error=raise_error)\n if checkpoint:\n if finished:\n self._checkpoint(checkpoint)\n self.prev_checkpoint = checkpoint\n self.curr_checkpoint = None\n else:\n self.prev_checkpoint = self.curr_checkpoint\n self.curr_checkpoint = checkpoint\n self._checkpoint(self.prev_checkpoint)\n if (finished and checkpoint == self.stop_after) or (not finished and checkpoint == self.stop_before):\n self.halt(checkpoint, finished, raise_error=raise_error)\n elif checkpoint == self.start_point:\n self._active = True\n if not finished and checkpoint == self.stop_after:\n self.halt_on_next = True\n elapsed = self.time_elapsed(self.last_timestamp)\n t = time.strftime(\"%m-%d %H:%M:%S\")\n if checkpoint is None:\n msg = \"{m} ({t}) elapsed: {delta_t} _TIME_\".\\\n format(m=message, t=t, delta_t=elapsed)\n else:\n msg = \"{m} ({t}) ({status} {stage}) elapsed: {delta_t} _TIME_\".\\\n format(m=message, t=t,\n status=\"finished\" if finished else \"starting\",\n stage=checkpoint, delta_t=elapsed)\n if re.match(\"^###\", message):\n msg = \"\\n{}\\n\".format(msg)\n 
print(msg)\n self.last_timestamp = time.time()", - "docstring": "Print message, time, and time elapsed, perhaps creating checkpoint.\n\n This prints your given message, along with the current time, and time\n elapsed since the previous timestamp() call. If you specify a\n HEADING by beginning the message with \"###\", it surrounds the message\n with newlines for easier readability in the log file. If a checkpoint\n is designated, an empty file is created corresponding to the name\n given. Depending on how this manager's been configured, the value of\n the checkpoint, and whether this timestamp indicates initiation or\n completion of a group of pipeline steps, this call may stop the\n pipeline's execution.\n\n :param str message: Message to timestamp.\n :param str checkpoint: Name of checkpoint; this tends to be something\n that reflects the processing logic about to be or having just been\n completed. Provision of an argument to this parameter means that\n a checkpoint file will be created, facilitating arbitrary starting\n and stopping point for the pipeline as desired.\n :param bool finished: Whether this call represents the completion of a\n conceptual unit of a pipeline's processing\n :param raise_error: Whether to raise exception if\n checkpoint or current state indicates that a halt should occur." - }, - { - "code": "def per_distro_data(self):\n ret = {}\n for cache_date in self.cache_dates:\n data = self._cache_get(cache_date)\n ret[cache_date] = {}\n for distro_name, distro_data in data['by_distro'].items():\n if distro_name.lower() == 'red hat enterprise linux server':\n distro_name = 'RHEL'\n for distro_ver, count in distro_data.items():\n ver = self._shorten_version(distro_ver, num_components=1)\n if distro_name.lower() == 'os x':\n ver = self._shorten_version(distro_ver,\n num_components=2)\n k = self._compound_column_value(distro_name, ver)\n ret[cache_date][k] = count\n if len(ret[cache_date]) == 0:\n ret[cache_date]['unknown'] = 0\n return ret", - "docstring": "Return download data by distro name and version.\n\n :return: dict of cache data; keys are datetime objects, values are\n dict of distro name/version (str) to count (int).\n :rtype: dict" - }, - { - "code": "def create(self, body):\n return self.client.post(self._url(), data=body)", - "docstring": "Creates a new connection.\n\n Args:\n body (dict): Attributes used to create the connection. Mandatory\n attributes are: 'name' and 'strategy'.\n See: https://auth0.com/docs/api/management/v2#!/Connections/post_connections" - }, - { - "code": "def getKey(self, namespace, ns_key):\n namespace = self._fixNS(namespace)\n if namespace == BARE_NS:\n return ns_key\n ns_alias = self.namespaces.getAlias(namespace)\n if ns_alias is None:\n return None\n if ns_alias == NULL_NAMESPACE:\n tail = ns_key\n else:\n tail = '%s.%s' % (ns_alias, ns_key)\n return 'openid.' 
+ tail", - "docstring": "Get the key for a particular namespaced argument" - }, - { - "code": "def generate_matches(self, nodes):\n if self.content is None:\n for count in xrange(self.min, 1 + min(len(nodes), self.max)):\n r = {}\n if self.name:\n r[self.name] = nodes[:count]\n yield count, r\n elif self.name == \"bare_name\":\n yield self._bare_name_matches(nodes)\n else:\n if hasattr(sys, \"getrefcount\"):\n save_stderr = sys.stderr\n sys.stderr = StringIO()\n try:\n for count, r in self._recursive_matches(nodes, 0):\n if self.name:\n r[self.name] = nodes[:count]\n yield count, r\n except RuntimeError:\n for count, r in self._iterative_matches(nodes):\n if self.name:\n r[self.name] = nodes[:count]\n yield count, r\n finally:\n if hasattr(sys, \"getrefcount\"):\n sys.stderr = save_stderr", - "docstring": "Generator yielding matches for a sequence of nodes.\n\n Args:\n nodes: sequence of nodes\n\n Yields:\n (count, results) tuples where:\n count: the match comprises nodes[:count];\n results: dict containing named submatches." - }, - { - "code": "def validate(self):\n validate_url = \"https://api.pushover.net/1/users/validate.json\"\n payload = {\n 'token': self.api_token,\n 'user': self.user,\n }\n return requests.post(validate_url, data=payload)", - "docstring": "Validate the user and token, returns the Requests response." - }, - { - "code": "def set_querier_mode(self, dpid, server_port):\n self.dpid = dpid\n self.server_port = server_port\n if self._querier_thread:\n hub.kill(self._querier_thread)\n self._querier_thread = None", - "docstring": "set the datapath to work as a querier. note that you can set\n up only the one querier. when you called this method several\n times, only the last one becomes effective." - }, - { - "code": "def _repr_pretty_(self, builder, cycle):\n builder.text(self.__class__.__name__ + \"(\")\n if cycle:\n builder.text(\"\")\n else:\n builder.pretty(self.__dict__)\n builder.text(\")\")", - "docstring": "Custom pretty output for the IPython console" - }, - { - "code": "def account_id(self, value):\n if type(value) is not str:\n raise TypeError(\"commit value must be string\")\n self._account_id = value", - "docstring": "Sets the current account id\n\n Args:\n value: current account id (string)\n\n Returns:\n None" - }, - { - "code": "def handle_dims(opts):\n use,res = [],[];\n if opts['--X']:\n use.append('x');\n res.append(int(opts['--xres']));\n if opts['--Y']:\n use.append('y');\n res.append(int(opts['--yres']));\n if opts['--Z']:\n use.append('z');\n res.append(int(opts['--zres']));\n if use == []:\n use = ['x','y','z'];\n res = map(lambda k: int(opts[k]),['--xres','--yres','--zres']);\n return use,res;", - "docstring": "Script option handling." 
- }, - { - "code": "def _prepare_output_multi(self, model):\n model_name = model.__name__\n current_path = os.path.join(self._output_path, '{model}.{extension}'.format(\n model=model_name,\n extension=self.EXTENSION,\n ))\n self._outfile = codecs.open(current_path, 'w', encoding='utf-8')\n print('Dumping {model} to {file}'.format(model=model_name, file=current_path))", - "docstring": "If printing to a different file per model, change the file for the current model" - }, - { - "code": "def dataset_suggest(q=None, type=None, keyword=None, owningOrg=None,\n\tpublishingOrg=None, hostingOrg=None, publishingCountry=None, decade=None,\n\tlimit = 100, offset = None, **kwargs):\n\turl = gbif_baseurl + 'dataset/suggest'\n\targs = {'q': q, 'type': type, 'keyword': keyword,\n\t\t\t\t'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg,\n\t\t\t\t'owningOrg': owningOrg, 'decade': decade,\n\t\t\t\t'publishingCountry': publishingCountry,\n\t\t\t\t'limit': limit, 'offset': offset}\n\tout = gbif_GET(url, args, **kwargs)\n\treturn out", - "docstring": "Search that returns up to 20 matching datasets. Results are ordered by relevance.\n\n\n\t:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``\n\t:param type: [str] Type of dataset, options include OCCURRENCE, etc.\n\t:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.\n\t:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`\n\t:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`\n\t:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`\n\t:param publishingCountry: [str] Publishing country.\n\t:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)\n\t:param limit: [int] Number of results to return. Default: ``300``\n\t:param offset: [int] Record to start at. 
Default: ``0``\n\n\t:return: A dictionary\n\n\tReferences: http://www.gbif.org/developer/registry#datasetSearch\n\n\tUsage::\n\n\t\t\tfrom pygbif import registry\n\t\t\tregistry.dataset_suggest(q=\"Amazon\", type=\"OCCURRENCE\")\n\n\t\t\t# Suggest datasets tagged with keyword \"france\".\n\t\t\tregistry.dataset_suggest(keyword=\"france\")\n\n\t\t\t# Suggest datasets owned by the organization with key\n\t\t\t# \"07f617d0-c688-11d8-bf62-b8a03c50a862\" (UK NBN).\n\t\t\tregistry.dataset_suggest(owningOrg=\"07f617d0-c688-11d8-bf62-b8a03c50a862\")\n\n\t\t\t# Fulltext search for all datasets having the word \"amsterdam\" somewhere in\n\t\t\t# its metadata (title, description, etc).\n\t\t\tregistry.dataset_suggest(q=\"amsterdam\")\n\n\t\t\t# Limited search\n\t\t\tregistry.dataset_suggest(type=\"OCCURRENCE\", limit=2)\n\t\t\tregistry.dataset_suggest(type=\"OCCURRENCE\", limit=2, offset=10)\n\n\t\t\t# Return just descriptions\n\t\t\tregistry.dataset_suggest(type=\"OCCURRENCE\", limit = 5, description=True)\n\n\t\t\t# Search by decade\n\t\t\tregistry.dataset_suggest(decade=1980, limit = 30)" - }, - { - "code": "def tobinary(self):\n entrylen = struct.calcsize(self.ENTRYSTRUCT)\n rslt = []\n for (dpos, dlen, ulen, flag, typcd, nm) in self.data:\n nmlen = len(nm) + 1\n toclen = nmlen + entrylen\n if toclen % 16 == 0:\n pad = '\\0'\n else:\n padlen = 16 - (toclen % 16)\n pad = '\\0'*padlen\n nmlen = nmlen + padlen\n rslt.append(struct.pack(self.ENTRYSTRUCT+`nmlen`+'s',\n nmlen+entrylen, dpos, dlen, ulen, flag, typcd, nm+pad))\n return ''.join(rslt)", - "docstring": "Return self as a binary string." - }, - { - "code": "def fetch(self):\n params = values.of({})\n payload = self._version.fetch(\n 'GET',\n self._uri,\n params=params,\n )\n return TaskQueueInstance(\n self._version,\n payload,\n workspace_sid=self._solution['workspace_sid'],\n sid=self._solution['sid'],\n )", - "docstring": "Fetch a TaskQueueInstance\n\n :returns: Fetched TaskQueueInstance\n :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance" - }, - { - "code": "def exactly_equal(self, other):\n return (\n self.__class__ == other.__class__ and\n len(self) == len(other) and\n all(x.exactly_equal(y) for (x, y) in zip(self, other)))", - "docstring": "Comparison between VariantCollection instances that takes into account\n the info field of Variant instances.\n\n Returns\n ----------\n True if the variants in this collection equal the variants in the other\n collection. The Variant.info fields are included in the comparison." - }, - { - "code": "def is_ancestor_of(self, other, include_self=False):\n return other.is_descendant_of(self, include_self=include_self)", - "docstring": "Is this node an ancestor of `other`?" 
- }, - { - "code": "def saveToClipboard(sheet, rows, filetype=None):\n 'copy rows from sheet to system clipboard'\n filetype = filetype or options.save_filetype\n vs = copy(sheet)\n vs.rows = rows\n status('copying rows to clipboard')\n clipboard().save(vs, filetype)", - "docstring": "copy rows from sheet to system clipboard" - }, - { - "code": "def read_dbf(dbf_path, index = None, cols = False, incl_index = False):\n db = ps.open(dbf_path)\n if cols:\n if incl_index:\n cols.append(index)\n vars_to_read = cols\n else:\n vars_to_read = db.header\n data = dict([(var, db.by_col(var)) for var in vars_to_read])\n if index:\n index = db.by_col(index)\n db.close()\n return pd.DataFrame(data, index=index)\n else:\n db.close()\n return pd.DataFrame(data)", - "docstring": "Read a dbf file as a pandas.DataFrame, optionally selecting the index\n variable and which columns are to be loaded.\n\n __author__ = \"Dani Arribas-Bel \"\n ...\n\n Arguments\n ---------\n dbf_path : str\n Path to the DBF file to be read\n index : str\n Name of the column to be used as the index of the DataFrame\n cols : list\n List with the names of the columns to be read into the\n DataFrame. Defaults to False, which reads the whole dbf\n incl_index : Boolean\n If True index is included in the DataFrame as a\n column too. Defaults to False\n\n Returns\n -------\n df : DataFrame\n pandas.DataFrame object created" - }, - { - "code": "def jacobian(self, maps):\n mchirp = maps[parameters.mchirp]\n eta = maps[parameters.eta]\n m1 = conversions.mass1_from_mchirp_eta(mchirp, eta)\n m2 = conversions.mass2_from_mchirp_eta(mchirp, eta)\n return mchirp * (m1 - m2) / (m1 + m2)**3", - "docstring": "Returns the Jacobian for transforming mchirp and eta to mass1 and\n mass2." - }, - { - "code": "def tostring(self):\n parser = etree.XMLParser(remove_blank_text=True)\n outputtree = etree.XML(etree.tostring(self.__doc), parser)\n return etree.tostring(outputtree, pretty_print=True)", - "docstring": "return a pretty-printed string output for rpc reply" - }, - { - "code": "def move_out_32(library, session, space, offset, length, data, extended=False):\n converted_buffer = (ViUInt32 * length)(*tuple(data))\n if extended:\n return library.viMoveOut32Ex(session, space, offset, length, converted_buffer)\n else:\n return library.viMoveOut32(session, space, offset, length, converted_buffer)", - "docstring": "Moves an 32-bit block of data from local memory to the specified address space and offset.\n\n Corresponds to viMoveOut32* functions of the VISA library.\n\n :param library: the visa library wrapped by ctypes.\n :param session: Unique logical identifier to a session.\n :param space: Specifies the address space. 
(Constants.*SPACE*)\n :param offset: Offset (in bytes) of the address or register from which to read.\n :param length: Number of elements to transfer, where the data width of the elements to transfer\n is identical to the source data width.\n :param data: Data to write to bus.\n :param extended: Use 64 bits offset independent of the platform.\n :return: return value of the library call.\n :rtype: :class:`pyvisa.constants.StatusCode`" - }, - { - "code": "def _draw(self, tree, indent=0):\n if all([\n isinstance(tree, dict),\n type(tree) != BarDescriptor\n ]):\n for k, v in sorted(tree.items()):\n bar_desc, subdict = v[0], v[1]\n args = [self.cursor.term] + bar_desc.get(\"args\", [])\n kwargs = dict(title_pos=\"above\", indent=indent, title=k)\n kwargs.update(bar_desc.get(\"kwargs\", {}))\n b = Bar(*args, **kwargs)\n b.draw(value=bar_desc[\"value\"].value, flush=False)\n self._draw(subdict, indent=indent + self.indent)", - "docstring": "Recurse through ``tree`` and draw all nodes" - }, - { - "code": "def add_notification(self, name, enabled=True):\n\t\tnotice = {}\n\t\tnotice['Notification-Name'] = name\n\t\tnotice['Notification-Enabled'] = enabled\n\t\tself.notifications.append(notice)\n\t\tself.add_header('Notifications-Count', len(self.notifications))", - "docstring": "Add new Notification to Registration message\n\n\t\t:param string name: Notification Name\n\t\t:param boolean enabled: Enable this notification by default" - }, - { - "code": "def get_stack_info():\n stack = traceback.walk_stack(sys._getframe().f_back)\n for frame, _ in stack:\n code = frame.f_code\n if code.co_name.startswith('test_'):\n return (frame.f_locals.copy(), frame.f_globals['__name__'],\n code.co_filename, frame.f_lineno)", - "docstring": "Capture locals, module name, filename, and line number from the\n stacktrace to provide the source of the assertion error and\n formatted note." - }, - { - "code": "def find_indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", - "docstring": "Returns the indices for all occurrences of 'element' in 'lst'.\n\n Args:\n lst (list): List to search.\n element: Element to find.\n\n Returns:\n list: List of indices or values" - }, - { - "code": "def process_omp_attachements(self, node, stmt, index=None):\n omp_directives = metadata.get(node, OMPDirective)\n if omp_directives:\n directives = list()\n for directive in omp_directives:\n directive.deps = [self.visit(dep) for dep in directive.deps]\n directives.append(directive)\n if index is None:\n stmt = AnnotatedStatement(stmt, directives)\n else:\n stmt[index] = AnnotatedStatement(stmt[index], directives)\n return stmt", - "docstring": "Add OpenMP pragma on the correct stmt in the correct order.\n\n stmt may be a list. On this case, index have to be specify to add\n OpenMP on the correct statement." - }, - { - "code": "def is_validated(self):\n if not self.receipt_number:\n return False\n try:\n return self.validation.result == ReceiptValidation.RESULT_APPROVED\n except ReceiptValidation.DoesNotExist:\n return False", - "docstring": "Returns True if this instance is validated.\n\n Note that resolving this property requires a DB query, so if you've a\n very large amount of receipts you should prefetch (see django's\n ``select_related``) the ``validation`` field. 
Even so, a DB query *may*\n be triggered.\n\n If you need a large list of validated receipts, you should actually\n filter them via a QuerySet::\n\n Receipt.objects.filter(validation__result==RESULT_APPROVED)\n\n :rtype: bool" - }, - { - "code": "def parse(cls, value, default=_no_default):\n if isinstance(value, cls):\n return value\n elif isinstance(value, int):\n e = cls._make_value(value)\n else:\n if not value:\n e = cls._make_value(0)\n else:\n r = 0\n for k in value.split(\",\"):\n v = cls._name_to_member.get(k, _no_default)\n if v is _no_default:\n if default is _no_default:\n raise _create_invalid_value_error(cls, value)\n else:\n return default\n r |= v.value\n e = cls._make_value(r)\n if not e.is_valid():\n if default is _no_default:\n raise _create_invalid_value_error(cls, value)\n return default\n return e", - "docstring": "Parses a flag integer or string into a Flags instance.\n\n Accepts the following types:\n - Members of this enum class. These are returned directly.\n - Integers. These are converted directly into a Flags instance with the given name.\n - Strings. The function accepts a comma-delimited list of flag names, corresponding to\n members of the enum. These are all ORed together.\n\n Examples:\n\n >>> class Car(Flags):\n ... is_big = 1\n ... has_wheels = 2\n >>> Car.parse(1)\n Car.is_big\n >>> Car.parse(3)\n Car.parse('has_wheels,is_big')\n >>> Car.parse('is_big,has_wheels')\n Car.parse('has_wheels,is_big')" - }, - { - "code": "def cleanString(someText):\n ret = ''\n if someText is not None:\n ret = filter(unicode.isalnum, someText.lower())\n return ret", - "docstring": "remove special characters and spaces from string\n and convert to lowercase" - }, - { - "code": "def family_directory(fonts):\n if fonts:\n dirname = os.path.dirname(fonts[0])\n if dirname == '':\n dirname = '.'\n return dirname", - "docstring": "Get the path of font project directory." - }, - { - "code": "def reset(self, label=None):\n if label:\n self.label = label\n self.times = []\n self.n_loops = None\n self.total_time = None\n return self", - "docstring": "clears all measurements, allowing the object to be reused\n\n Args:\n label (str, optional) : optionally change the label\n\n Example:\n >>> from timerit import Timerit\n >>> import math\n >>> ti = Timerit(num=10, unit='us', verbose=True)\n >>> _ = ti.reset(label='10!').call(math.factorial, 10)\n Timed best=...s, mean=...s for 10!\n >>> _ = ti.reset(label='20!').call(math.factorial, 20)\n Timed best=...s, mean=...s for 20!\n >>> _ = ti.reset().call(math.factorial, 20)\n Timed best=...s, mean=...s for 20!" - }, - { - "code": "def cli(env):\n account = env.client['Account']\n nas_accounts = account.getNasNetworkStorage(\n mask='eventCount,serviceResource[datacenter.name]')\n table = formatting.Table(['id', 'datacenter', 'size', 'server'])\n for nas_account in nas_accounts:\n table.add_row([\n nas_account['id'],\n utils.lookup(nas_account,\n 'serviceResource',\n 'datacenter',\n 'name') or formatting.blank(),\n formatting.FormattedItem(\n nas_account.get('capacityGb', formatting.blank()),\n \"%dGB\" % nas_account.get('capacityGb', 0)),\n nas_account.get('serviceResourceBackendIpAddress',\n formatting.blank())])\n env.fout(table)", - "docstring": "List NAS accounts." 
- }, - { - "code": "def instantiate(self, value_of_n):\n template = Cheetah.Template.Template(\n self.content,\n searchList={'n': value_of_n}\n )\n template.random_string = random_string\n return str(template)", - "docstring": "Instantiates the template" - }, - { - "code": "async def wait_until_serving(self) -> None:\n await asyncio.gather(\n self._receiving_loop_running.wait(),\n self._internal_loop_running.wait(),\n loop=self.event_loop\n )", - "docstring": "Await until the ``Endpoint`` is ready to receive events." - }, - { - "code": "def diskusage(*args):\n selected = set()\n fstypes = set()\n if not args:\n fstypes.add('*')\n else:\n for arg in args:\n if arg.startswith('/'):\n selected.add(arg)\n else:\n fstypes.add(arg)\n if fstypes:\n regex = re.compile(\n '|'.join(\n fnmatch.translate(fstype).format('(%s)') for fstype in fstypes\n )\n )\n if __grains__['kernel'] == 'Linux':\n try:\n with salt.utils.files.fopen('/proc/mounts', 'r') as fp_:\n ifile = salt.utils.stringutils.to_unicode(fp_.read()).splitlines()\n except OSError:\n return {}\n elif __grains__['kernel'] in ('FreeBSD', 'SunOS'):\n ifile = __salt__['cmd.run']('mount -p').splitlines()\n else:\n raise CommandExecutionError('status.diskusage not yet supported on this platform')\n for line in ifile:\n comps = line.split()\n if __grains__['kernel'] == 'SunOS':\n if len(comps) >= 4:\n mntpt = comps[2]\n fstype = comps[3]\n if regex.match(fstype):\n selected.add(mntpt)\n else:\n if len(comps) >= 3:\n mntpt = comps[1]\n fstype = comps[2]\n if regex.match(fstype):\n selected.add(mntpt)\n ret = {}\n for path in selected:\n fsstats = os.statvfs(path)\n blksz = fsstats.f_bsize\n available = fsstats.f_bavail * blksz\n total = fsstats.f_blocks * blksz\n ret[path] = {\"available\": available, \"total\": total}\n return ret", - "docstring": "Return the disk usage for this minion\n\n Usage::\n\n salt '*' status.diskusage [paths and/or filesystem types]\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' status.diskusage # usage for all filesystems\n salt '*' status.diskusage / /tmp # usage for / and /tmp\n salt '*' status.diskusage ext? # usage for ext[234] filesystems\n salt '*' status.diskusage / ext? # usage for / and all ext filesystems" - }, - { - "code": "def check_block_spacing(\n self,\n first_block_type: LineType,\n second_block_type: LineType,\n error_message: str,\n ) -> typing.Generator[AAAError, None, None]:\n numbered_lines = list(enumerate(self))\n first_block_lines = filter(lambda l: l[1] is first_block_type, numbered_lines)\n try:\n first_block_lineno = list(first_block_lines)[-1][0]\n except IndexError:\n return\n second_block_lines = filter(lambda l: l[1] is second_block_type, numbered_lines)\n try:\n second_block_lineno = next(second_block_lines)[0]\n except StopIteration:\n return\n blank_lines = [\n bl for bl in numbered_lines[first_block_lineno + 1:second_block_lineno] if bl[1] is LineType.blank_line\n ]\n if not blank_lines:\n yield AAAError(\n line_number=self.fn_offset + second_block_lineno - 1,\n offset=0,\n text=error_message.format('none'),\n )\n return\n if len(blank_lines) > 1:\n yield AAAError(\n line_number=self.fn_offset + blank_lines[1][0],\n offset=0,\n text=error_message.format(len(blank_lines)),\n )", - "docstring": "Checks there is a clear single line between ``first_block_type`` and\n ``second_block_type``.\n\n Note:\n Is tested via ``check_arrange_act_spacing()`` and\n ``check_act_assert_spacing()``." 
- }, - { - "code": "def _load_cached_tlds(self):\n if not os.access(self._tld_list_path, os.R_OK):\n self._logger.error(\"Cached file is not readable for current \"\n \"user. ({})\".format(self._tld_list_path))\n raise CacheFileError(\n \"Cached file is not readable for current user.\"\n )\n set_of_tlds = set()\n with open(self._tld_list_path, 'r') as f_cache_tld:\n for line in f_cache_tld:\n tld = line.strip().lower()\n if not tld:\n continue\n if tld[0] == '\n continue\n set_of_tlds.add(\".\" + tld)\n set_of_tlds.add(\".\" + idna.decode(tld))\n return set_of_tlds", - "docstring": "Loads TLDs from cached file to set.\n\n :return: Set of current TLDs\n :rtype: set" - }, - { - "code": "def classify(self, peer_dir_meta):\n assert self.operation is None\n peer_entry_meta = peer_dir_meta.get(self.name, False) if peer_dir_meta else None\n if self.local:\n self.local.classify(peer_dir_meta)\n self.local_classification = self.local.classification\n elif peer_entry_meta:\n self.local_classification = \"deleted\"\n else:\n self.local_classification = \"missing\"\n if self.remote:\n self.remote.classify(peer_dir_meta)\n self.remote_classification = self.remote.classification\n elif peer_entry_meta:\n self.remote_classification = \"deleted\"\n else:\n self.remote_classification = \"missing\"\n c_pair = (self.local_classification, self.remote_classification)\n self.operation = operation_map.get(c_pair)\n if not self.operation:\n raise RuntimeError(\n \"Undefined operation for pair classification {}\".format(c_pair)\n )\n if PRINT_CLASSIFICATIONS:\n write(\"classify {}\".format(self))\n assert self.operation in PAIR_OPERATIONS\n return self.operation", - "docstring": "Classify entry pair." - }, - { - "code": "def confirm_not_old_proxy(loaded_cert):\n last_cn = loaded_cert.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)[\n -1\n ]\n if last_cn.value in (\"proxy\", \"limited proxy\"):\n raise ValueError(\n \"Proxy certificate is in an outdated format \" \"that is no longer supported\"\n )", - "docstring": "Given a cryptography object for the issuer cert, checks if the cert is\n an \"old proxy\" and raise an error if so." - }, - { - "code": "def _load_neighbors(self) -> None:\r\n if not self.are_neighbors_cached:\r\n self._load_neighbors_from_external_source()\r\n db: GraphDatabaseInterface = self._graph.database\r\n db_node: DBNode = db.Node.find_by_name(self.name)\r\n db_node.are_neighbors_cached = True\r\n db.session.commit()\r\n self.are_neighbors_cached = True\r\n if not self._are_neighbors_loaded:\r\n self._load_neighbors_from_database()", - "docstring": "Loads all neighbors of the node from the local database and\r\n from the external data source if needed." - }, - { - "code": "def start(self):\n logger.info('Starting task {0}'.format(self.name))\n self.reset()\n if self.hostname:\n host = self.parent_job.parent.get_host(self.hostname)\n if host:\n self.remote_ssh(host)\n else:\n self.remote_failure = True\n else:\n self.process = subprocess.Popen(self.command,\n shell=True,\n env=os.environ.copy(),\n stdout=self.stdout_file,\n stderr=self.stderr_file)\n self.started_at = datetime.utcnow()\n self._start_check_timer()", - "docstring": "Begin execution of this task." 
- }, - { - "code": "def is_readable(path):\n if os.access(path, os.F_OK) and os.access(path, os.R_OK):\n return True\n return False", - "docstring": "Check if a given path is readable by the current user.\n\n :param path: The path to check\n :returns: True or False" - }, - { - "code": "def copy(source, dest):\n logger.info(\"copying {} -> {}\".format(source, dest))\n return client.copy(source, dest)", - "docstring": "use the vospace service to get a file.\n\n @param source:\n @param dest:\n @return:" - }, - { - "code": "def update_models(new_obj, current_table, tables, relations):\n _update_check_inputs(current_table, tables, relations)\n _check_no_current_table(new_obj, current_table)\n if isinstance(new_obj, Table):\n tables_names = [t.name for t in tables]\n _check_not_creating_duplicates(new_obj.name, tables_names, 'table', DuplicateTableException)\n return new_obj, tables + [new_obj], relations\n if isinstance(new_obj, Relation):\n tables_names = [t.name for t in tables]\n _check_colname_in_lst(new_obj.right_col, tables_names)\n _check_colname_in_lst(new_obj.left_col, tables_names)\n return current_table, tables, relations + [new_obj]\n if isinstance(new_obj, Column):\n columns_names = [c.name for c in current_table.columns]\n _check_not_creating_duplicates(new_obj.name, columns_names, 'column', DuplicateColumnException)\n current_table.columns.append(new_obj)\n return current_table, tables, relations\n msg = \"new_obj cannot be of type {}\"\n raise ValueError(msg.format(new_obj.__class__.__name__))", - "docstring": "Update the state of the parsing." - }, - { - "code": "def shutdown(opts):\n log.debug('dummy proxy shutdown() called...')\n DETAILS = _load_state()\n if 'filename' in DETAILS:\n os.unlink(DETAILS['filename'])", - "docstring": "For this proxy shutdown is a no-op" - }, - { - "code": "def repository_name(doc):\n for repository_id, repo in doc.get('repositories', {}).items():\n repo['id'] = repository_id\n repo['organisation_id'] = doc['_id']\n name = repo.get('name', None)\n if name:\n yield name, repository_id", - "docstring": "View for checking repository name is unique" - }, - { - "code": "def update(self, slug):\n post_data = self.get_post_data()\n post_data['user_name'] = self.userinfo.user_name\n pageinfo = MWiki.get_by_uid(slug)\n cnt_old = tornado.escape.xhtml_unescape(pageinfo.cnt_md).strip()\n cnt_new = post_data['cnt_md'].strip()\n if cnt_old == cnt_new:\n pass\n else:\n MWikiHist.create_wiki_history(MWiki.get_by_uid(slug))\n MWiki.update(slug, post_data)\n tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)\n self.redirect('/page/{0}'.format(post_data['slug']))", - "docstring": "Update the page." 
- }, - { - "code": "def get_injuries(self, season, week):\n result = self._method_call(\"Injuries/{season}/{week}\", \"stats\", season=season, week=week)\n return result", - "docstring": "Injuries by week" - }, - { - "code": "def get_composition_ids_by_repositories(self, repository_ids):\n id_list = []\n for composition in self.get_compositions_by_repositories(repository_ids):\n id_list.append(composition.get_id())\n return IdList(id_list)", - "docstring": "Gets the list of ``Composition`` ``Ids`` corresponding to a list of ``Repository`` objects.\n\n arg: repository_ids (osid.id.IdList): list of repository\n ``Ids``\n return: (osid.id.IdList) - list of composition ``Ids``\n raise: NullArgument - ``repository_ids`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def calculateLocalElasticity(self, bp, frames=None, helical=False, unit='kT'):\n r\n acceptedUnit = ['kT', 'kJ/mol', 'kcal/mol']\n if unit not in acceptedUnit:\n raise ValueError(\" {0} not accepted. Use any of the following: {1} \".format(unit, acceptedUnit))\n frames = self._validateFrames(frames)\n name = '{0}-{1}-{2}-{3}-local-{4}'.format(bp[0], bp[1], frames[0], frames[1], int(helical))\n if bp[1]-bp[0]+1 > 4:\n raise ValueError(\"Selected span {0} is larger than 4, and therefore, not recommended for local elasticity\".format(bp[1]-bp[0]+1))\n if name not in self.esMatrix:\n time, array = self.extractLocalParameters(self.dna, bp, helical=helical, frames=frames)\n mean = np.mean(array, axis = 1)\n esMatrix = self.getElasticMatrix(array)\n self.esMatrix[name] = esMatrix\n self.minimumPoint[name] = mean\n else:\n esMatrix = self.esMatrix[name]\n mean = self.minimumPoint[name]\n if unit == 'kJ/mol':\n result = 2.4946938107879997 * esMatrix\n elif unit == 'kcal/mol':\n result = 0.5962461306854684 * esMatrix\n else:\n result = esMatrix\n return mean, result", - "docstring": "r\"\"\"Calculate local elastic matrix or stiffness matrix for local DNA segment\n\n .. note:: Here local DNA segment referred to less than 5 base-pair long.\n\n In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),\n Tilt (:math:`\\tau`), Roll (:math:`\\rho`) and Twist (:math:`\\omega`), following elastic matrix is calculated.\n\n .. math::\n\n \\mathbf{K}_{base-step} = \\begin{bmatrix}\n K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\\tau} & K_{Dx,\\rho} & K_{Dx,\\omega} \\\\\n K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\\tau} & K_{Dy,\\rho} & K_{Dy,\\omega} \\\\\n K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\\tau} & K_{Dz,\\rho} & K_{Dz,\\omega} \\\\\n K_{Dx,\\tau} & K_{Dy,\\tau} & K_{Dz,\\tau} & K_{\\tau} & K_{\\tau, \\rho} & K_{\\tau,\\omega} \\\\\n K_{Dx,\\rho} & K_{Dy,\\rho} & K_{Dz,\\rho} & K_{\\tau, \\rho} & K_{\\rho} & K_{\\rho,\\omega} \\\\\n K_{Dx,\\omega} & K_{Dy,\\omega} & K_{Dz,\\omega} & K_{\\tau, \\omega} & K_{\\rho, \\omega} & K_{\\omega} \\\\\n \\end{bmatrix}\n\n\n In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),\n inclination (:math:`\\eta`), tip (:math:`\\theta`) and twist (:math:`\\Omega`), following elastic matrix is calculated.\n\n .. 
math::\n\n \\mathbf{K}_{helical-base-step} = \\begin{bmatrix}\n K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\\eta} & K_{dx,\\theta} & K_{dx,\\Omega} \\\\\n K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\\eta} & K_{dy,\\theta} & K_{dy,\\Omega} \\\\\n K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\\eta} & K_{h,\\theta} & K_{h,\\Omega} \\\\\n K_{dx,\\eta} & K_{dy,\\eta} & K_{h,\\eta} & K_{\\eta} & K_{\\eta, \\theta} & K_{\\eta,\\Omega} \\\\\n K_{dx,\\theta} & K_{dy,\\theta} & K_{h,\\theta} & K_{\\eta, \\theta} & K_{\\theta} & K_{\\theta,\\Omega} \\\\\n K_{dx,\\Omega} & K_{dy,\\Omega} & K_{h,\\Omega} & K_{\\eta, \\Omega} & K_{\\theta, \\Omega} & K_{\\Omega} \\\\\n \\end{bmatrix}\n\n\n Parameters\n ----------\n bp : list\n List of two base-steps forming the DNA segment.\n For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.\n\n frames : list\n List of two trajectory frames between which parameters will be extracted. It can be used to select portions\n of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be\n considered.\n\n helical : bool\n If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,\n by default, elastic matrix for **base-step** parameters are calculated.\n\n unit : str\n Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.\n\n Return\n ------\n mean : numpy.ndarray\n Value of parameters at which energy is zero. Minimum point on energy landscape.\n\n if ``helical=False``\n\n .. math::\n \\begin{bmatrix}\n Dx_0 & Dy_0 & Dz_0 & \\tau_0 & \\rho_0 & \\omega_0\n \\end{bmatrix}\n\n if ``helical=True``\n\n .. math::\n \\begin{bmatrix}\n dx_0 & dy_0 & h_0 & \\eta_0 & \\theta_0 & \\Omega_0\n \\end{bmatrix}\n\n result : numpy.ndarray\n Elastic matrix." - }, - { - "code": "def manufacturer(self):\n if self._manufacturer is None:\n self._manufacturer = util.get_string(self, self.iManufacturer)\n return self._manufacturer", - "docstring": "Return the USB device's manufacturer string descriptor.\n\n This property will cause some USB traffic the first time it is accessed\n and cache the resulting value for future use." 
- }, - { - "code": "def load_data_and_build(self, filename, delimiter=\",\"):\n data = np.genfromtxt(\n filename, dtype=float, delimiter=delimiter, names=True\n )\n data = data.view(np.float64).reshape(data.shape + (-1,))\n X = data[:, 0:-1]\n Y = data[:, -1]\n self.build(X=X, Y=Y)", - "docstring": "Convenience function for directly working with a data file.\n This opens a file and reads the data into an array, sets the\n data as an nparray and list of dimnames\n @ In, filename, string representing the data file" - }, - { - "code": "def _worst_case_load(self, worst_case_scale_factors,\n peakload_consumption_ratio, modes):\n sectors = ['residential', 'retail', 'industrial', 'agricultural']\n lv_power_scaling = np.array(\n [worst_case_scale_factors['lv_{}_load'.format(mode)]\n for mode in modes])\n mv_power_scaling = np.array(\n [worst_case_scale_factors['mv_{}_load'.format(mode)]\n for mode in modes])\n lv = {(sector, 'lv'): peakload_consumption_ratio[sector] *\n lv_power_scaling\n for sector in sectors}\n mv = {(sector, 'mv'): peakload_consumption_ratio[sector] *\n mv_power_scaling\n for sector in sectors}\n self.timeseries.load = pd.DataFrame({**lv, **mv},\n index=self.timeseries.timeindex)", - "docstring": "Define worst case load time series for each sector.\n\n Parameters\n ----------\n worst_case_scale_factors : dict\n Scale factors defined in config file 'config_timeseries.cfg'.\n Scale factors describe actual power to nominal power ratio of in\n worst-case scenarios.\n peakload_consumption_ratio : dict\n Ratios of peak load to annual consumption per sector, defined in\n config file 'config_timeseries.cfg'\n modes : list\n List with worst-cases to generate time series for. Can be\n 'feedin_case', 'load_case' or both." - }, - { - "code": "def parseInline(self, src):\n self.cssBuilder.beginInline()\n try:\n try:\n src, properties = self._parseDeclarationGroup(src.strip(), braces=False)\n except self.ParseError as err:\n err.setFullCSSSource(src, inline=True)\n raise\n result = self.cssBuilder.inline(properties)\n finally:\n self.cssBuilder.endInline()\n return result", - "docstring": "Parses CSS inline source string using the current cssBuilder.\n Use to parse a tag's 'sytle'-like attribute." - }, - { - "code": "def find_minimum_spanning_forest(graph):\n msf = []\n if graph.num_nodes() == 0:\n return msf\n if graph.num_edges() == 0:\n return msf\n connected_components = get_connected_components_as_subgraphs(graph)\n for subgraph in connected_components:\n edge_list = kruskal_mst(subgraph)\n msf.append(edge_list)\n return msf", - "docstring": "Calculates the minimum spanning forest of a disconnected graph.\n Returns a list of lists, each containing the edges that define that tree.\n Returns an empty list for an empty graph." 
- }, - { - "code": "def map_representer(dumper, value):\n value = ODict(value.items())\n if len(value.keys()) == 1:\n key = list(value.keys())[0]\n if key in CONVERTED_SUFFIXES:\n return fn_representer(dumper, key, value[key])\n if key.startswith(FN_PREFIX):\n return fn_representer(dumper, key[4:], value[key])\n return dumper.represent_mapping(TAG_MAP, value, flow_style=False)", - "docstring": "Deal with !Ref style function format and OrderedDict" - }, - { - "code": "def find_method_params(self):\n req = self.request\n args = req.controller_info[\"method_args\"]\n kwargs = req.controller_info[\"method_kwargs\"]\n return args, kwargs", - "docstring": "Return the method params\n\n :returns: tuple (args, kwargs) that will be passed as *args, **kwargs" - }, - { - "code": "def retrieve_products(self, reviewer):\n if not isinstance(reviewer, self._reviewer_cls):\n raise TypeError(\n \"Type of given reviewer isn't acceptable:\", reviewer,\n \", expected:\", self._reviewer_cls)\n return list(self.graph.successors(reviewer))", - "docstring": "Retrieve products reviewed by a given reviewer.\n\n Args:\n reviewer: A reviewer.\n\n Returns:\n A list of products which the reviewer reviews.\n\n Raises:\n TypeError: when given reviewer isn't instance of specified reviewer\n class when this graph is constructed." - }, - { - "code": "def get_row_list(self, row_idx):\n try:\n row = self._rows[row_idx]\n except TypeError:\n row = self._rows[self._row_name_idx[row_idx]]\n if isinstance(row, list):\n extra = [ self._default_value ] * (len(self._column_name_list) - len(row))\n return row + extra\n else:\n if row_idx not in self._row_memo:\n self._row_memo[row_idx] = [ row[k] if k in row else self._default_value for k in self._column_name_list ]\n return self._row_memo[row_idx]", - "docstring": "get a feature vector for the nth row\n\n :param row_idx: which row\n :return: a list of feature values, ordered by column_names" - }, - { - "code": "def qft(qubits: List[int]) -> Program:\n p = Program().inst(_core_qft(qubits, 1))\n return p + bit_reversal(qubits)", - "docstring": "Generate a program to compute the quantum Fourier transform on a set of qubits.\n\n :param qubits: A list of qubit indexes.\n :return: A Quil program to compute the Fourier transform of the qubits." 
- }, - { - "code": "def is_same_vectors(self, vec_set1, vec_set2):\n if (np.absolute(rel_strain(vec_set1[0], vec_set2[0])) >\n self.max_length_tol):\n return False\n elif (np.absolute(rel_strain(vec_set1[1], vec_set2[1])) >\n self.max_length_tol):\n return False\n elif (np.absolute(rel_angle(vec_set1, vec_set2)) >\n self.max_angle_tol):\n return False\n else:\n return True", - "docstring": "Determine if two sets of vectors are the same within length and angle\n tolerances\n\n Args:\n vec_set1(array[array]): an array of two vectors\n vec_set2(array[array]): second array of two vectors" - }, - { - "code": "def get_predicates(self, class_, controller=None):\n if class_ not in self._predicates:\n if controller is None:\n controller = self._find_controller(class_)\n else:\n classes = self.request_controllers.get(controller, None)\n if classes is None:\n raise ValueError(\n 'Unknown request controller {!r}'.format(controller))\n if class_ not in classes:\n raise ValueError(\n 'Unknown request class {!r}'.format(class_))\n predicates_data = self._download_predicate_data(class_, controller)\n predicate_objects = self._parse_predicates_data(predicates_data)\n self._predicates[class_] = predicate_objects\n return self._predicates[class_]", - "docstring": "Get full predicate information for given request class, and cache\n for subsequent calls." - }, - { - "code": "def readInfo(stream):\n try:\n for line in stream:\n (toUUID, fromUUID, size) = line.split()\n try:\n size = int(size)\n except Exception:\n logger.warning(\"Bad size: %s\", size)\n continue\n logger.debug(\"diff info: %s %s %d\", toUUID, fromUUID, size)\n Diff.theKnownSizes[toUUID][fromUUID] = size\n except Exception as error:\n logger.warn(\"Can't read .bs info file (%s)\", error)", - "docstring": "Read previously-written information about diffs." - }, - { - "code": "def replace_between_tags(text, repl_, start_tag, end_tag=None):\n r\n new_lines = []\n editing = False\n lines = text.split('\\n')\n for line in lines:\n if not editing:\n new_lines.append(line)\n if line.strip().startswith(start_tag):\n new_lines.append(repl_)\n editing = True\n if end_tag is not None and line.strip().startswith(end_tag):\n editing = False\n new_lines.append(line)\n new_text = '\\n'.join(new_lines)\n return new_text", - "docstring": "r\"\"\"\n Replaces text between sentinal lines in a block of text.\n\n Args:\n text (str):\n repl_ (str):\n start_tag (str):\n end_tag (str): (default=None)\n\n Returns:\n str: new_text\n\n CommandLine:\n python -m utool.util_str --exec-replace_between_tags\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_str import * # NOQA\n >>> text = ut.codeblock(\n '''\n class:\n # \n bar\n # \n baz\n ''')\n >>> repl_ = 'spam'\n >>> start_tag = '# '\n >>> end_tag = '# '\n >>> new_text = replace_between_tags(text, repl_, start_tag, end_tag)\n >>> result = ('new_text =\\n%s' % (str(new_text),))\n >>> print(result)\n new_text =\n class:\n # \n spam\n # \n baz" - }, - { - "code": "def clean(self):\n super().clean()\n if self.voter is None and self.anonymous_key is None:\n raise ValidationError(_('A user id or an anonymous key must be used.'))\n if self.voter and self.anonymous_key:\n raise ValidationError(_('A user id or an anonymous key must be used, but not both.'))", - "docstring": "Validates the considered instance." 
- }, - { - "code": "def cancelPendingResult( self, jobid ):\n if jobid in self._pending.keys():\n k = self._pending[jobid]\n del self._pending[jobid]\n if k in self._results.keys():\n rs = self._results[k]\n j = rs.index(jobid)\n del rs[j]\n else:\n raise RuntimeError('Internal structure error for {j} -> {ps}'.format(j = jobid,\n ps = k))\n else:\n raise KeyError('No pending result with id {j}'.format(j = jobid))", - "docstring": "Cancel a particular pending result. Note that this only affects the\n notebook's record, not any job running in a lab.\n\n :param jobid: job id for pending result" - }, - { - "code": "def star_assign_item_check(self, original, loc, tokens):\n return self.check_py(\"3\", \"starred assignment (add 'match' to front to produce universal code)\", original, loc, tokens)", - "docstring": "Check for Python 3 starred assignment." - }, - { - "code": "def search_features(\n self, feature_set_id=None, parent_id=\"\", reference_name=\"\",\n start=0, end=0, feature_types=[], name=\"\", gene_symbol=\"\"):\n request = protocol.SearchFeaturesRequest()\n request.feature_set_id = feature_set_id\n request.parent_id = parent_id\n request.reference_name = reference_name\n request.name = name\n request.gene_symbol = gene_symbol\n request.start = start\n request.end = end\n request.feature_types.extend(feature_types)\n request.page_size = pb.int(self._page_size)\n return self._run_search_request(\n request, \"features\",\n protocol.SearchFeaturesResponse)", - "docstring": "Returns the result of running a search_features method\n on a request with the passed-in parameters.\n\n :param str feature_set_id: ID of the feature Set being searched\n :param str parent_id: ID (optional) of the parent feature\n :param str reference_name: name of the reference to search\n (ex: \"chr1\")\n :param int start: search start position on reference\n :param int end: end position on reference\n :param feature_types: array of terms to limit search by (ex: \"gene\")\n :param str name: only return features with this name\n :param str gene_symbol: only return features on this gene\n :return: an iterator over Features as returned in the\n SearchFeaturesResponse object." - }, - { - "code": "def parse_block(lines, header=False):\n block_lines = []\n while lines and lines[0] and (not header or lines[0].startswith('\n block_lines.append(lines.pop(0))\n return block_lines", - "docstring": "Parse and return a single block, popping off the start of `lines`.\n\n If parsing a header block, we stop after we reach a line that is not a\n comment. 
Otherwise, we stop after reaching an empty line.\n\n :param lines: list of lines\n :param header: whether we are parsing a header block\n :return: list of lines that form the single block" - }, - { - "code": "def extract_feed(\n inpath: str, outpath: str, view: View, config: nx.DiGraph = None\n) -> str:\n config = default_config() if config is None else config\n config = remove_node_attributes(config, \"converters\")\n feed = load_feed(inpath, view, config)\n return write_feed_dangerously(feed, outpath)", - "docstring": "Extract a subset of a GTFS zip into a new file" - }, - { - "code": "def job_started(self, job, queue):\n job.hmset(start=str(datetime.utcnow()), status=STATUSES.RUNNING)\n job.tries.hincrby(1)\n self.log(self.job_started_message(job, queue))\n if hasattr(job, 'on_started'):\n job.on_started(queue)", - "docstring": "Called just before the execution of the job" - }, - { - "code": "def alias(self):\n if self._alias is None:\n if self.name in self.aliases_fix:\n self._alias = self.aliases_fix[self.name]\n else:\n self._alias = self.name.lower()\\\n .replace(' ', '-')\\\n .replace('(', '')\\\n .replace(')', '')\n return self._alias", - "docstring": "If the _alias cache is None, just build the alias from the item\n name." - }, - { - "code": "def has_child_families(self, family_id):\n if self._catalog_session is not None:\n return self._catalog_session.has_child_catalogs(catalog_id=family_id)\n return self._hierarchy_session.has_children(id_=family_id)", - "docstring": "Tests if a family has any children.\n\n arg: family_id (osid.id.Id): the ``Id`` of a family\n return: (boolean) - ``true`` if the ``family_id`` has children,\n ``false`` otherwise\n raise: NotFound - ``family_id`` is not found\n raise: NullArgument - ``family_id`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def msg_(self, label, *msg):\n txt = self._unpack_msg(*msg)\n return \"[\" + label + \"] \" + txt", - "docstring": "Returns a message with a label" - }, - { - "code": "def make_multisig_segwit_info( m, pks ):\n pubs = []\n privkeys = []\n for pk in pks:\n priv = BitcoinPrivateKey(pk, compressed=True)\n priv_hex = priv.to_hex()\n pub_hex = priv.public_key().to_hex()\n privkeys.append(priv_hex)\n pubs.append(keylib.key_formatting.compress(pub_hex))\n script = None\n if len(pubs) == 1:\n if m != 1:\n raise ValueError(\"invalid m: len(pubkeys) == 1\")\n key_hash = hashing.bin_hash160(pubs[0].decode('hex')).encode('hex')\n script = '160014' + key_hash\n addr = btc_make_p2sh_address(script[2:])\n else:\n script = make_multisig_script(pubs, m)\n addr = make_multisig_segwit_address_from_witness_script(script)\n return {\n 'address': addr,\n 'redeem_script': script,\n 'private_keys': privkeys,\n 'segwit': True,\n 'm': m\n }", - "docstring": "Make either a p2sh-p2wpkh or p2sh-p2wsh\n redeem script and p2sh address.\n\n Return {'address': p2sh address, 'redeem_script': **the witness script**, 'private_keys': privkeys, 'segwit': True}\n * privkeys and redeem_script will be hex-encoded" - }, - { - "code": "def list_datasets(self):\n def _row_gen(attributes):\n for attr in attributes.values():\n yield (attr.name, attr.display_name)\n return pd.DataFrame.from_records(\n _row_gen(self.datasets),\n columns=['name', 'display_name'])", - "docstring": "Lists available datasets in a readable DataFrame format.\n\n Returns:\n pd.DataFrame: Frame listing available datasets." 
- }, - { - "code": "def local_services(self):\n if not self._loop.inside_loop():\n self._state_lock.acquire()\n try:\n return sorted([(index, name) for index, name in self._name_map.items()], key=lambda element: element[0])\n finally:\n if not self._loop.inside_loop():\n self._state_lock.release()", - "docstring": "Get a list of id, name pairs for all of the known synced services.\n\n This method is safe to call outside of the background event loop\n without any race condition. Internally it uses a thread-safe mutex to\n protect the local copies of supervisor data and ensure that it cannot\n change while this method is iterating over it.\n\n Returns:\n list (id, name): A list of tuples with id and service name sorted by id\n from low to high" - }, - { - "code": "def _pad_former_ports(self, port_handler):\n if not port_handler.need_padding():\n return\n for port_idx in range(1, port_handler.port_idx):\n pad_handler = port_handler.__class__(\n port_handler.slot_type,\n port_handler.card_type,\n port_handler.slot_idx,\n port_handler.card_idx,\n port_idx)\n if not self._find_port(pad_handler):\n self._add_port(pad_handler,\n pad_handler.create_lan_port())", - "docstring": "Create ports with former port index.\n\n :param port_handler: Port information to be registered.\n\n Depending on slot type and card type, it is necessary to register\n LAN ports with former index to VIOM table." - }, - { - "code": "def select(self, cols, mode='list'):\n if isinstance(cols, stringtypes):\n cols = _split_cols(cols)\n if not cols:\n cols = [f.name for f in self.fields]\n return select_rows(cols, self, mode=mode)", - "docstring": "Select columns from each row in the table.\n\n See :func:`select_rows` for a description of how to use the\n *mode* parameter.\n\n Args:\n cols: an iterable of Field (column) names\n mode: how to return the data" - }, - { - "code": "def neclusters(l, K):\n for c in clusters(l, K):\n if all(x for x in c):\n yield c", - "docstring": "Partition list ``l`` in ``K`` partitions, without empty parts.\n\n >>> l = [0, 1, 2]\n >>> list(neclusters(l, 2))\n [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]]]\n >>> list(neclusters(l, 1))\n [[[0, 1, 2]]]" - }, - { - "code": "def make_metatiles(size, tiles, date_time=None):\n groups = defaultdict(list)\n for tile in tiles:\n key = tile['layer']\n groups[key].append(tile)\n metatiles = []\n for group in groups.itervalues():\n parent = _parent_tile(t['coord'] for t in group)\n metatiles.extend(make_multi_metatile(parent, group, date_time))\n return metatiles", - "docstring": "Group by layers, and make metatiles out of all the tiles which share those\n properties relative to the \"top level\" tile which is parent of them all.\n Provide a 6-tuple date_time to set the timestamp on each tile within the\n metatile, or leave it as None to use the current time." - }, - { - "code": "def write_content(self, content, destination):\n directory = os.path.dirname(destination)\n if directory and not os.path.exists(directory):\n os.makedirs(directory)\n with io.open(destination, 'w', encoding='utf-8') as f:\n f.write(content)\n return destination", - "docstring": "Write given content to destination path.\n\n It will create needed directory structure first if it contain some\n directories that does not allready exists.\n\n Args:\n content (str): Content to write to target file.\n destination (str): Destination path for target file.\n\n Returns:\n str: Path where target file has been written." 
- }, - { - "code": "def set_polling_override(self, override):\n polling_override = self.get_characteristic_handle_from_uuid(UUID_POLLING_OVERRIDE)\n if polling_override is None:\n logger.warn('Failed to find handle for device name')\n return False\n if self.dongle._write_attribute(self.conn_handle, polling_override, struct.pack('B', override)):\n return True\n return False", - "docstring": "Set the sensor polling timer override value in milliseconds. \n\n Due to the time it takes to poll all the sensors on up to 5 IMUs, it's not \n possible for the SK8 firmware to define a single fixed rate for reading \n new samples without it being artificially low for most configurations. \n \n Instead the firmware tries to define a sensible default value for each \n combination of IMUs and sensors that can be enabled (any combination of \n 1-5 IMUs and 1-3 sensors on each IMU). In most cases this should work well,\n but for example if you have multiple SK8s connected through the same dongle\n and have multiple IMUs enabled on each, you may find packets start to be\n dropped quite frequently. \n\n To mitigate this, you can adjust the period of the timer used by the firmware\n to poll for new sensor data (and send data packets to the host device). The\n value should be in integer milliseconds, and have a minimum value of 20. Values\n below 20 will be treated as a request to disable the override and return to the\n default polling period. \n\n The method can be called before or after streaming is activated, and will take\n effect immediately. \n\n NOTE1: the value is stored in RAM and will not persist across reboots, although\n it should persist for multiple connections.\n NOTE2: once set, the override applies to ALL sensor configurations, so for \n example if you set it while using 5 IMUs on 2 SK8s, then switch to using \n 1 IMU on each SK8, you will probably want to disable it again as the \n latter configuration should work fine with the default period.\n\n Args:\n override (int): polling timer override period in milliseconds. Values\n below 20 are treated as 0, and have the effect of disabling the\n override in favour of the default periods. \n\n Returns:\n True on success, False on error." - }, - { - "code": "def cutoff(poly, *args):\n if len(args) == 1:\n low, high = 0, args[0]\n else:\n low, high = args[:2]\n core_old = poly.A\n core_new = {}\n for key in poly.keys:\n if low <= numpy.sum(key) < high:\n core_new[key] = core_old[key]\n return Poly(core_new, poly.dim, poly.shape, poly.dtype)", - "docstring": "Remove polynomial components with order outside a given interval.\n\n Args:\n poly (Poly):\n Input data.\n low (int):\n The lowest order that is allowed to be included. Defaults to 0.\n high (int):\n The upper threshold for the cutoff range.\n\n Returns:\n (Poly):\n The same as `P`, except that all terms that have a order not within\n the bound `low <= order < high` are removed.\n\n Examples:\n >>> poly = chaospy.prange(4, 1) + chaospy.prange(4, 2)[::-1]\n >>> print(poly) # doctest: +SKIP\n [q1^3+1, q0+q1^2, q0^2+q1, q0^3+1]\n >>> print(chaospy.cutoff(poly, 3)) # doctest: +SKIP\n [1, q0+q1^2, q0^2+q1, 1]\n >>> print(chaospy.cutoff(poly, 1, 3)) # doctest: +SKIP\n [0, q0+q1^2, q0^2+q1, 0]" - }, - { - "code": "def delete_model(self, model):\n if SessionActivity.is_current(sid_s=model.sid_s):\n flash('You could not remove your current session', 'error')\n return\n delete_session(sid_s=model.sid_s)\n db.session.commit()", - "docstring": "Delete a specific session." 
- }, - { - "code": "def get_query_errors(self):\n if self._query_errors: return self._query_errors\n v = []\n for i in range(len(self._query_hpas)):\n v.append(self.get_query_error(i))\n return v", - "docstring": "Return a list of base-wise error observations for the query \n\n :returns: list of base-wise errors\n :rtype: list of HPA groups" - }, - { - "code": "def requestConnection(self, wanInterfaceId=1, timeout=1):\n namespace = Wan.getServiceType(\"requestConnection\") + str(wanInterfaceId)\n uri = self.getControlURL(namespace)\n self.execute(uri, namespace, \"RequestConnection\", timeout=timeout)", - "docstring": "Request the connection to be established\n\n :param int wanInterfaceId: the id of the WAN interface\n :param float timeout: the timeout to wait for the action to be executed" - }, - { - "code": "def upload():\n env=os.environ.copy()\n print(env)\n env['PYTHONPATH']= \"./pynt\"\n print(env)\n pipe=subprocess.Popen(['python', 'setup.py', 'sdist','upload'], env=env)\n pipe.wait()", - "docstring": "Uploads to PyPI" - }, - { - "code": "def features(self):\n if 'Features' in self._signature.subpackets:\n return next(iter(self._signature.subpackets['Features'])).flags\n return set()", - "docstring": "A ``set`` of implementation features specified in this signature, if any. Otherwise, an empty ``set``." - }, - { - "code": "def find_first_tag(tags, entity_type, after_index=-1):\n for tag in tags:\n for entity in tag.get('entities'):\n for v, t in entity.get('data'):\n if t.lower() == entity_type.lower() and tag.get('start_token', 0) > after_index:\n return tag, v, entity.get('confidence')\n return None, None, None", - "docstring": "Searches tags for entity type after given index\n\n Args:\n tags(list): a list of tags with entity types to be compaired too entity_type\n entity_type(str): This is he entity type to be looking for in tags\n after_index(int): the start token must be greaterthan this.\n\n Returns:\n ( tag, v, confidence ):\n tag(str): is the tag that matched\n v(str): ? the word that matched?\n confidence(float): is a mesure of accuacy. 1 is full confidence and 0 is none." - }, - { - "code": "def get_objective(self, sampler):\n def objective(params):\n circuit = self.get_circuit(params)\n circuit.make_cache()\n return self.get_energy(circuit, sampler)\n return objective", - "docstring": "Get an objective function to be optimized." - }, - { - "code": "def cancel(self, **kwargs):\n path = '%s/%s/cancel' % (self.manager.path, self.get_id())\n self.manager.gitlab.http_post(path)", - "docstring": "Cancel the job.\n\n Args:\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabJobCancelError: If the job could not be canceled" - }, - { - "code": "def activationFunctionASIG(self, x):\n def act(v):\n if v < -15.0: return 0.0\n elif v > 15.0: return 1.0\n else: return 1.0 / (1.0 + Numeric.exp(-v))\n return Numeric.array(list(map(act, x)), 'f')", - "docstring": "Determine the activation of a node based on that nodes net input." 
- }, - { - "code": "def derive_from(cls, section, name=None):\n new_section = deepcopy(section)\n if name:\n new_section.name = name\n return new_section", - "docstring": "Creates a new section based on the given.\n\n :param Section section: Section to derive from,\n\n :param str|unicode name: New section name.\n\n :rtype: Section" - }, - { - "code": "def new_run(self):\n self.current_run += 1\n self.runs.append(RunData(self.current_run + 1))", - "docstring": "Creates a new RunData object and increments pointers" - }, - { - "code": "def _ExtractRequestSummaryFields(self, request, error=None):\n headers = request.headers\n summary_fields = {\n 'server': request.get_full_url(),\n 'contentRange': headers['Content-range'],\n 'contentLength': headers['Content-length']\n }\n if error:\n summary_fields['isError'] = True\n summary_fields['errorMessage'] = error.reason\n else:\n summary_fields['isError'] = False\n return summary_fields", - "docstring": "Extract fields used in the summary logs.\n\n Args:\n request: a urllib2.Request instance configured to make the request.\n [optional]\n error: a urllib2.HttpError instance used to retrieve error details.\n\n Returns:\n A dict containing the fields to be output in the summary logs." - }, - { - "code": "def _init_sub_groups(self, parent):\n if self._sub_groups:\n for sub_group in self._sub_groups:\n for component in split_path_components(sub_group):\n fp = os.path.join(parent.full_path, component)\n if os.path.exists(fp):\n node = Node(name=component, parent=parent)\n parent.children.append(node)\n else:\n node = parent.create_cgroup(component)\n parent = node\n self._init_children(node)\n else:\n self._init_children(parent)", - "docstring": "Initialise sub-groups, and create any that do not already exist." - }, - { - "code": "def workaround_lowering_pass(ir_blocks, query_metadata_table):\n new_ir_blocks = []\n for block in ir_blocks:\n if isinstance(block, Filter):\n new_block = _process_filter_block(query_metadata_table, block)\n else:\n new_block = block\n new_ir_blocks.append(new_block)\n return new_ir_blocks", - "docstring": "Extract locations from TernaryConditionals and rewrite their Filter blocks as necessary." - }, - { - "code": "def handle_exception (self):\n etype, evalue = sys.exc_info()[:2]\n log.debug(LOG_CHECK, \"Error in %s: %s %s\", self.url, etype, evalue, exception=True)\n if (etype in ExcNoCacheList) or \\\n (etype == socket.error and evalue.args[0]==errno.EBADF) or \\\n not evalue:\n self.caching = False\n errmsg = unicode(etype.__name__)\n uvalue = strformat.unicode_safe(evalue)\n if uvalue:\n errmsg += u\": %s\" % uvalue\n return strformat.limit(errmsg, length=240)", - "docstring": "An exception occurred. Log it and set the cache flag." - }, - { - "code": "def _sdkmanager(self, *args, **kwargs):\n kwargs['cwd'] = kwargs.get('cwd', self.android_sdk_dir)\n command = self.sdkmanager_path + ' ' + ' '.join(args)\n return_child = kwargs.pop('return_child', False)\n if return_child:\n return self.buildozer.cmd_expect(command, **kwargs)\n else:\n kwargs['get_stdout'] = kwargs.get('get_stdout', True)\n return self.buildozer.cmd(command, **kwargs)", - "docstring": "Call the sdkmanager in our Android SDK with the given arguments." 
- }, - { - "code": "def extract_name_from_path(path):\n base_path, query_string = path.split('?')\n infos = base_path.strip('/').split('/')[2:]\n if len(infos) > 1:\n name = '{category} / {name}'.format(\n category=infos[0].title(),\n name=infos[1].replace('-', ' ').title()\n )\n else:\n name = '{category}'.format(category=infos[0].title())\n return safe_unicode(name)", - "docstring": "Return a readable name from a URL path.\n\n Useful to log requests on Piwik with categories tree structure.\n See: http://piwik.org/faq/how-to/#faq_62" - }, - { - "code": "def enumeration(values, converter=str, default=''):\n values = tuple(converter(value) for value in values)\n if not values:\n return default\n if len(values) == 1:\n return values[0]\n if len(values) == 2:\n return ' and '.join(values)\n return ', and '.join((', '.join(values[:-1]), values[-1]))", - "docstring": "Return an enumeration string based on the given values.\n\n The following four examples show the standard output of function\n |enumeration|:\n\n >>> from hydpy.core.objecttools import enumeration\n >>> enumeration(('text', 3, []))\n 'text, 3, and []'\n >>> enumeration(('text', 3))\n 'text and 3'\n >>> enumeration(('text',))\n 'text'\n >>> enumeration(())\n ''\n\n All given objects are converted to strings by function |str|, as shown\n by the first two examples. This behaviour can be changed by another\n function expecting a single argument and returning a string:\n\n >>> from hydpy.core.objecttools import classname\n >>> enumeration(('text', 3, []), converter=classname)\n 'str, int, and list'\n\n Furthermore, you can define a default string that is returned\n in case an empty iterable is given:\n\n >>> enumeration((), default='nothing')\n 'nothing'" - }, - { - "code": "def set_exception(self, exception):\n if not self.is_done():\n raise TransferNotDoneError(\n 'set_exception can only be called once the transfer is '\n 'complete.')\n self._coordinator.set_exception(exception, override=True)", - "docstring": "Sets the exception on the future." - }, - { - "code": "def nlargest(n, mapping):\n try:\n it = mapping.iteritems()\n except AttributeError:\n it = iter(mapping.items())\n pq = minpq()\n try:\n for i in range(n):\n pq.additem(*next(it))\n except StopIteration:\n pass\n try:\n while it:\n pq.pushpopitem(*next(it))\n except StopIteration:\n pass\n out = list(pq.popkeys())\n out.reverse()\n return out", - "docstring": "Takes a mapping and returns the n keys associated with the largest values\n in descending order. 
If the mapping has fewer than n items, all its keys\n are returned.\n\n Equivalent to:\n ``next(zip(*heapq.nlargest(mapping.items(), key=lambda x: x[1])))``\n\n Returns\n -------\n list of up to n keys from the mapping" - }, - { - "code": "def calculate_amr(cls, is_extended, from_id, to_id, rtr_only=False, rtr_too=True):\n return (((from_id ^ to_id) << 3) | (0x7 if rtr_too and not rtr_only else 0x3)) if is_extended else \\\n (((from_id ^ to_id) << 21) | (0x1FFFFF if rtr_too and not rtr_only else 0xFFFFF))", - "docstring": "Calculates AMR using CAN-ID range as parameter.\n\n :param bool is_extended: If True parameters from_id and to_id contains 29-bit CAN-ID.\n :param int from_id: First CAN-ID which should be received.\n :param int to_id: Last CAN-ID which should be received.\n :param bool rtr_only: If True only RTR-Messages should be received, and rtr_too will be ignored.\n :param bool rtr_too: If True CAN data frames and RTR-Messages should be received.\n :return: Value for AMR.\n :rtype: int" - }, - { - "code": "def status_get(self, unit):\n raw_status, return_code = unit.run(\n \"status-get --format=json --include-data\")\n if return_code != 0:\n return (\"unknown\", \"\")\n status = json.loads(raw_status)\n return (status[\"status\"], status[\"message\"])", - "docstring": "Return the current service status of this unit." - }, - { - "code": "def ReadMedia(self, media_link):\n default_headers = self.default_headers\n path = base.GetPathFromLink(media_link)\n media_id = base.GetResourceIdOrFullNameFromLink(media_link)\n attachment_id = base.GetAttachmentIdFromMediaId(media_id)\n headers = base.GetHeaders(self,\n default_headers,\n 'get',\n path,\n attachment_id,\n 'media',\n {})\n request = request_object._RequestObject('media', documents._OperationType.Read)\n result, self.last_response_headers = self.__Get(path,\n request,\n headers)\n return result", - "docstring": "Reads a media.\n\n When self.connection_policy.MediaReadMode ==\n documents.MediaReadMode.Streamed, returns a file-like stream object;\n otherwise, returns a str.\n\n :param str media_link:\n The link to the media.\n\n :return:\n The read Media.\n :rtype:\n str or file-like stream object" - }, - { - "code": "def map_ids(x, indices, map_fn):\n indices = tf.reshape(indices, [-1])\n t_i = tf.constant(0)\n t_batch_size = tf.reduce_max(indices) + 1\n ta_stack_out = tf.TensorArray(\n x.dtype,\n size=t_batch_size,\n )\n while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size)\n def body(t_i, ta_stack_out):\n current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i)))\n t_row = tf.gather_nd(x, indices=current_ids)\n t_row = tf.expand_dims(t_row, axis=0)\n t_row = map_fn(t_row)\n t_row = tf.squeeze(t_row, axis=0)\n ta_stack_out = ta_stack_out.write(t_i, t_row)\n return [tf.add(t_i, 1), ta_stack_out]\n _, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out])\n return ta_stack_out.concat()", - "docstring": "Apply a function to each coordinate ids of a multidimensional tensor.\n\n This allows to process each sequence of a batch independently. 
This is\n similar to tf.map_fn but with tensor where the batch dim has been flatten.\n\n Warning: The indices ids have to be contiguous and ordered in memory as the\n output vector for each of the ids are simply concatenated after being\n processed.\n Ex: if your indices are [0,2,2,1,2,0], the output will contains the processed\n rows in the following order: [0,0,1,2,2,2]\n\n Args:\n x (Tensor): The tensor to be dispatched of shape [length,...]\n indices (Tensor): A int32 tensor of size [length, 1] containing the batch\n coordinate of x\n map_fn (fct): Function called for every ids of the original tensor. Take\n as input a tensor of same rank than x and from shape [length_id,...] with\n length_id <= length. Isn't called if length_id == 0\n\n Returns:\n a tensor of same shape as x, where each elements has been processed" - }, - { - "code": "def _format_pr(pr_):\n ret = {'id': pr_.get('id'),\n 'pr_number': pr_.get('number'),\n 'state': pr_.get('state'),\n 'title': pr_.get('title'),\n 'user': pr_.get('user').get('login'),\n 'html_url': pr_.get('html_url'),\n 'base_branch': pr_.get('base').get('ref')}\n return ret", - "docstring": "Helper function to format API return information into a more manageable\n and useful dictionary for pull request information.\n\n pr_\n The pull request to format." - }, - { - "code": "def _reset_offset(self, partition):\n timestamp = self._subscriptions.assignment[partition].reset_strategy\n if timestamp is OffsetResetStrategy.EARLIEST:\n strategy = 'earliest'\n elif timestamp is OffsetResetStrategy.LATEST:\n strategy = 'latest'\n else:\n raise NoOffsetForPartitionError(partition)\n log.debug(\"Resetting offset for partition %s to %s offset.\",\n partition, strategy)\n offsets = self._retrieve_offsets({partition: timestamp})\n if partition not in offsets:\n raise NoOffsetForPartitionError(partition)\n offset = offsets[partition][0]\n if self._subscriptions.is_assigned(partition):\n self._subscriptions.seek(partition, offset)", - "docstring": "Reset offsets for the given partition using the offset reset strategy.\n\n Arguments:\n partition (TopicPartition): the partition that needs reset offset\n\n Raises:\n NoOffsetForPartitionError: if no offset reset strategy is defined" - }, - { - "code": "def parse_magmoms(self, data, lattice=None):\n if lattice is None:\n raise Exception(\n 'Magmoms given in terms of crystal axes in magCIF spec.')\n try:\n magmoms = {\n data[\"_atom_site_moment_label\"][i]:\n np.array(\n [str2float(data[\"_atom_site_moment_crystalaxis_x\"][i]),\n str2float(data[\"_atom_site_moment_crystalaxis_y\"][i]),\n str2float(data[\"_atom_site_moment_crystalaxis_z\"][i])]\n )\n for i in range(len(data[\"_atom_site_moment_label\"]))\n }\n except (ValueError, KeyError):\n return None\n return magmoms", - "docstring": "Parse atomic magnetic moments from data dictionary" - }, - { - "code": "def get_session():\n if hasattr(g, 'session'):\n return g.session\n sess = create_session(bind=current_app.config['DATABASE_ENGINE'])\n try:\n g.session = sess\n except RuntimeError:\n pass\n return sess", - "docstring": "Gets a session. 
If there's no yet, creates one.\n\n :returns: a session" - }, - { - "code": "def create_parser_options(lazy_mfcollection_parsing: bool = False) -> Dict[str, Dict[str, Any]]:\n return {MultifileCollectionParser.__name__: {'lazy_parsing': lazy_mfcollection_parsing}}", - "docstring": "Utility method to create a default options structure with the lazy parsing inside\n\n :param lazy_mfcollection_parsing:\n :return: the options structure filled with lazyparsing option (for the MultifileCollectionParser)" - }, - { - "code": "def get_index_from_coord(coord, base_index):\n try:\n values = coord.values\n except AttributeError:\n values = coord\n if values.ndim == 0:\n return base_index.get_loc(values[()])\n if len(values) == len(base_index) and (values == base_index).all():\n return slice(None)\n values = np.array(list(map(lambda i: base_index.get_loc(i), values)))\n return to_slice(values) or values", - "docstring": "Function to return the coordinate as integer, integer array or slice\n\n If `coord` is zero-dimensional, the corresponding integer in `base_index`\n will be supplied. Otherwise it is first tried to return a slice, if that\n does not work an integer array with the corresponding indices is returned.\n\n Parameters\n ----------\n coord: xarray.Coordinate or xarray.Variable\n Coordinate to convert\n base_index: pandas.Index\n The base index from which the `coord` was extracted\n\n Returns\n -------\n int, array of ints or slice\n The indexer that can be used to access the `coord` in the\n `base_index`" - }, - { - "code": "def active(self) -> bool:\n states = self._client.get_state(self._state_url)['states']\n for state in states:\n state = state['State']\n if int(state['Id']) == self._state_id:\n return state['IsActive'] == \"1\"\n return False", - "docstring": "Indicate if this RunState is currently active." - }, - { - "code": "def register_proper_name(self, name):\n with self.proper_names_db_path.open(\"a\") as f:\n f.write(u\"{0}\\n\".format(name))", - "docstring": "Registers a proper name to the database." 
- }, - { - "code": "def create_or_reference_batch(self):\n client = self.aq_parent\n batch_headers = self.get_batch_header_values()\n if not batch_headers:\n return False\n batch_title = batch_headers.get('title', False)\n if batch_title:\n existing_batch = [x for x in client.objectValues('Batch')\n if x.title == batch_title]\n if existing_batch:\n self.setBatch(existing_batch[0])\n return existing_batch[0]\n if 'title' in batch_headers:\n if 'id' in batch_headers:\n del (batch_headers['id'])\n if '' in batch_headers:\n del (batch_headers[''])\n batch = _createObjectByType('Batch', client, tmpID())\n batch.processForm()\n batch.edit(**batch_headers)\n self.setBatch(batch)", - "docstring": "Save reference to batch, if existing batch specified\n Create new batch, if possible with specified values" - }, - { - "code": "def read_ucs2(self, num_chars):\n buf = readall(self, num_chars * 2)\n return ucs2_codec.decode(buf)[0]", - "docstring": "Reads num_chars UCS2 string from the stream" - }, - { - "code": "def get_original_order_unique_ids(id_array):\n assert isinstance(id_array, np.ndarray)\n assert len(id_array.shape) == 1\n original_unique_id_indices =\\\n np.sort(np.unique(id_array, return_index=True)[1])\n original_order_unique_ids = id_array[original_unique_id_indices]\n return original_order_unique_ids", - "docstring": "Get the unique id's of id_array, in their original order of appearance.\n\n Parameters\n ----------\n id_array : 1D ndarray.\n Should contain the ids that we want to extract the unique values from.\n\n Returns\n -------\n original_order_unique_ids : 1D ndarray.\n Contains the unique ids from `id_array`, in their original order of\n appearance." - }, - { - "code": "def is_syscall_addr(self, addr):\n if self.kernel_base is None or addr < self.kernel_base:\n return False\n addr -= self.kernel_base\n if addr % self.syscall_addr_alignment != 0:\n return False\n addr //= self.syscall_addr_alignment\n return addr <= self.unknown_syscall_number", - "docstring": "Return whether or not the given address corresponds to a syscall implementation." - }, - { - "code": "def prepare(self, context):\n\t\tif __debug__:\n\t\t\tlog.debug(\"Preparing request context.\", extra=dict(request=id(context)))\n\t\tcontext.request = Request(context.environ)\n\t\tcontext.response = Response(request=context.request)\n\t\tcontext.environ['web.base'] = context.request.script_name\n\t\tcontext.request.remainder = context.request.path_info.split('/')\n\t\tif context.request.remainder and not context.request.remainder[0]:\n\t\t\tdel context.request.remainder[0]\n\t\tcontext.path = Bread()", - "docstring": "Add the usual suspects to the context.\n\t\t\n\t\tThis adds `request`, `response`, and `path` to the `RequestContext` instance." 
- }, - { - "code": "def clear_buffer(self):\n command_url = self.hub_url + '/1?XB=M=1'\n response = self.post_direct_command(command_url)\n self.logger.info(\"clear_buffer: %s\", response)\n return response", - "docstring": "Clear the hub buffer" - }, - { - "code": "def _parse_arguments(self, method, parameters):\n arguments = _fetch_arguments(self, method)\n arg_dict = {}\n errors = []\n for key, properties in parameters:\n if key in arguments:\n value = arguments[key]\n try:\n arg_dict[key] = _apply_validator_chain(\n properties.get('validators', []), value, self)\n except validators.ValidationError as err:\n errors.append(err)\n else:\n if properties.get('required', False):\n raise web.HTTPError(\n 400,\n ('Missing required parameter: %s'\n % (key, ))\n )\n else:\n if properties.get('default', None) is not None:\n arg_dict[key] = properties['default']\n else:\n arg_dict[key] = None\n if errors:\n raise web.HTTPError(400, 'There were %s errors' % len(errors))\n return arg_dict", - "docstring": "Parse arguments to method, returning a dictionary." - }, - { - "code": "def masks(list_of_index_lists, n):\n for il,l in enumerate(list_of_index_lists):\n mask = np.zeros(n,dtype=bool)\n mask[l] = True\n list_of_index_lists[il] = mask\n masks = np.array(list_of_index_lists)\n return masks", - "docstring": "Make an array in which rows store 1d mask arrays from list of index lists.\n\n Parameters\n ----------\n n : int\n Maximal index / number of samples." - }, - { - "code": "def _persist_result(self):\n self._prepare_persistence_engine()\n return self._persistence_engine.store_async_result(\n self.id, self.result)", - "docstring": "Store this Async's result in persistent storage." - }, - { - "code": "def _xml_to_dict(xmltree):\n if sys.version_info < (2, 7):\n children_len = len(xmltree.getchildren())\n else:\n children_len = len(xmltree)\n if children_len < 1:\n name = xmltree.tag\n if '}' in name:\n comps = name.split('}')\n name = comps[1]\n return {name: xmltree.text}\n xmldict = {}\n for item in xmltree:\n name = item.tag\n if '}' in name:\n comps = name.split('}')\n name = comps[1]\n if name not in xmldict:\n if sys.version_info < (2, 7):\n children_len = len(item.getchildren())\n else:\n children_len = len(item)\n if children_len > 0:\n xmldict[name] = _xml_to_dict(item)\n else:\n xmldict[name] = item.text\n else:\n if not isinstance(xmldict[name], list):\n tempvar = xmldict[name]\n xmldict[name] = []\n xmldict[name].append(tempvar)\n xmldict[name].append(_xml_to_dict(item))\n return xmldict", - "docstring": "Convert an XML tree into a dict" - }, - { - "code": "def handle_login_failure(self, provider, reason):\n logger.error('Authenication Failure: {0}'.format(reason))\n messages.error(self.request, 'Authenication Failed. Please try again')\n return redirect(self.get_error_redirect(provider, reason))", - "docstring": "Message user and redirect on error." - }, - { - "code": "def nonlocal_check(self, original, loc, tokens):\n return self.check_py(\"3\", \"nonlocal statement\", original, loc, tokens)", - "docstring": "Check for Python 3 nonlocal statement." 
- }, - { - "code": "def payments(self, virtual_account_id, data={}, **kwargs):\n url = \"{}/{}/payments\".format(self.base_url, virtual_account_id)\n return self.get_url(url, data, **kwargs)", - "docstring": "Fetch Payment for Virtual Account Id\n\n Args:\n virtual_account_id :\n Id for which Virtual Account objects has to be retrieved\n\n Returns:\n Payment dict for given Virtual Account Id" - }, - { - "code": "def calculate_authentication_data(self, key):\n if self.key_id == KEY_ID_NONE:\n return ''\n if self.key_id == KEY_ID_HMAC_SHA_1_96:\n digestmod = hashlib.sha1\n data_length = 20\n elif self.key_id == KEY_ID_HMAC_SHA_256_128:\n digestmod = hashlib.sha256\n data_length = 32\n else:\n raise ValueError('Unknown Key ID')\n current_authentication_data = self.authentication_data\n self.authentication_data = '\\x00' * data_length\n msg = self.to_bytes()\n self.authentication_data = current_authentication_data\n return hmac.new(key, msg, digestmod).digest()", - "docstring": "Calculate the authentication data based on the current key-id and the\n given key." - }, - { - "code": "def _clean_flags(args, caller):\n flags = ''\n if args is None:\n return flags\n allowed = ('a', 'B', 'h', 'H', 'i', 'k', 'l', 'P', 't', 'T', 'x', 'v')\n for flag in args:\n if flag in allowed:\n flags += flag\n else:\n raise CommandExecutionError(\n 'Invalid flag passed to {0}'.format(caller)\n )\n return flags", - "docstring": "Sanitize flags passed into df" - }, - { - "code": "def execDetails(self, reqId, contract, execution):\n if execution.orderId == UNSET_INTEGER:\n execution.orderId = 0\n key = self.orderKey(\n execution.clientId, execution.orderId, execution.permId)\n trade = self.trades.get(key)\n if trade and contract.conId == trade.contract.conId:\n contract = trade.contract\n else:\n contract = Contract.create(**contract.dict())\n execId = execution.execId\n execution.time = util.parseIBDatetime(execution.time). \\\n astimezone(datetime.timezone.utc)\n isLive = reqId not in self._futures\n time = self.lastTime if isLive else execution.time\n fill = Fill(contract, execution, CommissionReport(), time)\n if execId not in self.fills:\n self.fills[execId] = fill\n if trade:\n trade.fills.append(fill)\n logEntry = TradeLogEntry(\n self.lastTime,\n trade.orderStatus.status,\n f'Fill {execution.shares}@{execution.price}')\n trade.log.append(logEntry)\n if isLive:\n self._logger.info(f'execDetails: {fill}')\n self.ib.execDetailsEvent.emit(trade, fill)\n trade.fillEvent(trade, fill)\n if not isLive:\n self._results[reqId].append(fill)", - "docstring": "This wrapper handles both live fills and responses to reqExecutions." - }, - { - "code": "def _pys_assert_version(self, line):\n if float(line.strip()) > 1.0:\n msg = _(\"File version {version} unsupported (>1.0).\").format(\n version=line.strip())\n raise ValueError(msg)", - "docstring": "Asserts pys file version" - }, - { - "code": "def copy_file(self, filepath):\n copy_file = False\n try:\n copy_file = self.data[filepath]['copy']\n except KeyError:\n return False\n return copy_file", - "docstring": "Returns flag which says to copy rather than link a file." 
- }, - { - "code": "def search_song(self, song_name, quiet=False, limit=9):\n result = self.search(song_name, search_type=1, limit=limit)\n if result['result']['songCount'] <= 0:\n LOG.warning('Song %s not existed!', song_name)\n raise SearchNotFound('Song {} not existed.'.format(song_name))\n else:\n songs = result['result']['songs']\n if quiet:\n song_id, song_name = songs[0]['id'], songs[0]['name']\n song = Song(song_id, song_name)\n return song\n else:\n return self.display.select_one_song(songs)", - "docstring": "Search song by song name.\n\n :params song_name: song name.\n :params quiet: automatically select the best one.\n :params limit: song count returned by weapi.\n :return: a Song object." - }, - { - "code": "def do_invite(self, sender, body, args):\n for invitee in args:\n new_member = { 'JID': invitee }\n self.invite_user(new_member, inviter=sender)", - "docstring": "Invite members to the chatroom on a user's behalf" - }, - { - "code": "def fit(\n model,\n params,\n X_train,\n y_train,\n X_test,\n y_test,\n additional_calls,\n fit_params=None,\n scorer=None,\n random_state=None,\n):\n return default_client().sync(\n _fit,\n model,\n params,\n X_train,\n y_train,\n X_test,\n y_test,\n additional_calls,\n fit_params=fit_params,\n scorer=scorer,\n random_state=random_state,\n )", - "docstring": "Find a good model and search among a space of hyper-parameters\n\n This does a hyper-parameter search by creating many models and then fitting\n them incrementally on batches of data and reducing the number of models based\n on the scores computed during training. Over time fewer and fewer models\n remain. We train these models for increasingly long times.\n\n The model, number of starting parameters, and decay can all be provided as\n configuration parameters.\n\n Training data should be given as Dask arrays. It can be large. Testing\n data should be given either as a small dask array or as a numpy array. It\n should fit on a single worker.\n\n Parameters\n ----------\n model : Estimator\n params : List[Dict]\n Parameters to start training on model\n X_train : dask Array\n y_train : dask Array\n X_test : Array\n Numpy array or small dask array. Should fit in single node's memory.\n y_test : Array\n Numpy array or small dask array. Should fit in single node's memory.\n additional_calls : callable\n A function that takes information about scoring history per model and\n returns the number of additional partial fit calls to run on each model\n fit_params : dict\n Extra parameters to give to partial_fit\n scorer : callable\n A scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n random_state : int, RandomState instance or None, optional, default: None\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from dask_ml.datasets import make_classification\n >>> X, y = make_classification(n_samples=5000000, n_features=20,\n ... chunks=100000, random_state=0)\n\n >>> from sklearn.linear_model import SGDClassifier\n >>> model = SGDClassifier(tol=1e-3, penalty='elasticnet', random_state=0)\n\n >>> from sklearn.model_selection import ParameterSampler\n >>> params = {'alpha': np.logspace(-2, 1, num=1000),\n ... 'l1_ratio': np.linspace(0, 1, num=1000),\n ... 
'average': [True, False]}\n >>> params = list(ParameterSampler(params, 10, random_state=0))\n\n >>> X_test, y_test = X[:100000], y[:100000]\n >>> X_train = X[100000:]\n >>> y_train = y[100000:]\n\n >>> def remove_worst(scores):\n ... last_score = {model_id: info[-1]['score']\n ... for model_id, info in scores.items()}\n ... worst_score = min(last_score.values())\n ... out = {}\n ... for model_id, score in last_score.items():\n ... if score != worst_score:\n ... out[model_id] = 1 # do one more training step\n ... if len(out) == 1:\n ... out = {k: 0 for k in out} # no more work to do, stops execution\n ... return out\n\n >>> from dask.distributed import Client\n >>> client = Client(processes=False)\n\n >>> from dask_ml.model_selection._incremental import fit\n >>> info, models, history, best = fit(model, params,\n ... X_train, y_train,\n ... X_test, y_test,\n ... additional_calls=remove_worst,\n ... fit_params={'classes': [0, 1]},\n ... random_state=0)\n\n >>> models\n {2: >> models[2].result()\n SGDClassifier(...)\n >>> info[2][-1] # doctest: +SKIP\n {'model_id': 2,\n 'params': {'l1_ratio': 0.9529529529529529, 'average': False,\n 'alpha': 0.014933932161242525},\n 'partial_fit_calls': 8,\n 'partial_fit_time': 0.17334818840026855,\n 'score': 0.58765,\n 'score_time': 0.031442880630493164}\n\n Returns\n -------\n info : Dict[int, List[Dict]]\n Scoring history of each successful model, keyed by model ID.\n This has the parameters, scores, and timing information over time\n models : Dict[int, Future]\n Dask futures pointing to trained models\n history : List[Dict]\n A history of all models scores over time" - }, - { - "code": "def check_nova():\n if HAS_NOVA:\n novaclient_ver = _LooseVersion(novaclient.__version__)\n min_ver = _LooseVersion(NOVACLIENT_MINVER)\n if min_ver <= novaclient_ver:\n return HAS_NOVA\n log.debug('Newer novaclient version required. Minimum: %s', NOVACLIENT_MINVER)\n return False", - "docstring": "Check version of novaclient" - }, - { - "code": "def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2'):\n out_vrt = os.path.splitext(out_csv)[0]+'.vrt'\n out_csv = os.path.split(out_csv)[-1]\n f = open(out_vrt, 'w')\n f.write('\\n')\n f.write(' \\n' % os.path.splitext(out_csv)[0])\n f.write(' %s\\n' % out_csv)\n f.write(' wkbPoint\\n')\n f.write(' %s\\n' % srs)\n f.write(' \\n' % (x, y))\n f.write(' \\n')\n f.write('\\n')\n f.close()", - "docstring": "Write out a vrt to accompany a csv of points" - }, - { - "code": "def ensure_specification_cols_are_in_dataframe(specification, dataframe):\n try:\n assert isinstance(specification, OrderedDict)\n except AssertionError:\n raise TypeError(\"`specification` must be an OrderedDict.\")\n assert isinstance(dataframe, pd.DataFrame)\n problem_cols = []\n dataframe_cols = dataframe.columns\n for key in specification:\n if key not in dataframe_cols:\n problem_cols.append(key)\n if problem_cols != []:\n msg = \"The following keys in the specification are not in 'data':\\n{}\"\n raise ValueError(msg.format(problem_cols))\n return None", - "docstring": "Checks whether each column in `specification` is in `dataframe`. Raises\n ValueError if any of the columns are not in the dataframe.\n\n Parameters\n ----------\n specification : OrderedDict.\n Keys are a proper subset of the columns in `data`. Values are either a\n list or a single string, \"all_diff\" or \"all_same\". 
If a list, the\n elements should be:\n - single objects that are in the alternative ID column of `data`\n - lists of objects that are within the alternative ID column of\n `data`. For each single object in the list, a unique column will\n be created (i.e. there will be a unique coefficient for that\n variable in the corresponding utility equation of the\n corresponding alternative). For lists within the\n `specification` values, a single column will be created for all\n the alternatives within the iterable (i.e. there will be one\n common coefficient for the variables in the iterable).\n dataframe : pandas DataFrame.\n Dataframe containing the data for the choice model to be estimated.\n\n Returns\n -------\n None." - }, - { - "code": "def verbose(f):\n @click.pass_context\n def new_func(ctx, *args, **kwargs):\n global log\n verbosity = [\"critical\", \"error\", \"warn\", \"info\", \"debug\"][\n int(min(ctx.obj.get(\"verbose\", 0), 4))\n ]\n log.setLevel(getattr(logging, verbosity.upper()))\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n ch = logging.StreamHandler()\n ch.setLevel(getattr(logging, verbosity.upper()))\n ch.setFormatter(formatter)\n log.addHandler(ch)\n if ctx.obj.get(\"verbose\", 0) > 4:\n verbosity = [\"critical\", \"error\", \"warn\", \"info\", \"debug\"][\n int(min(ctx.obj.get(\"verbose\", 4) - 4, 4))\n ]\n log = logging.getLogger(\"grapheneapi\")\n log.setLevel(getattr(logging, verbosity.upper()))\n log.addHandler(ch)\n if ctx.obj.get(\"verbose\", 0) > 8:\n verbosity = [\"critical\", \"error\", \"warn\", \"info\", \"debug\"][\n int(min(ctx.obj.get(\"verbose\", 8) - 8, 4))\n ]\n log = logging.getLogger(\"graphenebase\")\n log.setLevel(getattr(logging, verbosity.upper()))\n log.addHandler(ch)\n return ctx.invoke(f, *args, **kwargs)\n return update_wrapper(new_func, f)", - "docstring": "Add verbose flags and add logging handlers" - }, - { - "code": "def indexXY(self, index):\n rect = self.visualRect(index)\n return rect.x(), rect.y()", - "docstring": "Returns the top left coordinates of the item for the given index\n\n :param index: index for the item\n :type index: :qtdoc:`QModelIndex`\n :returns: (int, int) -- (x, y) view coordinates of item" - }, - { - "code": "def unregister_project_nodes(self, node):\n for node in reversed(list(foundations.walkers.nodes_walker(node))):\n if node.family == \"Directory\":\n self.unregister_directory(node)\n elif node.family == \"File\":\n self.unregister_file(node)", - "docstring": "Unregisters given Node children.\n\n :param node: Node.\n :type node: ProjectNode or DirectoryNode" - }, - { - "code": "def _make_signature(self, header_b64, payload_b64, signing_key):\n token_segments = [header_b64, payload_b64]\n signing_input = b'.'.join(token_segments)\n signer = self._get_signer(signing_key)\n signer.update(signing_input)\n signature = signer.finalize()\n raw_signature = der_to_raw_signature(signature, signing_key.curve)\n return base64url_encode(raw_signature)", - "docstring": "Sign a serialized header and payload.\n Return the urlsafe-base64-encoded signature." - }, - { - "code": "def only_self(self):\n others, self.others = self.others, []\n try:\n yield\n finally:\n self.others = others + self.others", - "docstring": "Only match in self not others." - }, - { - "code": "def next(self):\n location = self.chain.index(self)\n if not self.end():\n return self.chain[location + 1]", - "docstring": "Returns next error checking strategy." 
- }, - { - "code": "def sys_update_char(\n asciiCode: int,\n fontx: int,\n fonty: int,\n img: tcod.image.Image,\n x: int,\n y: int,\n) -> None:\n lib.TCOD_sys_update_char(_int(asciiCode), fontx, fonty, img, x, y)", - "docstring": "Dynamically update the current font with img.\n\n All cells using this asciiCode will be updated\n at the next call to :any:`tcod.console_flush`.\n\n Args:\n asciiCode (int): Ascii code corresponding to the character to update.\n fontx (int): Left coordinate of the character\n in the bitmap font (in tiles)\n fonty (int): Top coordinate of the character\n in the bitmap font (in tiles)\n img (Image): An image containing the new character bitmap.\n x (int): Left pixel of the character in the image.\n y (int): Top pixel of the character in the image." - }, - { - "code": "def disconnect(self, receiver=None, sender=None, dispatch_uid=None):\n if dispatch_uid:\n lookup_key = (dispatch_uid, _make_id(sender))\n else:\n lookup_key = (_make_id(receiver), _make_id(sender))\n disconnected = False\n with self.lock:\n self._clear_dead_receivers()\n for index in range(len(self.receivers)):\n (r_key, _) = self.receivers[index]\n if r_key == lookup_key:\n disconnected = True\n del self.receivers[index]\n break\n self.sender_receivers_cache.clear()\n return disconnected", - "docstring": "Disconnect receiver from sender for signal.\n\n If weak references are used, disconnect need not be called. The receiver\n will be remove from dispatch automatically.\n\n Arguments:\n\n receiver\n The registered receiver to disconnect. May be none if\n dispatch_uid is specified.\n\n sender\n The registered sender to disconnect\n\n dispatch_uid\n the unique identifier of the receiver to disconnect" - }, - { - "code": "def clean_session_table():\n sessions = SessionActivity.query_by_expired().all()\n for session in sessions:\n delete_session(sid_s=session.sid_s)\n db.session.commit()", - "docstring": "Automatically clean session table.\n\n To enable a periodically clean of the session table, you should configure\n the task as a celery periodic task.\n\n .. code-block:: python\n\n from datetime import timedelta\n CELERYBEAT_SCHEDULE = {\n 'session_cleaner': {\n 'task': 'invenio_accounts.tasks.clean_session_table',\n 'schedule': timedelta(days=1),\n },\n }\n\n See `Invenio-Celery `_\n documentation for further details." 
- }, - { - "code": "def draw_widget(self, item):\n if item:\n self.filter_remove(remember=True)\n selected_id = self.treedata[item]['id']\n item = self.get_toplevel_parent(item)\n widget_id = self.treedata[item]['id']\n wclass = self.treedata[item]['class']\n xmlnode = self.tree_node_to_xml('', item)\n self.previewer.draw(item, widget_id, xmlnode, wclass)\n self.previewer.show_selected(item, selected_id)\n self.filter_restore()", - "docstring": "Create a preview of the selected treeview item" - }, - { - "code": "def get_policy_for_vhost(self, vhost, name):\n return self._api_get('/api/policies/{0}/{1}'.format(\n urllib.parse.quote_plus(vhost),\n urllib.parse.quote_plus(name),\n ))", - "docstring": "Get a specific policy for a vhost.\n\n :param vhost: The virtual host the policy is for\n :type vhost: str\n :param name: The name of the policy\n :type name: str" - }, - { - "code": "def reset(self):\n self._current_index = -1\n self._current_value = self._default_value\n self._current_rendered = self._default_rendered\n self.offset = None", - "docstring": "Reset the field to its default state" - }, - { - "code": "def get_paginator(self, operation_name):\n if not self.can_paginate(operation_name):\n raise OperationNotPageableError(operation_name=operation_name)\n else:\n Paginator.PAGE_ITERATOR_CLS = AioPageIterator\n actual_operation_name = self._PY_TO_OP_NAME[operation_name]\n def paginate(self, **kwargs):\n return Paginator.paginate(self, **kwargs)\n paginator_config = self._cache['page_config'][\n actual_operation_name]\n paginator_class_name = str('%s.Paginator.%s' % (\n get_service_module_name(self.meta.service_model),\n actual_operation_name))\n documented_paginator_cls = type(\n paginator_class_name, (Paginator,), {'paginate': paginate})\n operation_model = self._service_model.\\\n operation_model(actual_operation_name)\n paginator = documented_paginator_cls(\n getattr(self, operation_name),\n paginator_config,\n operation_model)\n return paginator", - "docstring": "Create a paginator for an operation.\n\n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is ``create_foo``, and you'd normally invoke the\n operation as ``client.create_foo(**kwargs)``, if the\n ``create_foo`` operation can be paginated, you can use the\n call ``client.get_paginator(\"create_foo\")``.\n\n :raise OperationNotPageableError: Raised if the operation is not\n pageable. You can use the ``client.can_paginate`` method to\n check if an operation is pageable.\n\n :rtype: L{botocore.paginate.Paginator}\n :return: A paginator object." 
- }, - { - "code": "def fetch_group_cached(group_id, failures=True, wait=0, count=None, broker=None):\n if not broker:\n broker = get_broker()\n start = time()\n if count:\n while True:\n if count_group_cached(group_id) == count or wait and (time() - start) * 1000 >= wait >= 0:\n break\n sleep(0.01)\n while True:\n group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))\n if group_list:\n task_list = []\n for task_key in group_list:\n task = SignedPackage.loads(broker.cache.get(task_key))\n if task['success'] or failures:\n t = Task(id=task['id'],\n name=task['name'],\n func=task['func'],\n hook=task.get('hook'),\n args=task['args'],\n kwargs=task['kwargs'],\n started=task['started'],\n stopped=task['stopped'],\n result=task['result'],\n group=task.get('group'),\n success=task['success'])\n task_list.append(t)\n return task_list\n if (time() - start) * 1000 >= wait >= 0:\n break\n sleep(0.01)", - "docstring": "Return a list of Tasks for a task group in the cache backend" - }, - { - "code": "def _serve_environment(self, request):\n return http_util.Respond(\n request,\n {\n 'data_location': self._logdir or self._db_uri,\n 'mode': 'db' if self._db_uri else 'logdir',\n 'window_title': self._window_title,\n },\n 'application/json')", - "docstring": "Serve a JSON object containing some base properties used by the frontend.\n\n * data_location is either a path to a directory or an address to a\n database (depending on which mode TensorBoard is running in).\n * window_title is the title of the TensorBoard web page." - }, - { - "code": "def _recurse_replace(obj, key, new_key, sub, remove):\n if isinstance(obj, list):\n return [_recurse_replace(x, key, new_key, sub, remove) for x in obj]\n if isinstance(obj, dict):\n for k, v in list(obj.items()):\n if k == key and v in sub:\n obj[new_key] = sub[v]\n if remove:\n del obj[key]\n else:\n obj[k] = _recurse_replace(v, key, new_key, sub, remove)\n return obj", - "docstring": "Recursive helper for `replace_by_key`" - }, - { - "code": "def connect(self):\n try:\n self.initialize_connection()\n except ChromecastConnectionError:\n self._report_connection_status(\n ConnectionStatus(CONNECTION_STATUS_DISCONNECTED,\n NetworkAddress(self.host, self.port)))\n return", - "docstring": "Connect socket connection to Chromecast device.\n\n Must only be called if the worker thread will not be started." - }, - { - "code": "def gameloop(self):\n try:\n while True:\n self.handle_events()\n self.update()\n self.render()\n except KeyboardInterrupt:\n pass", - "docstring": "A game loop that circles through the methods." - }, - { - "code": "def update_centroids_from_list(self, list_values):\n [self.update(value['m'], value['c']) for value in list_values]\n return self", - "docstring": "Add or update Centroids from a Python list.\n Any existing centroids in the digest object are appropriately updated.\n\n Example:\n digest.update_centroids([{'c': 1.0, 'm': 1.0}, {'c': 1.0, 'm': 2.0}, {'c': 1.0, 'm': 3.0}])" - }, - { - "code": "def Dlmk(l,m,k,phi1,phi2,theta1,theta2):\n return exp(complex(0.,-m*phi1)) * dlmk(l,m,k,theta1) * \\\n exp(complex(0.,-k*gamma(phi1,phi2,theta1,theta2)))", - "docstring": "returns value of D^l_mk as defined in allen, ottewill 97." 
- }, - { - "code": "def append_field(self, fieldname):\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('RR record not yet initialized!')\n if fieldname == 'PX':\n bit = 0\n elif fieldname == 'PN':\n bit = 1\n elif fieldname == 'SL':\n bit = 2\n elif fieldname == 'NM':\n bit = 3\n elif fieldname == 'CL':\n bit = 4\n elif fieldname == 'PL':\n bit = 5\n elif fieldname == 'RE':\n bit = 6\n elif fieldname == 'TF':\n bit = 7\n else:\n raise pycdlibexception.PyCdlibInternalError('Unknown RR field name %s' % (fieldname))\n self.rr_flags |= (1 << bit)", - "docstring": "Mark a field as present in the Rock Ridge records.\n\n Parameters:\n fieldname - The name of the field to mark as present; should be one\n of 'PX', 'PN', 'SL', 'NM', 'CL', 'PL', 'RE', or 'TF'.\n Returns:\n Nothing." - }, - { - "code": "def pathName(self, pathName: str):\n if self.pathName == pathName:\n return\n pathName = self.sanitise(pathName)\n before = self.realPath\n after = self._realPath(pathName)\n assert (not os.path.exists(after))\n newRealDir = os.path.dirname(after)\n if not os.path.exists(newRealDir):\n os.makedirs(newRealDir, DirSettings.defaultDirChmod)\n shutil.move(before, after)\n oldPathName = self._pathName\n self._pathName = pathName\n self._directory()._fileMoved(oldPathName, self)", - "docstring": "Path Name Setter\n\n Set path name with passed in variable, create new directory and move\n previous directory contents to new path name.\n\n @param pathName: New path name string.\n @type pathName: String" - }, - { - "code": "def db_from_hass_config(path=None, **kwargs):\n if path is None:\n path = config.find_hass_config()\n url = config.db_url_from_hass_config(path)\n return HassDatabase(url, **kwargs)", - "docstring": "Initialize a database from HASS config." - }, - { - "code": "def from_config(config, **options):\n required_args = ('storage-backends',)\n optional_args = {'events_per_batch': 25000}\n rconfig.check_config_options(\"SyncedRotationEventStores\",\n required_args,\n tuple(optional_args.keys()), options)\n if \"events_per_batch\" in options:\n events_per_batch = int(options[\"events_per_batch\"])\n else:\n events_per_batch = optional_args[\"events_per_batch\"]\n estore = SyncedRotationEventStores(events_per_batch)\n for section in options['storage-backends'].split(' '):\n try:\n substore = rconfig.construct_eventstore(config, section)\n estore.add_rotated_store(substore)\n except Exception as e:\n _logger.exception('Could not instantiate substore from'\n ' section %s', section)\n estore.close()\n raise\n return estore", - "docstring": "Instantiate an `SyncedRotationEventStores` from config.\n\n Parameters:\n config -- the configuration file options read from file(s).\n **options -- various options given to the specific event store. Shall\n not be used with this event store. Warning will be logged\n for every extra non-recognized option. The only required\n key to this function is 'path'.\n\n returns -- a newly instantiated `SyncedRotationEventStores`." 
- }, - { - "code": "def check_scicrunch_for_label(self, label: str) -> dict:\n list_of_crude_matches = self.crude_search_scicrunch_via_label(label)\n for crude_match in list_of_crude_matches:\n if crude_match['label'].lower().strip() == label.lower().strip():\n complete_data_of_crude_match = self.get_entity(crude_match['ilx'])\n crude_match_label = crude_match['label']\n crude_match_user_id = complete_data_of_crude_match['uid']\n if str(self.user_id) == str(crude_match_user_id):\n return complete_data_of_crude_match\n return {}", - "docstring": "Sees if label with your user ID already exists\n\n There are can be multiples of the same label in interlex, but there should only be one\n label with your user id. Therefore you can create labels if there already techniqually\n exist, but not if you are the one to create it." - }, - { - "code": "def get_profile_user_fieldname(profile_model=None, user_model=None):\n Profile = profile_model or get_profile_model()\n User = user_model or get_user_model()\n for field in Profile._meta.fields:\n if field.rel and field.rel.to == User:\n return field.name\n raise ImproperlyConfigured(\"Value for ACCOUNTS_PROFILE_MODEL does not \"\n \"contain a ForeignKey field for auth.User: %s\"\n % Profile.__name__)", - "docstring": "Returns the name of the first field on the profile model that\n points to the ``auth.User`` model." - }, - { - "code": "def _parse(self):\n self.response = self.resource\n self.resource = self.resource.xpath(\"//ti:passage/tei:TEI\", namespaces=XPATH_NAMESPACES)[0]\n self._prev_id, self._next_id = _SharedMethod.prevnext(self.response)\n if not self.citation.is_set() and len(self.resource.xpath(\"//ti:citation\", namespaces=XPATH_NAMESPACES)):\n self.citation = CtsCollection.XmlCtsCitation.ingest(\n self.response,\n xpath=\".//ti:citation[not(ancestor::ti:citation)]\"\n )", - "docstring": "Given self.resource, split information from the CTS API\n\n :return: None" - }, - { - "code": "def wgan(cls, data:DataBunch, generator:nn.Module, critic:nn.Module, switcher:Callback=None, clip:float=0.01, **learn_kwargs):\n \"Create a WGAN from `data`, `generator` and `critic`.\"\n return cls(data, generator, critic, NoopLoss(), WassersteinLoss(), switcher=switcher, clip=clip, **learn_kwargs)", - "docstring": "Create a WGAN from `data`, `generator` and `critic`." 
- }, - { - "code": "def align_data(*args):\n res = pd.DataFrame(pd.concat([\n d.loc[~d.index.duplicated(keep='first')].rename(\n columns=lambda vv: '%s_%d' % (vv, i + 1)\n ) for i, d in enumerate(args)\n ], axis=1))\n data_cols = [col for col in res.columns if col[-2:] == '_1']\n other_cols = [col for col in res.columns if col[-2:] != '_1']\n res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad')\n return res.dropna(subset=data_cols)", - "docstring": "Resample and aligh data for defined frequency\n\n Args:\n *args: DataFrame of data to be aligned\n\n Returns:\n pd.DataFrame: aligned data with renamed columns\n\n Examples:\n >>> start = '2018-09-10T10:10:00'\n >>> tz = 'Australia/Sydney'\n >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz)\n >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04]\n >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718]\n >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx)\n >>> d1\n price volume\n 2018-09-10 10:10:00+10:00 31.08 10166\n 2018-09-10 10:11:00+10:00 31.10 69981\n 2018-09-10 10:12:00+10:00 31.11 14343\n 2018-09-10 10:13:00+10:00 31.07 10096\n 2018-09-10 10:14:00+10:00 31.04 11506\n 2018-09-10 10:15:00+10:00 31.04 9718\n >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79]\n >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791]\n >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx)\n >>> d2\n price volume\n 2018-09-10 10:10:00+10:00 70.81 4749\n 2018-09-10 10:11:00+10:00 70.78 6762\n 2018-09-10 10:12:00+10:00 70.85 4908\n 2018-09-10 10:13:00+10:00 70.79 2002\n 2018-09-10 10:14:00+10:00 70.79 9170\n 2018-09-10 10:15:00+10:00 70.79 9791\n >>> align_data(d1, d2)\n price_1 volume_1 price_2 volume_2\n 2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749\n 2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762\n 2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908\n 2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002\n 2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170\n 2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791" - }, - { - "code": "def resolve_local(self, uri, base_uri, ref):\n file_path = None\n item_name = None\n if (uri.startswith(u\"file\") or\n uri.startswith(u\"File\")):\n if ref.startswith(u\"./\"):\n ref = ref.split(u\"./\")[-1]\n org_ref = ref\n if ref.find(u\"\n ref = ref.split(u\"\n if ref.find(u\".json\") != -1:\n item_name = ref.split(u\".json\")[0]\n if base_uri.startswith(u\"file://\") is True:\n base_uri = base_uri.split(u\"file://\")[1]\n elif base_uri.startswith(u\"File://\") is True:\n base_uri = base_uri.split(u\"File://\")[1]\n file_path = os.path.join(base_uri, ref)\n result = None\n try:\n schema_file = open(file_path, \"r\").read()\n result = json.loads(schema_file.decode(\"utf-8\"))\n except IOError as e:\n log.error(u\"file not found %s\" % e)\n msg = \"Could not find schema file. 
%s\" % file_path\n raise SalesKingException(\"SCHEMA_NOT_FOUND\", msg)\n if self.cache_remote:\n self.store[uri] = result\n return result", - "docstring": "Resolve a local ``uri``.\n Does not check the store first.\n\n :argument str uri: the URI to resolve\n :returns: the retrieved document" - }, - { - "code": "def do_account_info(self):\n s, metadata = self.n.getRegisterUserInfo()\n pprint.PrettyPrinter(indent=2).pprint(metadata)", - "docstring": "display account information" - }, - { - "code": "def update_kb_mapping(kb_name, old_key, key, value):\n db.session.query(models.KnwKBRVAL).join(models.KnwKB) \\\n .filter(models.KnwKB.name == kb_name,\n models.KnwKBRVAL.m_key == old_key) \\\n .update({\"m_key\": key, \"m_value\": value}, synchronize_session=False)", - "docstring": "Update an existing kb mapping with key old_key with a new key and value.\n\n :param kb_name: the name of the kb where to insert the new value\n :param old_key: the key of the mapping in the kb\n :param key: the new key of the mapping\n :param value: the new value of the mapping" - }, - { - "code": "def find_whitespace_pattern(self):\n name_ws = []\n value_ws = []\n for entry in self._entries:\n name_ws.append(get_whitespace(entry.name))\n if entry.value != '':\n value_ws.append(get_whitespace(entry._value))\n if len(value_ws) >= 1:\n value_ws = most_common(value_ws)\n else:\n value_ws = ('', ' ')\n if len(name_ws) >= 1:\n name_ws = most_common(name_ws)\n else:\n name_ws = (' ', '')\n return name_ws, value_ws", - "docstring": "Try to find a whitespace pattern in the existing parameters\n to be applied to a newly added parameter" - }, - { - "code": "def verboselogs_module_transform(mod):\n if mod.name == 'logging':\n for const in ['NOTICE', 'SPAM', 'SUCCESS', 'VERBOSE']:\n mod.locals[const] = [nodes.Const(const)]", - "docstring": "Make Pylint aware of our custom log levels." - }, - { - "code": "def extend_memory(self, start_position: int, size: int) -> None:\n validate_uint256(start_position, title=\"Memory start position\")\n validate_uint256(size, title=\"Memory size\")\n before_size = ceil32(len(self._memory))\n after_size = ceil32(start_position + size)\n before_cost = memory_gas_cost(before_size)\n after_cost = memory_gas_cost(after_size)\n if self.logger.show_debug2:\n self.logger.debug2(\n \"MEMORY: size (%s -> %s) | cost (%s -> %s)\",\n before_size,\n after_size,\n before_cost,\n after_cost,\n )\n if size:\n if before_cost < after_cost:\n gas_fee = after_cost - before_cost\n self._gas_meter.consume_gas(\n gas_fee,\n reason=\" \".join((\n \"Expanding memory\",\n str(before_size),\n \"->\",\n str(after_size),\n ))\n )\n self._memory.extend(start_position, size)", - "docstring": "Extend the size of the memory to be at minimum ``start_position + size``\n bytes in length. Raise `eth.exceptions.OutOfGas` if there is not enough\n gas to pay for extending the memory." - }, - { - "code": "def delete_budget(self, model_uuid):\n return make_request(\n '{}model/{}/budget'.format(self.url, model_uuid),\n method='DELETE',\n timeout=self.timeout,\n client=self._client)", - "docstring": "Delete a budget.\n\n @param the name of the wallet.\n @param the model UUID.\n @return a success string from the plans server.\n @raise ServerError via make_request." 
- }, - { - "code": "def traverse(self):\n if self.verbose > 1:\n print_('\\t' + self._id + ' Running Traverse proposal kernel')\n phi = self.phi\n theta = self.traverse_theta\n if (random() < (theta - 1) / (2 * theta)):\n beta = exp(1 / (theta + 1) * log(random()))\n else:\n beta = exp(1 / (1 - theta) * log(random()))\n if self._prime:\n xp, x = self.values\n else:\n x, xp = self.values\n if self.verbose > 1:\n print_('\\t' + 'Current value = ' + str(x))\n x = (xp + beta * (xp - x)) * phi + x * (phi == False)\n if self.verbose > 1:\n print_('\\t' + 'Proposed value = ' + str(x))\n self.stochastic.value = x\n self.hastings_factor = (sum(phi) - 2) * log(beta)", - "docstring": "Traverse proposal kernel" - }, - { - "code": "def read_many(self, start_sequence, min_count, max_count):\n check_not_negative(start_sequence, \"sequence can't be smaller than 0\")\n check_true(max_count >= min_count, \"max count should be greater or equal to min count\")\n check_true(min_count <= self.capacity().result(), \"min count should be smaller or equal to capacity\")\n check_true(max_count < MAX_BATCH_SIZE, \"max count can't be greater than %d\" % MAX_BATCH_SIZE)\n return self._encode_invoke(ringbuffer_read_many_codec, response_handler=self._read_many_response_handler,\n start_sequence=start_sequence, min_count=min_count,\n max_count=max_count, filter=None)", - "docstring": "Reads a batch of items from the Ringbuffer. If the number of available items after the first read item is\n smaller than the max_count, these items are returned. So it could be the number of items read is smaller than\n the max_count. If there are less items available than min_count, then this call blocks. Reading a batch of items\n is likely to perform better because less overhead is involved.\n\n :param start_sequence: (long), the start_sequence of the first item to read.\n :param min_count: (int), the minimum number of items to read.\n :param max_count: (int), the maximum number of items to read.\n :return: (Sequence), the list of read items." - }, - { - "code": "def getPassagePlus(self, urn, inventory=None, context=None):\n return self.call({\n \"inv\": inventory,\n \"urn\": urn,\n \"context\": context,\n \"request\": \"GetPassagePlus\"\n })", - "docstring": "Retrieve a passage and information about it\n\n :param urn: URN identifying the text's passage (Minimum depth : 1)\n :type urn: text\n :param inventory: Name of the inventory\n :type inventory: text\n :param context: Number of citation units at the same level of the citation hierarchy as the requested urn, immediately preceding and immediately following the requested urn to include in the reply\n :type context: int\n :rtype: str" - }, - { - "code": "def copy(self, source, dest):\n self._check_valid_key(source)\n self._check_valid_key(dest)\n return self._copy(source, dest)", - "docstring": "Copies a key. The destination is overwritten if does exist.\n\n :param source: The source key to copy\n :param dest: The destination for the copy\n\n :returns: The destination key\n\n :raises: exceptions.ValueError: If the source or target key are not valid\n :raises: exceptions.KeyError: If the source key was not found" - }, - { - "code": "def _add_stage(self, name):\n def stage_func(self, *args, **kwargs):\n self._pipe.append(Stage(name, args, kwargs))\n return self\n setattr(Pipeline, name, stage_func)", - "docstring": "Add stage methods at runtime.\n\n Stage methods all follow the same pattern.\n\n :param name: Stage name." 
- }, - { - "code": "def get_lang_array(self):\n r = self.yandex_translate_request(\"getLangs\", \"\")\n self.handle_errors(r)\n return r.json()[\"dirs\"]", - "docstring": "gets supported langs as an array" - }, - { - "code": "def execute_dynamo_definition(self, definition_path,\n show_ui=False, shutdown=True,\n automation=False, path_exec=True):\n self._add_entry(templates.DYNAMO_COMMAND\n .format(dynamo_def_path=definition_path,\n dyn_show_ui=show_ui,\n dyn_automation=automation,\n dyn_path_exec=path_exec,\n dyn_shutdown=shutdown))", - "docstring": "Execute a dynamo definition.\n\n Args:\n definition_path (str): full path to dynamo definition file\n show_ui (bool): show dynamo UI at execution\n shutdown (bool): shutdown model after execution\n automation (bool): activate dynamo automation\n path_exec (bool): activate dynamo path execute\n\n Examples:\n >>> jm = JournalMaker()\n >>> jm.execute_dynamo_definition(\n ... definition_path='C:/testdef.dyn',\n ... show_ui=True,\n ... shutdown=True\n ... )" - }, - { - "code": "def get_connections(self):\n con = []\n maxconn = self.max_connectivity\n for ii in range(0, maxconn.shape[0]):\n for jj in range(0, maxconn.shape[1]):\n if maxconn[ii][jj] != 0:\n dist = self.s.get_distance(ii, jj)\n con.append([ii, jj, dist])\n return con", - "docstring": "Returns a list of site pairs that are Voronoi Neighbors, along\n with their real-space distances." - }, - { - "code": "def prune(self):\n if not self[0] or not self[1]:\n direction = not self[0]\n result = self[direction]\n return result\n else:\n heir, self[0] = self[0].pop_greatest_child()\n (heir[0], heir[1]) = (self[0], self[1])\n heir.refresh_balance()\n heir = heir.rotate()\n return heir", - "docstring": "On a subtree where the root node's s_center is empty,\n return a new subtree with no empty s_centers." - }, - { - "code": "def makedir(dir_name):\n if os.path.exists(dir_name):\n delete(dir_name)\n os.mkdir(dir_name)", - "docstring": "\"Strong\" directory maker.\n\n \"Strong\" version of `os.mkdir`. If `dir_name` already exists, this deletes \n it first.\n\n **Parameters**\n\n **dir_name** : string\n\n Path to a file directory that may or may not already exist.\n\n **See Also:**\n\n :func:`tabular.io.delete`, \n `os `_" - }, - { - "code": "def to_cartesian(r, theta, theta_units=\"radians\"):\n assert theta_units in ['radians', 'degrees'],\\\n \"kwarg theta_units must specified in radians or degrees\"\n if theta_units == \"degrees\":\n theta = to_radians(theta)\n theta = to_proper_radians(theta)\n x = r * cos(theta)\n y = r * sin(theta)\n return x, y", - "docstring": "Converts polar r, theta to cartesian x, y." - }, - { - "code": "def find_censored_md5ext(post_id: int) -> Optional[str]:\n \"Find MD5 for a censored post's ID, return None if can't find.\"\n try:\n last_pull_date = LAST_PULL_DATE_FILE.read_text().strip()\n except FileNotFoundError:\n last_pull_date = \"\"\n date = datetime.utcnow()\n date = f\"{date.year}{date.month}{date.day}\"\n if last_pull_date != date:\n update_batches()\n LAST_PULL_DATE_FILE.parent.mkdir(exist_ok=True, parents=True)\n LAST_PULL_DATE_FILE.write_text(date)\n post_id = str(post_id)\n for batch in BATCHES_DIR.iterdir():\n with open(batch, \"r\") as content:\n for line in content:\n an_id, its_md5_ext = line.split(\":\")\n if post_id == an_id:\n return its_md5_ext.rstrip().split(\".\")\n return None", - "docstring": "Find MD5 for a censored post's ID, return None if can't find." 
- }, - { - "code": "def error(self, msg, *args, **kwargs):\n kwargs.setdefault('inc_stackinfo', True)\n kwargs.setdefault('inc_multiproc', True)\n self.log(ERROR, msg, args, **kwargs)", - "docstring": "Log a message with ERROR level. Automatically includes stack and\n process info unless they are specifically not included." - }, - { - "code": "def do_gather(flist):\n hlist = []\n nskip = 3\n for fname in flist:\n fin = fits.open(fname)\n if len(hlist) == 0:\n if fin[1].name == 'SKYMAP':\n nskip = 4\n start = 0\n else:\n start = nskip\n for h in fin[start:]:\n hlist.append(h)\n hdulistout = fits.HDUList(hlist)\n return hdulistout", - "docstring": "Gather all the HDUs from a list of files" - }, - { - "code": "def get_pp_name(self):\n ppnames = []\n natomtypes = int(self._get_line('number of atomic types', self.outputf).split()[5])\n with open(self.outputf) as fp:\n for line in fp:\n if \"PseudoPot.\n ppnames.append(Scalar(value=next(fp).split('/')[-1].rstrip()))\n if len(ppnames) == natomtypes:\n return Value(scalars=ppnames)\n raise Exception('Could not find %i pseudopotential names'%natomtypes)", - "docstring": "Determine the pseudopotential names from the output" - }, - { - "code": "def validate(self, value, messages=None):\n valid = True\n primitive = value\n def log(msg):\n if messages is not None:\n messages.append(msg)\n if self.enum:\n if value not in self.enum.values():\n valid = False\n flds = (self.name, str(value))\n log(\"%s value '%s' not in allowed enumerated values.\" % flds)\n else:\n primitive = int(self.enum.keys()[self.enum.values().index(value)])\n if self.type:\n if self.type.validate(primitive, messages, self.name) is False:\n valid = False\n return valid", - "docstring": "Returns True if the given field value is valid, False otherwise.\n Validation error messages are appended to an optional messages\n array." - }, - { - "code": "def get_all_formatters():\n for info in itervalues(FORMATTERS):\n if info[1] not in _formatter_cache:\n _load_formatters(info[0])\n yield _formatter_cache[info[1]]\n for _, formatter in find_plugin_formatters():\n yield formatter", - "docstring": "Return a generator for all formatter classes." - }, - { - "code": "def addIVMInputs(imageObjectList,ivmlist):\n if ivmlist is None:\n return\n for img,ivmname in zip(imageObjectList,ivmlist):\n img.updateIVMName(ivmname)", - "docstring": "Add IVM filenames provided by user to outputNames dictionary for each input imageObject." 
- }, - { - "code": "def visit_dict(self, node, parent):\n newnode = nodes.Dict(node.lineno, node.col_offset, parent)\n items = list(self._visit_dict_items(node, parent, newnode))\n newnode.postinit(items)\n return newnode", - "docstring": "visit a Dict node by returning a fresh instance of it" - }, - { - "code": "def plot_recurrence_model(\n input_model, catalogue, completeness, dmag=0.1, filename=None,\n figure_size=(8, 6), filetype='png', dpi=300, ax=None):\n annual_rates, cumulative_rates = _get_recurrence_model(input_model)\n if not catalogue.end_year:\n catalogue.update_end_year()\n cent_mag, t_per, n_obs = get_completeness_counts(catalogue,\n completeness,\n dmag)\n obs_rates = n_obs / t_per\n cum_obs_rates = np.array([np.sum(obs_rates[i:])\n for i in range(len(obs_rates))])\n if ax is None:\n fig, ax = plt.subplots(figsize=figure_size)\n else:\n fig = ax.get_figure()\n ax.semilogy(cent_mag, obs_rates, 'bo')\n ax.semilogy(annual_rates[:, 0], annual_rates[:, 1], 'b-')\n ax.semilogy(cent_mag, cum_obs_rates, 'rs')\n ax.semilogy(annual_rates[:, 0], cumulative_rates, 'r-')\n ax.grid(which='both')\n ax.set_xlabel('Magnitude')\n ax.set_ylabel('Annual Rate')\n ax.legend(['Observed Incremental Rate',\n 'Model Incremental Rate',\n 'Observed Cumulative Rate',\n 'Model Cumulative Rate'])\n ax.tick_params(labelsize=12)\n _save_image(fig, filename, filetype, dpi)", - "docstring": "Plot a calculated recurrence model over an observed catalogue, adjusted for\n time-varying completeness" - }, - { - "code": "def checkUserManage(self):\n granted = False\n can_access = self.checkUserAccess()\n if can_access is True:\n pm = getToolByName(self, 'portal_membership')\n edit_allowed = pm.checkPermission(EditWorksheet, self)\n if edit_allowed:\n member = pm.getAuthenticatedMember()\n analyst = self.getAnalyst().strip()\n if analyst != _c(member.getId()):\n if pm.checkPermission(ManageWorksheets, self):\n granted = True\n else:\n granted = True\n return granted", - "docstring": "Checks if the current user has granted access to this worksheet\n and if has also privileges for managing it." - }, - { - "code": "def consume_all(self, max_loops=None):\n for row in self:\n self.rows[row.row_key] = row", - "docstring": "Consume the streamed responses until there are no more.\n\n .. warning::\n This method will be removed in future releases. Please use this\n class as a generator instead.\n\n :type max_loops: int\n :param max_loops: (Optional) Maximum number of times to try to consume\n an additional ``ReadRowsResponse``. You can use this\n to avoid long wait times." 
- }, - { - "code": "def known(self, words):\n tmp = [w.lower() for w in words]\n return set(\n w\n for w in tmp\n if w in self._word_frequency.dictionary\n or not self._check_if_should_check(w)\n )", - "docstring": "The subset of `words` that appear in the dictionary of words\n\n Args:\n words (list): List of words to determine which are in the \\\n corpus\n Returns:\n set: The set of those words from the input that are in the \\\n corpus" - }, - { - "code": "def generate_p_star(num_groups):\n p_star = np.eye(num_groups, num_groups)\n rd.shuffle(p_star)\n return p_star", - "docstring": "Describe the order in which groups move\n\n Arguments\n ---------\n num_groups : int\n\n Returns\n -------\n np.ndarray\n Matrix P* - size (g-by-g)" - }, - { - "code": "def latcyl(radius, lon, lat):\n radius = ctypes.c_double(radius)\n lon = ctypes.c_double(lon)\n lat = ctypes.c_double(lat)\n r = ctypes.c_double()\n lonc = ctypes.c_double()\n z = ctypes.c_double()\n libspice.latcyl_c(radius, lon, lat, ctypes.byref(r), ctypes.byref(lonc),\n ctypes.byref(z))\n return r.value, lonc.value, z.value", - "docstring": "Convert from latitudinal coordinates to cylindrical coordinates.\n\n http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latcyl_c.html\n\n :param radius: Distance of a point from the origin.\n :type radius:\n :param lon: Angle of the point from the XZ plane in radians.\n :param lat: Angle of the point from the XY plane in radians.\n :return: (r, lonc, z)\n :rtype: tuple" - }, - { - "code": "def get_issues_by_resource(resource_id, table):\n v1_utils.verify_existence_and_get(resource_id, table)\n if table.name == 'jobs':\n JJI = models.JOIN_JOBS_ISSUES\n JJC = models.JOIN_JOBS_COMPONENTS\n JCI = models.JOIN_COMPONENTS_ISSUES\n j1 = sql.join(\n _TABLE,\n sql.join(\n JCI,\n JJC,\n sql.and_(\n JCI.c.component_id == JJC.c.component_id,\n JJC.c.job_id == resource_id,\n ),\n ),\n _TABLE.c.id == JCI.c.issue_id,\n )\n query = sql.select([_TABLE]).select_from(j1)\n rows = flask.g.db_conn.execute(query)\n rows = [dict(row) for row in rows]\n j2 = sql.join(\n _TABLE,\n JJI,\n sql.and_(\n _TABLE.c.id == JJI.c.issue_id,\n JJI.c.job_id == resource_id\n )\n )\n query2 = sql.select([_TABLE]).select_from(j2)\n rows2 = flask.g.db_conn.execute(query2)\n rows += [dict(row) for row in rows2]\n else:\n JCI = models.JOIN_COMPONENTS_ISSUES\n query = (sql.select([_TABLE])\n .select_from(JCI.join(_TABLE))\n .where(JCI.c.component_id == resource_id))\n rows = flask.g.db_conn.execute(query)\n rows = [dict(row) for row in rows]\n for row in rows:\n if row['tracker'] == 'github':\n l_tracker = github.Github(row['url'])\n elif row['tracker'] == 'bugzilla':\n l_tracker = bugzilla.Bugzilla(row['url'])\n row.update(l_tracker.dump())\n return flask.jsonify({'issues': rows,\n '_meta': {'count': len(rows)}})", - "docstring": "Get all issues for a specific job." - }, - { - "code": "def make_gpg_tmphome( prefix=None, config_dir=None ):\n if prefix is None:\n prefix = \"tmp\"\n config_dir = get_config_dir( config_dir )\n tmppath = os.path.join( config_dir, \"tmp\" )\n if not os.path.exists( tmppath ):\n os.makedirs( tmppath, 0700 )\n tmpdir = tempfile.mkdtemp( prefix=(\"%s-\" % prefix), dir=tmppath )\n return tmpdir", - "docstring": "Make a temporary directory to hold GPG keys that are not \n going to be stored to the application's keyring." 
- }, - { - "code": "def execute_streaming_sql(\n self,\n session,\n sql,\n transaction=None,\n params=None,\n param_types=None,\n resume_token=None,\n query_mode=None,\n partition_token=None,\n seqno=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if \"execute_streaming_sql\" not in self._inner_api_calls:\n self._inner_api_calls[\n \"execute_streaming_sql\"\n ] = google.api_core.gapic_v1.method.wrap_method(\n self.transport.execute_streaming_sql,\n default_retry=self._method_configs[\"ExecuteStreamingSql\"].retry,\n default_timeout=self._method_configs[\"ExecuteStreamingSql\"].timeout,\n client_info=self._client_info,\n )\n request = spanner_pb2.ExecuteSqlRequest(\n session=session,\n sql=sql,\n transaction=transaction,\n params=params,\n param_types=param_types,\n resume_token=resume_token,\n query_mode=query_mode,\n partition_token=partition_token,\n seqno=seqno,\n )\n if metadata is None:\n metadata = []\n metadata = list(metadata)\n try:\n routing_header = [(\"session\", session)]\n except AttributeError:\n pass\n else:\n routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(\n routing_header\n )\n metadata.append(routing_metadata)\n return self._inner_api_calls[\"execute_streaming_sql\"](\n request, retry=retry, timeout=timeout, metadata=metadata\n )", - "docstring": "Like ``ExecuteSql``, except returns the result set as a stream. Unlike\n ``ExecuteSql``, there is no limit on the size of the returned result\n set. However, no individual row in the result set can exceed 100 MiB,\n and no column value can exceed 10 MiB.\n\n Example:\n >>> from google.cloud import spanner_v1\n >>>\n >>> client = spanner_v1.SpannerClient()\n >>>\n >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')\n >>>\n >>> # TODO: Initialize `sql`:\n >>> sql = ''\n >>>\n >>> for element in client.execute_streaming_sql(session, sql):\n ... # process element\n ... pass\n\n Args:\n session (str): Required. The session in which the SQL query should be performed.\n sql (str): Required. The SQL string.\n transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a\n temporary read-only transaction with strong concurrency.\n\n The transaction to use.\n\n For queries, if none is provided, the default is a temporary read-only\n transaction with strong concurrency.\n\n Standard DML statements require a ReadWrite transaction. Single-use\n transactions are not supported (to avoid replay). The caller must\n either supply an existing transaction ID or begin a new transaction.\n\n Partitioned DML requires an existing PartitionedDml transaction ID.\n\n If a dict is provided, it must be of the same form as the protobuf\n message :class:`~google.cloud.spanner_v1.types.TransactionSelector`\n params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL string can contain parameter placeholders. A parameter\n placeholder consists of ``'@'`` followed by the parameter name.\n Parameter names consist of any combination of letters, numbers, and\n underscores.\n\n Parameters can appear anywhere that a literal value is expected. 
The\n same parameter name can be used more than once, for example:\n ``\"WHERE id > @msg_id AND id < @msg_id + 100\"``\n\n It is an error to execute an SQL statement with unbound parameters.\n\n Parameter values are specified using ``params``, which is a JSON object\n whose keys are parameter names, and whose values are the corresponding\n parameter values.\n\n If a dict is provided, it must be of the same form as the protobuf\n message :class:`~google.cloud.spanner_v1.types.Struct`\n param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type\n from a JSON value. For example, values of type ``BYTES`` and values of\n type ``STRING`` both appear in ``params`` as JSON strings.\n\n In these cases, ``param_types`` can be used to specify the exact SQL\n type for some or all of the SQL statement parameters. See the definition\n of ``Type`` for more information about SQL types.\n\n If a dict is provided, it must be of the same form as the protobuf\n message :class:`~google.cloud.spanner_v1.types.Type`\n resume_token (bytes): If this request is resuming a previously interrupted SQL statement\n execution, ``resume_token`` should be copied from the last\n ``PartialResultSet`` yielded before the interruption. Doing this enables\n the new SQL statement execution to resume where the last one left off.\n The rest of the request parameters must exactly match the request that\n yielded this token.\n query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in\n ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can\n only be set to ``QueryMode.NORMAL``.\n partition_token (bytes): If present, results will be restricted to the specified partition\n previously created using PartitionQuery(). There must be an exact match\n for the values of fields common to this message and the\n PartitionQueryRequest message used to create this partition\\_token.\n seqno (long): A per-transaction sequence number used to identify this request. This\n makes each request idempotent such that if the request is received multiple\n times, at most one will succeed.\n\n The sequence number must be monotonically increasing within the\n transaction. If a request arrives for the first time with an out-of-order\n sequence number, the transaction may be aborted. Replays of previously\n handled requests will yield the same response as the first execution.\n\n Required for DML statements. Ignored for queries.\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will not\n be retried.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata\n that is provided to the method.\n\n Returns:\n Iterable[~google.cloud.spanner_v1.types.PartialResultSet].\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError: If the request\n failed for any reason.\n google.api_core.exceptions.RetryError: If the request failed due\n to a retryable error and retry attempts failed.\n ValueError: If the parameters are invalid." 
- }, - { - "code": "def move_out_64(library, session, space, offset, length, data, extended=False):\n converted_buffer = (ViUInt64 * length)(*tuple(data))\n if extended:\n return library.viMoveOut64Ex(session, space, offset, length, converted_buffer)\n else:\n return library.viMoveOut64(session, space, offset, length, converted_buffer)", - "docstring": "Moves an 64-bit block of data from local memory to the specified address space and offset.\n\n Corresponds to viMoveOut64* functions of the VISA library.\n\n :param library: the visa library wrapped by ctypes.\n :param session: Unique logical identifier to a session.\n :param space: Specifies the address space. (Constants.*SPACE*)\n :param offset: Offset (in bytes) of the address or register from which to read.\n :param length: Number of elements to transfer, where the data width of the elements to transfer\n is identical to the source data width.\n :param data: Data to write to bus.\n :param extended: Use 64 bits offset independent of the platform.\n :return: return value of the library call.\n :rtype: :class:`pyvisa.constants.StatusCode`" - }, - { - "code": "def to_string(self, indent=\"\", newl=\"\", addindent=\"\"):\n buf = StringIO()\n self.to_xml().writexml(buf, indent=indent, addindent=addindent,\n newl=newl)\n return buf.getvalue()", - "docstring": "Returns a string representation of the XMLi element.\n @return: str" - }, - { - "code": "def recoverMemory(buffer, size):\n ret = libxml2mod.xmlRecoverMemory(buffer, size)\n if ret is None:raise treeError('xmlRecoverMemory() failed')\n return xmlDoc(_obj=ret)", - "docstring": "parse an XML in-memory block and build a tree. In the case\n the document is not Well Formed, an attempt to build a tree\n is tried anyway" - }, - { - "code": "def set_title(self, value: Union[Literal, Identifier, str], lang: str= None):\n return self.metadata.add(key=DC.title, value=value, lang=lang)", - "docstring": "Set the DC Title literal value\n\n :param value: Value of the title node\n :param lang: Language in which the value is" - }, - { - "code": "def idxmin(self, **kwargs):\n if self._is_transposed:\n kwargs[\"axis\"] = kwargs.get(\"axis\", 0) ^ 1\n return self.transpose().idxmin(**kwargs)\n axis = kwargs.get(\"axis\", 0)\n index = self.index if axis == 0 else self.columns\n def idxmin_builder(df, **kwargs):\n if axis == 0:\n df.index = index\n else:\n df.columns = index\n return df.idxmin(**kwargs)\n func = self._build_mapreduce_func(idxmin_builder, **kwargs)\n return self._full_axis_reduce(axis, func)", - "docstring": "Returns the first occurrence of the minimum over requested axis.\n\n Returns:\n A new QueryCompiler object containing the minimum of each column or axis." - }, - { - "code": "def diff_texts(a, b, filename):\n a = a.splitlines()\n b = b.splitlines()\n return difflib.unified_diff(a, b, filename, filename,\n \"(original)\", \"(refactored)\",\n lineterm=\"\")", - "docstring": "Return a unified diff of two strings." - }, - { - "code": "def add_package(package_name, package_path='templates', encoding='utf-8'):\n if not _has_jinja:\n raise RuntimeError(_except_text)\n _jload.add_loader(PackageLoader(package_name, package_path, encoding))", - "docstring": "Adds the given package to the template search routine" - }, - { - "code": "def sometimes(fn):\n def wrapped(*args, **kwargs):\n wrapped.x += 1\n if wrapped.x % 2 == 1:\n return fn(*args, **kwargs)\n wrapped.x = 0\n return wrapped", - "docstring": "They've done studies, you know. 50% of the time,\n it works every time." 
- }, - { - "code": "def to_geoviews(self, gvtype=None, datasets=None, kdims=None, vdims=None, dynamic=False):\n try:\n import geoviews as gv\n from cartopy import crs\n except ImportError:\n import warnings\n warnings.warn(\"This method needs the geoviews package installed.\")\n if gvtype is None:\n gvtype = gv.Image\n ds = self.to_xarray_dataset(datasets)\n if vdims is None:\n vdims = ds.data_vars[list(ds.data_vars.keys())[0]].name\n if hasattr(ds, \"area\") and hasattr(ds.area, 'to_cartopy_crs'):\n dscrs = ds.area.to_cartopy_crs()\n gvds = gv.Dataset(ds, crs=dscrs)\n else:\n gvds = gv.Dataset(ds)\n if \"latitude\" in ds.coords.keys():\n gview = gvds.to(gv.QuadMesh, kdims=[\"longitude\", \"latitude\"], vdims=vdims, dynamic=dynamic)\n else:\n gview = gvds.to(gvtype, kdims=[\"x\", \"y\"], vdims=vdims, dynamic=dynamic)\n return gview", - "docstring": "Convert satpy Scene to geoviews.\n\n Args:\n gvtype (gv plot type):\n One of gv.Image, gv.LineContours, gv.FilledContours, gv.Points\n Default to :class:`geoviews.Image`.\n See Geoviews documentation for details.\n datasets (list): Limit included products to these datasets\n kdims (list of str):\n Key dimensions. See geoviews documentation for more information.\n vdims : list of str, optional\n Value dimensions. See geoviews documentation for more information.\n If not given defaults to first data variable\n dynamic : boolean, optional, default False\n\n Returns: geoviews object\n\n Todo:\n * better handling of projection information in datasets which are\n to be passed to geoviews" - }, - { - "code": "def get_container_wait_kwargs(self, action, container_name, kwargs=None):\n c_kwargs = dict(container=container_name)\n timeout = action.client_config.get('wait_timeout')\n if timeout is not None:\n c_kwargs['timeout'] = timeout\n update_kwargs(c_kwargs, kwargs)\n return c_kwargs", - "docstring": "Generates keyword arguments for the Docker client to wait for a container.\n\n :param action: Action configuration.\n :type action: ActionConfig\n :param container_name: Container name or id.\n :type container_name: unicode | str\n :param kwargs: Additional keyword arguments to complement or override the configuration-based values.\n :type kwargs: dict\n :return: Resulting keyword arguments.\n :rtype: dict" - }, - { - "code": "def parse_action(action, parsed):\n if action == \"list\":\n list_env()\n elif action == \"new\":\n new_env(parsed.environment)\n elif action == \"remove\":\n remove_env(parsed.environment)\n elif action == \"show\":\n show_env(parsed.environment)\n elif action == \"start\":\n start_env(parsed.environment, parsed.path)", - "docstring": "Parse the action to execute." - }, - { - "code": "def from_metadata(self, db_path, db_name='engine_metadata.db'):\n self.__engine.fromMetadata(db_path, db_name)\n return self", - "docstring": "Registers in the current session the views of the MetadataSource so the\n data is obtained from the metadata database instead of reading the\n repositories with the DefaultSource.\n\n :param db_path: path to the folder that contains the database.\n :type db_path: str\n :param db_name: name of the database file (engine_metadata.db) by default.\n :type db_name: str\n :returns: the same instance of the engine\n :rtype: Engine" - }, - { - "code": "def parse_hotkey_combinations(hotkey):\n def combine_step(step):\n return (tuple(sorted(scan_codes)) for scan_codes in _itertools.product(*step))\n return tuple(tuple(combine_step(step)) for step in parse_hotkey(hotkey))", - "docstring": "Parses a user-provided hotkey. 
Differently from `parse_hotkey`,\n instead of each step being a list of the different scan codes for each key,\n each step is a list of all possible combinations of those scan codes." - }, - { - "code": "def same_run_eval(black_num=0, white_num=0, completions=4):\n if black_num <= 0 or white_num <= 0:\n print(\"Need real model numbers\")\n return\n b = fsdb.get_model(black_num)\n w = fsdb.get_model(white_num)\n b_model_path = os.path.join(fsdb.models_dir(), b)\n w_model_path = os.path.join(fsdb.models_dir(), w)\n flags_path = fsdb.eval_flags_path()\n obj = launch_eval_job(b_model_path + \".pb\",\n w_model_path + \".pb\",\n \"{:d}-{:d}\".format(black_num, white_num),\n bucket_name=flags.FLAGS.bucket_name,\n flags_path=flags_path,\n completions=completions)\n return \"{} job launched ok\".format(obj[1].metadata.name)", - "docstring": "Shorthand to spawn a job matching up two models from the same run,\n identified by their model number" - }, - { - "code": "def renew_lease(self, lease_id, increment=None):\n params = {\n 'lease_id': lease_id,\n 'increment': increment,\n }\n api_path = '/v1/sys/leases/renew'\n response = self._adapter.put(\n url=api_path,\n json=params,\n )\n return response.json()", - "docstring": "Renew a lease, requesting to extend the lease.\n\n Supported methods:\n PUT: /sys/leases/renew. Produces: 200 application/json\n\n :param lease_id: The ID of the lease to extend.\n :type lease_id: str | unicode\n :param increment: The requested amount of time (in seconds) to extend the lease.\n :type increment: int\n :return: The JSON response of the request\n :rtype: dict" - }, - { - "code": "def get_files_to_check(self):\n def _get_matches(conf):\n match_func = re(conf.match + '$').match\n match_dir_func = re(conf.match_dir + '$').match\n return match_func, match_dir_func\n def _get_ignore_decorators(conf):\n return (re(conf.ignore_decorators) if conf.ignore_decorators\n else None)\n for name in self._arguments:\n if os.path.isdir(name):\n for root, dirs, filenames in os.walk(name):\n config = self._get_config(os.path.abspath(root))\n match, match_dir = _get_matches(config)\n ignore_decorators = _get_ignore_decorators(config)\n dirs[:] = [d for d in dirs if match_dir(d)]\n for filename in filenames:\n if match(filename):\n full_path = os.path.join(root, filename)\n yield (full_path, list(config.checked_codes),\n ignore_decorators)\n else:\n config = self._get_config(os.path.abspath(name))\n match, _ = _get_matches(config)\n ignore_decorators = _get_ignore_decorators(config)\n if match(name):\n yield (name, list(config.checked_codes), ignore_decorators)", - "docstring": "Generate files and error codes to check on each one.\n\n Walk dir trees under `self._arguments` and yield file names\n that `match` under each directory that `match_dir`.\n The method locates the configuration for each file name and yields a\n tuple of (filename, [error_codes]).\n\n With every discovery of a new configuration file `IllegalConfiguration`\n might be raised." 
- }, - { - "code": "def upload_file(self, filepath, key):\n log = logging.getLogger(self.cls_logger + '.upload_file')\n log.info('Attempting to upload file %s to S3 bucket %s as key %s...',\n filepath, self.bucket_name, key)\n if not isinstance(filepath, basestring):\n log.error('filepath argument is not a string')\n return False\n if not isinstance(key, basestring):\n log.error('key argument is not a string')\n return False\n if not os.path.isfile(filepath):\n log.error('File not found on file system: %s', filepath)\n return False\n try:\n self.s3client.upload_file(\n Filename=filepath, Bucket=self.bucket_name, Key=key)\n except ClientError as e:\n log.error('Unable to upload file %s to bucket %s as key %s:\\n%s',\n filepath, self.bucket_name, key, e)\n return False\n else:\n log.info('Successfully uploaded file to S3 bucket %s as key %s',\n self.bucket_name, key)\n return True", - "docstring": "Uploads a file using the passed S3 key\n\n This method uploads a file specified by the filepath to S3\n using the provided S3 key.\n\n :param filepath: (str) Full path to the file to be uploaded\n :param key: (str) S3 key to be set for the upload\n :return: True if upload is successful, False otherwise." - }, - { - "code": "def open_file(cls, filename: str, response: BaseResponse, mode='wb+'):\n _logger.debug('Saving file to {0}, mode={1}.',\n filename, mode)\n dir_path = os.path.dirname(filename)\n if dir_path and not os.path.exists(dir_path):\n os.makedirs(dir_path)\n response.body = Body(open(filename, mode))", - "docstring": "Open a file object on to the Response Body.\n\n Args:\n filename: The path where the file is to be saved\n response: Response\n mode: The file mode\n\n This function will create the directories if not exist." - }, - { - "code": "def sort_servers_closest(servers: Sequence[str]) -> Sequence[Tuple[str, float]]:\n if not {urlparse(url).scheme for url in servers}.issubset({'http', 'https'}):\n raise TransportError('Invalid server urls')\n get_rtt_jobs = set(\n gevent.spawn(lambda url: (url, get_http_rtt(url)), server_url)\n for server_url\n in servers\n )\n gevent.joinall(get_rtt_jobs, raise_error=False)\n sorted_servers: List[Tuple[str, float]] = sorted(\n (job.value for job in get_rtt_jobs if job.value[1] is not None),\n key=itemgetter(1),\n )\n log.debug('Matrix homeserver RTT times', rtt_times=sorted_servers)\n return sorted_servers", - "docstring": "Sorts a list of servers by http round-trip time\n\n Params:\n servers: sequence of http server urls\n Returns:\n sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed servers\n (possibly empty)" - }, - { - "code": "def _print_header(self):\n header = \" Iter Dir \"\n if self.constraints is not None:\n header += ' SC CC'\n header += \" Function\"\n if self.convergence_condition is not None:\n header += self.convergence_condition.get_header()\n header += \" Time\"\n self._screen(\"-\"*(len(header)), newline=True)\n self._screen(header, newline=True)\n self._screen(\"-\"*(len(header)), newline=True)", - "docstring": "Print the header for screen logging" - }, - { - "code": "def parse_value(self, value_string: str):\n self.value = Decimal(value_string)\n return self.value", - "docstring": "Parses the amount string." 
- }, - { - "code": "def incremental_value(self, slip_moment, mmax, mag_value, bbar, dbar):\n delta_m = mmax - mag_value\n a_3 = self._get_a3(bbar, dbar, slip_moment, mmax)\n return a_3 * bbar * (np.exp(bbar * delta_m) - 1.0) * (delta_m > 0.0)", - "docstring": "Returns the incremental rate with Mmax = Mag_value" - }, - { - "code": "def get_group_gn(dim, dim_per_gp, num_groups):\n assert dim_per_gp == -1 or num_groups == -1, \\\n \"GroupNorm: can only specify G or C/G.\"\n if dim_per_gp > 0:\n assert dim % dim_per_gp == 0, \\\n \"dim: {}, dim_per_gp: {}\".format(dim, dim_per_gp)\n group_gn = dim // dim_per_gp\n else:\n assert dim % num_groups == 0, \\\n \"dim: {}, num_groups: {}\".format(dim, num_groups)\n group_gn = num_groups\n return group_gn", - "docstring": "get number of groups used by GroupNorm, based on number of channels." - }, - { - "code": "def GetMemLimitMB(self):\n counter = c_uint()\n ret = vmGuestLib.VMGuestLib_GetMemLimitMB(self.handle.value, byref(counter))\n if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)\n return counter.value", - "docstring": "Retrieves the upper limit of memory that is available to the virtual\n machine. For information about setting a memory limit, see \"Limits and\n Reservations\" on page 14." - }, - { - "code": "def from_values_indices(cls, values, indices, populate=False,\n structure=None, voigt_rank=None,\n vsym=True, verbose=False):\n indices = np.array(indices)\n if voigt_rank:\n shape = ([3]*(voigt_rank % 2) + [6]*(voigt_rank // 2))\n else:\n shape = np.ceil(np.max(indices+1, axis=0) / 3.) * 3\n base = np.zeros(shape.astype(int))\n for v, idx in zip(values, indices):\n base[tuple(idx)] = v\n if 6 in shape:\n obj = cls.from_voigt(base)\n else:\n obj = cls(base)\n if populate:\n assert structure, \"Populate option must include structure input\"\n obj = obj.populate(structure, vsym=vsym, verbose=verbose)\n elif structure:\n obj = obj.fit_to_structure(structure)\n return obj", - "docstring": "Creates a tensor from values and indices, with options\n for populating the remainder of the tensor.\n\n Args:\n values (floats): numbers to place at indices\n indices (array-likes): indices to place values at\n populate (bool): whether to populate the tensor\n structure (Structure): structure to base population\n or fit_to_structure on\n voigt_rank (int): full tensor rank to indicate the\n shape of the resulting tensor. This is necessary\n if one provides a set of indices more minimal than\n the shape of the tensor they want, e.g.\n Tensor.from_values_indices((0, 0), 100)\n vsym (bool): whether to voigt symmetrize during the\n optimization procedure\n verbose (bool): whether to populate verbosely" - }, - { - "code": "def _get_exec_binary(binary, kw):\n binary = which(binary, path=kw.get('env', {}).get('PATH'))\n if binary is None:\n raise_os_error(errno.ENOENT)\n return binary", - "docstring": "On win32, the subprocess module can only reliably resolve the\n target binary if it's actually a binary; as for a Node.js script\n it seems to only work iff shell=True was specified, presenting\n a security risk. Resolve the target manually through which will\n account for that.\n\n The kw argument is the keyword arguments that will be passed into\n whatever respective subprocess.Popen family of methods. The PATH\n environment variable will be used if available." 
- }, - { - "code": "def total_area_per_neurite(neurites, neurite_type=NeuriteType.all):\n return [neurite.area for neurite in iter_neurites(neurites, filt=is_type(neurite_type))]", - "docstring": "Surface area in a collection of neurites.\n\n The area is defined as the sum of the area of the sections." - }, - { - "code": "def check_initialized(method):\n def _decorator(self, *args, **kwargs):\n if self._arguments is None or self._options is None:\n raise RuntimeError('using an uninitialized configuration')\n return method(self, *args, **kwargs)\n return _decorator", - "docstring": "Check that the configuration object was initialized." - }, - { - "code": "def M_step(self):\n for k in range(self.K):\n self.rho[k] = self.type_prob[:, k].sum() / self.N\n for k in range(self.K):\n for m in range(self.M):\n temp_prob = np.dot(self.type_prob[:, k],\n self.feature_counts[:, m])\n if temp_prob < 1e-99:\n temp_prob = 1e-99\n self.mu[k, m] = temp_prob / np.dot(self.type_prob[:, k],\n self.observations)", - "docstring": "generate new parameter estimates given updated type distribution" - }, - { - "code": "def set_name(self, name):\n self.name = name\n self.changed_event.emit(self)", - "docstring": "Changes the name of the account.\n\n :type name: string\n :param name: The account name." - }, - { - "code": "def getexif_location(directory,fn):\n lat=None\n lon=None\n sign_lat=+1.0\n sign_lon=+1.0\n exif_tags=exifread.process_file(\\\n open(os.path.join(directory,fn),'rb'))\n try:\n d,m,s=exif_tags['GPS GPSLongitude'].values\n if exif_tags['GPS GPSLongitudeRef'].values=='W':\n sign_lon=-1.0\n lon=float(d.num) +float(m.num)/60.0 +float(s.num/float(s.den))/3600.0\n lon=lon*sign_lon\n d,m,s=exif_tags['GPS GPSLatitude'].values\n if exif_tags['GPS GPSLatitudeRef'].values=='S':\n sign_lat=-1.0\n lat=float(d.num)\\\n +float(m.num)/60.0\\\n +float(s.num/float(s.den))/3600.0\n lat=lat*sign_lat\n except:\n logger.debug(\"%s - Couldn't extract GPS info\"%(fn))\n return lat,lon", - "docstring": "directory - Dir where file is located\n fn - filename to check for EXIF GPS\n\n Returns touple of lat,lon if EXIF\n eg. (34.035460,-118.227885)\n files contains GPS info, otherwise returns\n None,None" - }, - { - "code": "def has_no_password(gpg_secret_keyid):\n if gnupg is None:\n return False\n gpg = gnupg.GPG()\n s = gpg.sign(\"\", keyid=gpg_secret_keyid, passphrase=\"\")\n try:\n return s.status == \"signature created\"\n except AttributeError:\n if hasattr(s, \"stderr\"):\n return \"GOOD_PASSPHRASE\" in s.stderr", - "docstring": "Returns True iif gpg_secret_key has a password" - }, - { - "code": "def clean_translation(self):\n translation = self.cleaned_data['translation']\n if self.instance and self.instance.content_object:\n obj = self.instance.content_object\n field = obj._meta.get_field(self.instance.field)\n max_length = field.max_length\n if max_length and len(translation) > max_length:\n raise forms.ValidationError(\n _('The entered translation is too long. You entered '\n '%(entered)s chars, max length is %(maxlength)s') % {\n 'entered': len(translation),\n 'maxlength': max_length,\n }\n )\n else:\n raise forms.ValidationError(\n _('Can not store translation. First create all translation'\n ' for this object')\n )\n return translation", - "docstring": "Do not allow translations longer than the max_lenght of the field to\n be translated." 
- }, - { - "code": "def save(self):\n client = self._new_api_client()\n params = {'id': self.id} if hasattr(self, 'id') else {}\n action = 'patch' if hasattr(self, 'id') else 'post'\n saved_model = client.make_request(self, action, url_params=params, post_data=self._to_json)\n self.__init__(**saved_model._to_dict)", - "docstring": "Save an instance of a Union object" - }, - { - "code": "async def info(self, session, *, dc=None, watch=None, consistency=None):\n session_id = extract_attr(session, keys=[\"ID\"])\n response = await self._api.get(\"/v1/session/info\", session_id,\n watch=watch,\n consistency=consistency,\n params={\"dc\": dc})\n try:\n result = response.body[0]\n except IndexError:\n meta = extract_meta(response.headers)\n raise NotFound(\"No session for %r\" % session_id, meta=meta)\n return consul(result, meta=extract_meta(response.headers))", - "docstring": "Queries a given session\n\n Parameters:\n session (ObjectID): Session ID\n dc (str): Specify datacenter that will be used.\n Defaults to the agent's local datacenter.\n watch (Blocking): Do a blocking query\n consistency (Consistency): Force consistency\n Returns:\n ObjectMeta: where value is the queried session\n Raises:\n NotFound: session is absent\n\n Returns the requested session information within a given datacenter.\n\n It returns a mapping like this::\n\n {\n \"LockDelay\": datetime.timedelta(0, 15),\n \"Checks\": [\n \"serfHealth\"\n ],\n \"Node\": \"foobar\",\n \"ID\": \"adf4238a-882b-9ddc-4a9d-5b6758e4159e\",\n \"CreateIndex\": 1086449\n }" - }, - { - "code": "def refresh( self ):\n self.clear()\n for i, filename in enumerate(self.filenames()):\n name = '%i. %s' % (i+1, os.path.basename(filename))\n action = self.addAction(name)\n action.setData(wrapVariant(filename))", - "docstring": "Clears out the actions for this menu and then loads the files." - }, - { - "code": "def file_ref(self):\n from metatab.util import slugify\n assert self.file_name is None or isinstance(self.file_name, str)\n if self.file_name is not None and self.row is not None:\n parts = split(self.file_name);\n return \"{} {}:{} \".format(parts[-1], self.row, self.col)\n elif self.row is not None:\n return \" {}:{} \".format(self.row, self.col)\n else:\n return ''", - "docstring": "Return a string for the file, row and column of the term." - }, - { - "code": "def fetch_country_by_ip(ip):\n iplookup = ipdata.ipdata()\n data = iplookup.lookup(ip)\n if data.get('status') != 200:\n return ''\n return data.get('response', {}).get('country_code', '')", - "docstring": "Fetches country code by IP\n\n Returns empty string if the request fails in non-200 code.\n\n Uses the ipdata.co service which has the following rules:\n\n * Max 1500 requests per day\n\n See: https://ipdata.co/docs.html#python-library" - }, - { - "code": "def transformer_big():\n hparams = transformer_base()\n hparams.hidden_size = 1024\n hparams.filter_size = 4096\n hparams.batch_size = 2048\n hparams.num_heads = 16\n hparams.layer_prepostprocess_dropout = 0.3\n return hparams", - "docstring": "HParams for transformer big model on WMT." 
- }, - { - "code": "def _get_fieldtranslations(instance, field=None, lang=None):\n\t_filter = {\"module\": instance.__module__, \"model\": instance.__class__.__name__, \"object_id\": instance.id}\n\tif lang:\n\t\t_filter[\"lang\"] = lang\n\tif field:\n\t\t_filter[\"field\"] = field\n\t\ttry:\n\t\t\treturn FieldTranslation.objects.get(**_filter)\n\t\texcept FieldTranslation.DoesNotExist:\n\t\t\treturn False\n\treturn FieldTranslation.objects.filter(**_filter)", - "docstring": "Get all the translations for this object." - }, - { - "code": "def authenticate_token(self, load):\n token = self.get_tok(load['token'])\n if not token or token['eauth'] not in self.opts['external_auth']:\n log.warning('Authentication failure of type \"token\" occurred.')\n return False\n return token", - "docstring": "Authenticate a user by the token specified in load.\n Return the token object or False if auth failed." - }, - { - "code": "def _dfs_preorder(node, visited):\n if node not in visited:\n visited.add(node)\n yield node\n if node.lo is not None:\n yield from _dfs_preorder(node.lo, visited)\n if node.hi is not None:\n yield from _dfs_preorder(node.hi, visited)", - "docstring": "Iterate through nodes in DFS pre-order." - }, - { - "code": "def show_status(self):\n txt = 'Agent Status:\\n'\n print(txt)\n txt += \"start_x = \" + str(self.start_x) + \"\\n\"\n txt += \"start_y = \" + str(self.start_y) + \"\\n\"\n txt += \"target_x = \" + str(self.target_x) + \"\\n\"\n txt += \"target_y = \" + str(self.target_y) + \"\\n\"\n txt += \"current_x = \" + str(self.current_x) + \"\\n\"\n txt += \"current_y = \" + str(self.current_y) + \"\\n\"\n print(self.grd)\n return txt", - "docstring": "dumps the status of the agent" - }, - { - "code": "def get_sectionsf(self, *args, **kwargs):\n def func(f):\n doc = f.__doc__\n self.get_sections(doc or '', *args, **kwargs)\n return f\n return func", - "docstring": "Decorator method to extract sections from a function docstring\n\n Parameters\n ----------\n ``*args`` and ``**kwargs``\n See the :meth:`get_sections` method. Note, that the first argument\n will be the docstring of the specified function\n\n Returns\n -------\n function\n Wrapper that takes a function as input and registers its sections\n via the :meth:`get_sections` method" - }, - { - "code": "def handle_feedback(self, pkt):\n self.logger.debug(\"handle feedback\")\n self.frame = self.decode_frameno(pkt.z & 0o7777) - 1\n self.server.controller.init_frame(self.frame)\n self.server.controller.set_frame(self.frame)", - "docstring": "This part of the protocol is used by IRAF to erase a frame in\n the framebuffers." - }, - { - "code": "def with_dimensions(self, *dimensions):\n self.dimensions = tuple(\n self._maybe_make_dimension(dim) for dim in dimensions)\n self._cached = None\n return self", - "docstring": "Declare dimensions for this Measurement, returns self for chaining." - }, - { - "code": "def best_parent( self, node, tree_type=None ):\n parents = self.parents(node)\n selected_parent = None\n if node['type'] == 'type':\n module = \".\".join( node['name'].split( '.' 
)[:-1] )\n if module:\n for mod in parents:\n if mod['type'] == 'module' and mod['name'] == module:\n selected_parent = mod \n if parents and selected_parent is None:\n parents.sort( key = lambda x: self.value(node, x) )\n return parents[-1]\n return selected_parent", - "docstring": "Choose the best parent for a given node" - }, - { - "code": "def read_mrz(file, save_roi=False, extra_cmdline_params=''):\n p = MRZPipeline(file, extra_cmdline_params)\n mrz = p.result\n if mrz is not None:\n mrz.aux['text'] = p['text']\n if save_roi:\n mrz.aux['roi'] = p['roi']\n return mrz", - "docstring": "The main interface function to this module, encapsulating the recognition pipeline.\n Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object.\n\n :param file: A filename or a stream to read the file data from.\n :param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from.\n :param extra_cmdline_params:extra parameters to the ocr.py" - }, - { - "code": "def load_template(name, directory, extension, encoding, encoding_errors):\n abs_path = get_abs_template_path(name, directory, extension)\n return load_file(abs_path, encoding, encoding_errors)", - "docstring": "Load a template and return its contents as a unicode string." - }, - { - "code": "def getBriefAndDetailedRST(textRoot, node):\n node_xml_contents = utils.nodeCompoundXMLContents(node)\n if not node_xml_contents:\n return \"\", \"\"\n try:\n node_soup = BeautifulSoup(node_xml_contents, \"lxml-xml\")\n except:\n utils.fancyError(\"Unable to parse [{0}] xml using BeautifulSoup\".format(node.name))\n try:\n brief = node_soup.doxygen.compounddef.find_all(\"briefdescription\", recursive=False)\n brief_desc = \"\"\n if len(brief) == 1:\n brief = brief[0]\n if not brief.get_text().isspace():\n brief_desc = convertDescriptionToRST(textRoot, node, brief, None)\n detailed = node_soup.doxygen.compounddef.find_all(\"detaileddescription\", recursive=False)\n detailed_desc = \"\"\n if len(detailed) == 1:\n detailed = detailed[0]\n if not detailed.get_text().isspace():\n detailed_desc = convertDescriptionToRST(textRoot, node, detailed, \"Detailed Description\")\n return brief_desc, detailed_desc\n except:\n utils.fancyError(\n \"Could not acquire soup.doxygen.compounddef; likely not a doxygen xml file.\"\n )", - "docstring": "Given an input ``node``, return a tuple of strings where the first element of\n the return is the ``brief`` description and the second is the ``detailed``\n description.\n\n .. todo:: actually document this" - }, - { - "code": "async def items(self):\n accumulator = Accumulator()\n for graft in load_grafts():\n accumulator.spawn(graft())\n response = await accumulator.join()\n return response.items()", - "docstring": "Expose all grafts." - }, - { - "code": "def json(self):\n return {\n \"elevation\": self.elevation,\n \"latitude\": self.latitude,\n \"longitude\": self.longitude,\n \"icao_code\": self.icao_code,\n \"name\": self.name,\n \"quality\": self.quality,\n \"wban_ids\": self.wban_ids,\n \"recent_wban_id\": self.recent_wban_id,\n \"climate_zones\": {\n \"iecc_climate_zone\": self.iecc_climate_zone,\n \"iecc_moisture_regime\": self.iecc_moisture_regime,\n \"ba_climate_zone\": self.ba_climate_zone,\n \"ca_climate_zone\": self.ca_climate_zone,\n },\n }", - "docstring": "Return a JSON-serializeable object containing station metadata." 
- }, - { - "code": "def compare_contract_versions(\n proxy: ContractProxy,\n expected_version: str,\n contract_name: str,\n address: Address,\n) -> None:\n assert isinstance(expected_version, str)\n try:\n deployed_version = proxy.contract.functions.contract_version().call()\n except BadFunctionCallOutput:\n raise AddressWrongContract('')\n deployed_version = deployed_version.replace('_', '0')\n expected_version = expected_version.replace('_', '0')\n deployed = [int(x) for x in deployed_version.split('.')]\n expected = [int(x) for x in expected_version.split('.')]\n if deployed != expected:\n raise ContractVersionMismatch(\n f'Provided {contract_name} contract ({to_normalized_address(address)}) '\n f'version mismatch. Expected: {expected_version} Got: {deployed_version}',\n )", - "docstring": "Compare version strings of a contract.\n\n If not matching raise ContractVersionMismatch. Also may raise AddressWrongContract\n if the contract contains no code." - }, - { - "code": "def remove_escapes(self):\n chars = []\n i = 0\n while i < len(self.string):\n char = self.string[i]\n if char == \"\\\\\":\n i += 1\n else:\n chars.append(char)\n i += 1\n return \"\".join(chars)", - "docstring": "Removes everything except number and letters from string\n\n :return: All numbers and letters in string" - }, - { - "code": "def screensaver():\n cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app'\n call = __salt__['cmd.run_all'](\n cmd,\n output_loglevel='debug',\n python_shell=False\n )\n _check_cmd(call)\n return True", - "docstring": "Launch the screensaver.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' desktop.screensaver" - }, - { - "code": "def add_plot(self, *args, extension='pdf', **kwargs):\n add_image_kwargs = {}\n for key in ('width', 'placement'):\n if key in kwargs:\n add_image_kwargs[key] = kwargs.pop(key)\n filename = self._save_plot(*args, extension=extension, **kwargs)\n self.add_image(filename, **add_image_kwargs)", - "docstring": "Add the current Matplotlib plot to the figure.\n\n The plot that gets added is the one that would normally be shown when\n using ``plt.show()``.\n\n Args\n ----\n args:\n Arguments passed to plt.savefig for displaying the plot.\n extension : str\n extension of image file indicating figure file type\n kwargs:\n Keyword arguments passed to plt.savefig for displaying the plot. In\n case these contain ``width`` or ``placement``, they will be used\n for the same purpose as in the add_image command. Namely the width\n and placement of the generated plot in the LaTeX document." - }, - { - "code": "def tostr(s, encoding='ascii'):\n if PY3K:\n if isinstance(s, str):\n return s\n else:\n return s.decode(encoding)\n else:\n if isinstance(s, unicode):\n return s.encode(encoding)\n else:\n return s", - "docstring": "Convert string-like-thing s to the 'str' type, in all Pythons, even\n back before Python 2.6. What 'str' means varies by PY3K or not.\n In Pythons before 3.0, str and bytes are the same type.\n In Python 3+, this may require a decoding step." 
- }, - { - "code": "def satisfied(self,lab):\n for const in self.constraints:\n if not const.satisfied(lab):\n return False\n return True", - "docstring": "Check whether the labeling satisfies all constraints" - }, - { - "code": "def unpack(self, unpacker):\n (count, ) = unpacker.unpack_struct(_H)\n items = [(None, None), ]\n count -= 1\n hackpass = False\n for _i in range(0, count):\n if hackpass:\n hackpass = False\n items.append((None, None))\n else:\n item = _unpack_const_item(unpacker)\n items.append(item)\n if item[0] in (CONST_Long, CONST_Double):\n hackpass = True\n self.consts = items", - "docstring": "Unpacks the constant pool from an unpacker stream" - }, - { - "code": "def filepath_to_uri(path):\n if path is None:\n return path\n return urllib.quote(path.replace(\"\\\\\", \"/\"), safe=b\"/~!*()'\")", - "docstring": "Convert an file system path to a URI portion that is suitable for\n inclusion in a URL.\n\n We are assuming input is either UTF-8 or unicode already.\n\n This method will encode certain chars that would normally be recognized as\n special chars for URIs. Note that this method does not encode the '\n character, as it is a valid character within URIs. See\n encodeURIComponent() JavaScript function for more details.\n\n Returns an ASCII string containing the encoded result." - }, - { - "code": "def read_mol2_from_list(self, mol2_lines, mol2_code, columns=None):\n r\n self._load_mol2(mol2_lines, mol2_code, columns)\n return self", - "docstring": "r\"\"\"Reads Mol2 file from a list into DataFrames\n\n Attributes\n ----------\n mol2_lines : list\n A list of lines containing the mol2 file contents. For example,\n ['@MOLECULE\\n',\n 'ZINC38611810\\n',\n ' 65 68 0 0 0\\n',\n 'SMALL\\n',\n 'NO_CHARGES\\n',\n '\\n',\n '@ATOM\\n',\n ' 1 C1 -1.1786 2.7011 -4.0323 C.3 1 <0> -0.1537\\n',\n ' 2 C2 -1.2950 1.2442 -3.5798 C.3 1 <0> -0.1156\\n',\n ...]\n\n mol2_code : str or None\n Name or ID of the molecule.\n\n columns : dict or None (default: None)\n If None, this methods expects a 9-column ATOM section that contains\n the following columns:\n {0:('atom_id', int), 1:('atom_name', str),\n 2:('x', float), 3:('y', float), 4:('z', float),\n 5:('atom_type', str), 6:('subst_id', int),\n 7:('subst_name', str), 8:('charge', float)}\n If your Mol2 files are formatted differently, you can provide your\n own column_mapping dictionary in a format similar to the one above.\n However, note that not all assert_raise_message methods may be\n supported then.\n\n Returns\n ---------\n self" - }, - { - "code": "def filter_above_threshold(\n self,\n key_fn,\n value_dict,\n threshold,\n default_value=0.0):\n def filter_fn(x):\n key = key_fn(x)\n value = value_dict.get(key, default_value)\n return value > threshold\n return self.filter(filter_fn)", - "docstring": "The code for filtering by gene or transcript expression was pretty\n much identical aside from which identifier you pull off an effect.\n So, factored out the common operations for filtering an effect\n collection into this helper method.\n\n Parameters\n ----------\n key_fn : callable\n Given an element of this collection, returns a key into `value_dict`\n\n value_dict : dict\n Dict from keys returned by `extract_key_fn` to float values\n\n threshold : float\n Only keep elements whose value in `value_dict` is above this\n threshold.\n\n default_value : float\n Value to use for elements whose key is not in `value_dict`" - }, - { - "code": "def getPhotos(self, tags='', per_page='', page=''):\n method = 'flickr.groups.pools.getPhotos'\n data = 
_doget(method, group_id=self.id, tags=tags,\\\n per_page=per_page, page=page)\n photos = []\n for photo in data.rsp.photos.photo:\n photos.append(_parse_photo(photo))\n return photos", - "docstring": "Get a list of photo objects for this group" - }, - { - "code": "def check_directory(self):\n exists = os.path.exists(self.directory)\n if not exists:\n logger.error(\"No migrations directory found. Check your path or create a migration first.\")\n logger.error(\"Directory: %s\" % self.directory)\n return exists", - "docstring": "Check if migrations directory exists." - }, - { - "code": "def addTrail(self, offset=None, maxlength=None, n=25, c=None, alpha=None, lw=1):\n if maxlength is None:\n maxlength = self.diagonalSize() * 20\n if maxlength == 0:\n maxlength = 1\n if self.trail is None:\n pos = self.GetPosition()\n self.trailPoints = [None] * n\n self.trailSegmentSize = maxlength / n\n self.trailOffset = offset\n ppoints = vtk.vtkPoints()\n poly = vtk.vtkPolyData()\n ppoints.SetData(numpy_to_vtk([pos] * n))\n poly.SetPoints(ppoints)\n lines = vtk.vtkCellArray()\n lines.InsertNextCell(n)\n for i in range(n):\n lines.InsertCellPoint(i)\n poly.SetPoints(ppoints)\n poly.SetLines(lines)\n mapper = vtk.vtkPolyDataMapper()\n if c is None:\n if hasattr(self, \"GetProperty\"):\n col = self.GetProperty().GetColor()\n else:\n col = (0.1, 0.1, 0.1)\n else:\n col = colors.getColor(c)\n if alpha is None:\n alpha = 1\n if hasattr(self, \"GetProperty\"):\n alpha = self.GetProperty().GetOpacity()\n mapper.SetInputData(poly)\n tline = Actor()\n tline.SetMapper(mapper)\n tline.GetProperty().SetColor(col)\n tline.GetProperty().SetOpacity(alpha)\n tline.GetProperty().SetLineWidth(lw)\n self.trail = tline\n return self", - "docstring": "Add a trailing line to actor.\n\n :param offset: set an offset vector from the object center.\n :param maxlength: length of trailing line in absolute units\n :param n: number of segments to control precision\n :param lw: line width of the trail\n\n .. hint:: |trail| |trail.py|_" - }, - { - "code": "def _create_topk_unique(inputs, k):\n height = inputs.shape[0]\n width = inputs.shape[1]\n neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)\n ones = tf.ones([height, width], dtype=tf.float32)\n neg_inf_r2 = ones * neg_inf_r0\n inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)\n tmp = inputs\n topk_r2 = tf.zeros([height, k], dtype=tf.float32)\n for i in range(k):\n kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)\n k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),\n [height, 1])\n topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)\n ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))\n tmp = tf.where(ge_r2, neg_inf_r2, inputs)\n log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))\n next_power_of_two = 1 << log2_ceiling\n count_mask = next_power_of_two - 1\n mask_r0 = tf.constant(count_mask)\n mask_r2 = tf.fill([height, k], mask_r0)\n topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)\n topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)\n return topk_r2, topk_indices_r2", - "docstring": "Creates the top k values in sorted order with indices.\n\n Args:\n inputs: A tensor with rank of 2. [batch_size, original_size].\n k: An integer, number of top elements to select.\n\n Returns:\n topk_r2: A tensor, the k largest elements. [batch_size, k].\n topk_indices_r2: A tensor, indices of the top k values. [batch_size, k]." 
- }, - { - "code": "def reverse(array):\n l = list(array)\n l.reverse()\n return _n.array(l)", - "docstring": "returns a reversed numpy array" - }, - { - "code": "def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,\n topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer=\"em\"):\n model = callMLlibFunc(\"trainLDAModel\", rdd, k, maxIterations,\n docConcentration, topicConcentration, seed,\n checkpointInterval, optimizer)\n return LDAModel(model)", - "docstring": "Train a LDA model.\n\n :param rdd:\n RDD of documents, which are tuples of document IDs and term\n (word) count vectors. The term count vectors are \"bags of\n words\" with a fixed-size vocabulary (where the vocabulary size\n is the length of the vector). Document IDs must be unique\n and >= 0.\n :param k:\n Number of topics to infer, i.e., the number of soft cluster\n centers.\n (default: 10)\n :param maxIterations:\n Maximum number of iterations allowed.\n (default: 20)\n :param docConcentration:\n Concentration parameter (commonly named \"alpha\") for the prior\n placed on documents' distributions over topics (\"theta\").\n (default: -1.0)\n :param topicConcentration:\n Concentration parameter (commonly named \"beta\" or \"eta\") for\n the prior placed on topics' distributions over terms.\n (default: -1.0)\n :param seed:\n Random seed for cluster initialization. Set as None to generate\n seed based on system time.\n (default: None)\n :param checkpointInterval:\n Period (in iterations) between checkpoints.\n (default: 10)\n :param optimizer:\n LDAOptimizer used to perform the actual calculation. Currently\n \"em\", \"online\" are supported.\n (default: \"em\")" - }, - { - "code": "def generate_covalent_bond_graph(covalent_bonds):\n bond_graph=networkx.Graph()\n for inter in covalent_bonds:\n bond_graph.add_edge(inter.a, inter.b)\n return bond_graph", - "docstring": "Generates a graph of the covalent bond network described by the interactions.\n\n Parameters\n ----------\n covalent_bonds: [CovalentBond]\n List of `CovalentBond`.\n\n Returns\n -------\n bond_graph: networkx.Graph\n A graph of the covalent bond network." - }, - { - "code": "def A_array(l1,l2,PA,PB,CP,g):\n Imax = l1+l2+1\n A = [0]*Imax\n for i in range(Imax):\n for r in range(int(floor(i/2)+1)):\n for u in range(int(floor((i-2*r)/2)+1)):\n I = i-2*r-u\n A[I] = A[I] + A_term(i,r,u,l1,l2,PA,PB,CP,g)\n return A", - "docstring": "THO eq. 
2.18 and 3.1\n\n >>> A_array(0,0,0,0,0,1)\n [1.0]\n >>> A_array(0,1,1,1,1,1)\n [1.0, -1.0]\n >>> A_array(1,1,1,1,1,1)\n [1.5, -2.5, 1.0]" - }, - { - "code": "def check_file(\n state,\n fname,\n missing_msg=\"Did you create a file named `{}`?\",\n is_dir_msg=\"Want to check a file named `{}`, but found a directory.\",\n parse=True,\n use_fs=True,\n use_solution=False,\n):\n if use_fs:\n p = Path(fname)\n if not p.exists():\n state.report(Feedback(missing_msg.format(fname)))\n if p.is_dir():\n state.report(Feedback(is_dir_msg.format(fname)))\n code = p.read_text()\n else:\n code = _get_fname(state, \"student_code\", fname)\n if code is None:\n state.report(Feedback(missing_msg.format(fname)))\n sol_kwargs = {\"solution_code\": None, \"solution_ast\": None}\n if use_solution:\n sol_code = _get_fname(state, \"solution_code\", fname)\n if sol_code is None:\n raise Exception(\"Solution code does not have file named: %s\" % fname)\n sol_kwargs[\"solution_code\"] = sol_code\n sol_kwargs[\"solution_ast\"] = (\n state.parse(sol_code, test=False) if parse else None\n )\n return state.to_child(\n student_code=code,\n student_ast=state.parse(code) if parse else None,\n fname=fname,\n **sol_kwargs\n )", - "docstring": "Test whether file exists, and make its contents the student code.\n\n Note: this SCT fails if the file is a directory." - }, - { - "code": "def rephase_standard(UuL, UdL, UuR, UdR):\n K = UuL.conj().T @ UdL\n f = mixing_phases(K)\n Fdelta = np.diag(np.exp([1j*f['delta1'], 1j*f['delta2'], 1j*f['delta3']]))\n Fphi = np.diag(np.exp([-1j*f['phi1']/2., -1j*f['phi2']/2., 0]))\n return UuL @ Fdelta, UdL @ Fphi.conj(), UuR @ Fdelta, UdR @ Fphi.conj()", - "docstring": "Function to rephase the quark rotation matrices in order to\n obtain the CKM matrix in standard parametrization.\n\n The input matrices are assumed to diagonalize the up-type and down-type\n quark matrices like\n\n ```\n UuL.conj().T @ Mu @ UuR = Mu_diag\n UdL.conj().T @ Md @ UdR = Md_diag\n ```\n\n The CKM matrix is given as `VCKM = UuL.conj().T @ UdL`.\n\n Returns a tuple with the rephased versions of the input matrices." 
- }, - { - "code": "def _set_auth(self, user, password):\n if user is None or len(user.strip()) == 0:\n self._user = None\n self._password = None\n self._auth = None\n else:\n self._user = user.strip()\n if password:\n self._password = password.strip()\n try:\n self._auth = aiohttp.BasicAuth(self._user, self._password, \"utf-8\")\n except ValueError as e:\n log.error(str(e))\n else:\n self._password = None\n self._auth = aiohttp.BasicAuth(self._user, \"\")", - "docstring": "Set authentication parameters" - }, - { - "code": "def _all_combos(self):\n combos = _product(self.dims)\n if not combos:\n return\n calc = [(coalesce(_product(self.dims[i+1:]), 1), mm) for i, mm in enumerate(self.dims)]\n for c in xrange(combos):\n yield tuple(int(c / dd) % mm for dd, mm in calc)", - "docstring": "RETURN AN ITERATOR OF ALL COORDINATES" - }, - { - "code": "def get_breaks_lno(self, filename):\n return list(\n filter(\n lambda x: x is not None, [\n getattr(breakpoint, 'line', None)\n for breakpoint in self.breakpoints\n if breakpoint.on_file(filename)\n ]\n )\n )", - "docstring": "List all line numbers that have a breakpoint" - }, - { - "code": "def flatten(self, redact=False):\n od = OrderedDict()\n for key, view in self.items():\n if redact and view.redact:\n od[key] = REDACTED_TOMBSTONE\n else:\n try:\n od[key] = view.flatten(redact=redact)\n except ConfigTypeError:\n od[key] = view.get()\n return od", - "docstring": "Create a hierarchy of OrderedDicts containing the data from\n this view, recursively reifying all views to get their\n represented values.\n\n If `redact` is set, then sensitive values are replaced with\n the string \"REDACTED\"." - }, - { - "code": "def readTempC(self):\n\t\tt = self._device.readU16BE(MCP9808_REG_AMBIENT_TEMP)\n\t\tself._logger.debug('Raw ambient temp register value: 0x{0:04X}'.format(t & 0xFFFF))\n\t\ttemp = (t & 0x0FFF) / 16.0\n\t\tif t & 0x1000:\n\t\t\ttemp -= 256.0\n\t\treturn temp", - "docstring": "Read sensor and return its value in degrees celsius." - }, - { - "code": "def _add_boolean_cli_param(params, key, value):\n if value is True:\n params.append('--{0}'.format(key))", - "docstring": "Adds key as a command line parameter to params." 
- }, - { - "code": "def parse_options(source):\n options = {}\n tokens = [t.strip() for t in source.split('=')]\n name = tokens[0]\n for token in tokens[1:-1]:\n value, next_name = token.rsplit(' ', 1)\n options[name.strip()] = value\n name = next_name\n options[name.strip()] = tokens[-1].strip()\n return options", - "docstring": "parses chart tag options" - }, - { - "code": "async def apply(self, sender: str, recipient: str, mailbox: str,\n append_msg: AppendMessage) \\\n -> Tuple[Optional[str], AppendMessage]:\n ...", - "docstring": "Run the filter and return the mailbox where it should be appended,\n or None to discard, and the message to be appended, which is usually\n the same as ``append_msg``.\n\n Args:\n sender: The envelope sender of the message.\n recipient: The envelope recipient of the message.\n mailbox: The intended mailbox to append the message.\n append_msg: The message to be appended.\n\n raises:\n :exc:`~pymap.exceptions.AppendFailure`" - }, - { - "code": "def create_tracked_tempdir(*args, **kwargs):\n tempdir = TemporaryDirectory(*args, **kwargs)\n TRACKED_TEMPORARY_DIRECTORIES.append(tempdir)\n atexit.register(tempdir.cleanup)\n warnings.simplefilter(\"ignore\", ResourceWarning)\n return tempdir.name", - "docstring": "Create a tracked temporary directory.\n\n This uses `TemporaryDirectory`, but does not remove the directory when\n the return value goes out of scope, instead registers a handler to cleanup\n on program exit.\n\n The return value is the path to the created directory." - }, - { - "code": "def _load_from(self, line):\n line = line.split('\n line = re.sub('(F|f)(R|r)(O|o)(M|m):','', line).strip()\n bot.info('FROM %s' %line)\n self.config['from'] = line", - "docstring": "load the From section of the recipe for the Dockerfile." - }, - { - "code": "def draw(args):\n if len(args) != 2:\n log.error('Two arguments required: [build target] [output file]')\n app.quit(1)\n target = args[0]\n out = args[1]\n try:\n bb = Butcher()\n bb.load_graph(target)\n except error.BrokenGraph as lolno:\n log.fatal(lolno)\n app.quit(1)\n filtered_graph = bb.graph.subgraph(\n networkx.topological_sort(bb.graph, nbunch=[address.new(target)]))\n a = networkx.to_agraph(filtered_graph)\n a.draw(out, prog='dot')\n log.info('Graph written to %s', out)", - "docstring": "Load the build graph for a target and render it to an image." - }, - { - "code": "def search(self,\n q,\n t=None,\n focus=None,\n bbox=None,\n start=1,\n num=10,\n sortField=None,\n sortOrder=\"asc\",\n useSecurity=True):\n if self._url.endswith(\"/rest\"):\n url = self._url + \"/search\"\n else:\n url = self._url + \"/rest/search\"\n params = {\n \"f\" : \"json\",\n \"q\" : q,\n \"sortOrder\" : sortOrder,\n \"num\" : num,\n \"start\" : start,\n 'restrict' : useSecurity\n }\n if not focus is None:\n params['focus'] = focus\n if not t is None:\n params['t'] = t\n if useSecurity and \\\n self._securityHandler is not None and \\\n self._securityHandler.method == \"token\":\n params[\"token\"] = self._securityHandler.token\n if sortField is not None:\n params['sortField'] = sortField\n if bbox is not None:\n params['bbox'] = bbox\n return self._get(url=url,\n param_dict=params,\n securityHandler=self._securityHandler,\n proxy_url=self._proxy_url,\n proxy_port=self._proxy_port)", - "docstring": "This operation searches for content items in the portal. The\n searches are performed against a high performance index that\n indexes the most popular fields of an item. 
See the Search\n reference page for information on the fields and the syntax of the\n query.\n The search index is updated whenever users add, update, or delete\n content. There can be a lag between the time that the content is\n updated and the time when it's reflected in the search results.\n The results of a search only contain items that the user has\n permission to access.\n\n Inputs:\n q - The query string used to search\n t - type of content to search for.\n focus - another content filter. Ex: files\n bbox - The bounding box for a spatial search defined as minx,\n miny, maxx, or maxy. Search requires q, bbox, or both.\n Spatial search is an overlaps/intersects function of the\n query bbox and the extent of the document.\n Documents that have no extent (e.g., mxds, 3dds, lyr)\n will not be found when doing a bbox search.\n Document extent is assumed to be in the WGS84 geographic\n coordinate system.\n start - The number of the first entry in the result set\n response. The index number is 1-based.\n The default value of start is 1 (that is, the first\n search result).\n The start parameter, along with the num parameter, can\n be used to paginate the search results.\n num - The maximum number of results to be included in the result\n set response.\n The default value is 10, and the maximum allowed value is\n 100.\n The start parameter, along with the num parameter, can be\n used to paginate the search results.\n sortField - Field to sort by. You can also sort by multiple\n fields (comma separated) for an item.\n The allowed sort field names are title, created,\n type, owner, modified, avgRating, numRatings,\n numComments, and numViews.\n sortOrder - Describes whether the results return in ascending or\n descending order. Default is ascending.\n Values: asc | desc\n useSecurity - boolean value that determines if the security\n handler object's token will be appended on the\n search call. If the value is set to False, then\n the search will be performed without\n authentication. This means only items that have\n been shared with everyone on AGOL or portal site\n will be found. If it is set to True, then all\n items the user has permission to see based on the\n query passed will be returned.\n Output:\n returns a list of dictionary" - }, - { - "code": "def set_current_canvas(canvas):\n canvas.context._do_CURRENT_command = True\n if canvasses and canvasses[-1]() is canvas:\n return\n cc = [c() for c in canvasses if c() is not None]\n while canvas in cc:\n cc.remove(canvas)\n cc.append(canvas)\n canvasses[:] = [weakref.ref(c) for c in cc]", - "docstring": "Make a canvas active. Used primarily by the canvas itself." 
- }, - { - "code": "def expand_seed(self, start_seed, num_iterations, val):\n self.grd.set_tile(start_seed[0], start_seed[1], val)\n cur_pos = [start_seed[0], start_seed[1]]\n while num_iterations > 0:\n num_iterations -= 1\n for y in range(cur_pos[0]-randint(0,2), cur_pos[0] + randint(0,2)):\n for x in range(cur_pos[1]-randint(0,2), cur_pos[1] + randint(0,2)):\n if x < self.grd.grid_width and x >= 0 and y >= 0 and y < self.grd.grid_height:\n if self.grd.get_tile(y,x) != val:\n self.grd.set_tile(y, x, TERRAIN_LAND)\n num_iterations -= 1\n new_x = cur_pos[0] + randint(0,3)-2\n new_y = cur_pos[1] + randint(0,3)-2\n if new_x > self.grd.grid_width - 1:\n new_x = 0\n if new_y > self.grd.grid_height - 1:\n new_y = 0\n if new_x < 0:\n new_x = self.grd.grid_width - 1\n if new_y < 0:\n new_y = self.grd.grid_height - 1\n cur_pos = [new_y, new_x]", - "docstring": "takes a seed start point and grows out in random\n directions setting cell points to val" - }, - { - "code": "def with_headers(self, headers):\n return self.replace(headers=_merge_maps(self.headers, headers))", - "docstring": "Create a new request with added headers\n\n Parameters\n ----------\n headers: Mapping\n the headers to add" - }, - { - "code": "def wait_till_page_load(self,driver,max_wait_time):\n\t\tsleepCount = max_wait_time\n\t\twhile self.tracking_no not in driver.page_source and 'Invalid Input' not in driver.page_source:\t\t\n\t\t\tsleep(1)\n\t\t\tsleepCount -= 1\n\t\t\tif sleepCount is 0:\n\t\t\t\traise Exception('Request timed out!')", - "docstring": "This method pauses execution until the page is loaded fully, including\n\t\t\tdata delayed by JavaScript" - }, - { - "code": "def get_urls(self):\n parts = []\n seen = set()\n for v in list(self._meta.item_views)+list(self._meta.action_views):\n if not v in seen:\n view, name = self.get_view_and_name(v)\n if view and name:\n parts.append(self.get_url(name, view, v))\n seen.add(v)\n for v in set(self._views).difference(seen):\n view, name = self.get_view_and_name(v)\n if view and name:\n parts.append(self.get_url(name, view, v))\n return parts", - "docstring": "Returns urls handling bundles and views.\n This processes the 'item view' first in order\n and then adds any non item views at the end." 
- }, - { - "code": "def argument_action(self, text, loc, arg):\r\n exshared.setpos(loc, text)\r\n if DEBUG > 0:\r\n print(\"ARGUMENT:\",arg.exp)\r\n if DEBUG == 2: self.symtab.display()\r\n if DEBUG > 2: return\r\n arg_ordinal = len(self.function_arguments)\r\n if not self.symtab.same_type_as_argument(arg.exp, self.function_call_index, arg_ordinal):\r\n raise SemanticException(\"Incompatible type for argument %d in '%s'\" % (arg_ordinal + 1, self.symtab.get_name(self.function_call_index)))\r\n self.function_arguments.append(arg.exp)", - "docstring": "Code executed after recognising each of function's arguments" - }, - { - "code": "def svd_thresh(data, threshold=None, n_pc=None, thresh_type='hard'):\n r\n if ((not isinstance(n_pc, (int, str, type(None)))) or\n (isinstance(n_pc, int) and n_pc <= 0) or\n (isinstance(n_pc, str) and n_pc != 'all')):\n raise ValueError('Invalid value for \"n_pc\", specify a positive '\n 'integer value or \"all\"')\n u, s, v = calculate_svd(data)\n if isinstance(threshold, type(None)):\n if isinstance(n_pc, type(None)):\n n_pc = find_n_pc(u, factor=0.1)\n if ((isinstance(n_pc, int) and n_pc >= s.size) or\n (isinstance(n_pc, str) and n_pc == 'all')):\n n_pc = s.size\n warn('Using all singular values.')\n threshold = s[n_pc - 1]\n s_new = thresh(s, threshold, thresh_type)\n if np.all(s_new == s):\n warn('No change to singular values.')\n s_new = np.diag(s_new)\n return np.dot(u, np.dot(s_new, v))", - "docstring": "r\"\"\"Threshold the singular values\n\n This method thresholds the input data using singular value decomposition\n\n Parameters\n ----------\n data : np.ndarray\n Input data array, 2D matrix\n threshold : float or np.ndarray, optional\n Threshold value(s)\n n_pc : int or str, optional\n Number of principal components, specify an integer value or 'all'\n threshold_type : str {'hard', 'soft'}, optional\n Type of thresholding (default is 'hard')\n\n Returns\n -------\n np.ndarray thresholded data\n\n Raises\n ------\n ValueError\n For invalid n_pc value\n\n Examples\n --------\n >>> from modopt.signal.svd import svd_thresh\n >>> x = np.arange(18).reshape(9, 2).astype(float)\n >>> svd_thresh(x, n_pc=1)\n array([[ 0.49815487, 0.54291537],\n [ 2.40863386, 2.62505584],\n [ 4.31911286, 4.70719631],\n [ 6.22959185, 6.78933678],\n [ 8.14007085, 8.87147725],\n [ 10.05054985, 10.95361772],\n [ 11.96102884, 13.03575819],\n [ 13.87150784, 15.11789866],\n [ 15.78198684, 17.20003913]])" - }, - { - "code": "def is_metaseries(self):\n if self.index > 1 or self.software != 'MetaSeries':\n return False\n d = self.description\n return d.startswith('') and d.endswith('')", - "docstring": "Page contains MDS MetaSeries metadata in ImageDescription tag." - }, - { - "code": "def start_reporter(self):\n stdout_file, stderr_file = self.new_log_files(\"reporter\", True)\n process_info = ray.services.start_reporter(\n self.redis_address,\n stdout_file=stdout_file,\n stderr_file=stderr_file,\n redis_password=self._ray_params.redis_password)\n assert ray_constants.PROCESS_TYPE_REPORTER not in self.all_processes\n if process_info is not None:\n self.all_processes[ray_constants.PROCESS_TYPE_REPORTER] = [\n process_info\n ]", - "docstring": "Start the reporter." 
- }, - { - "code": "def _RecurseKey(self, recur_item, root='', depth=15):\n if depth < 1:\n logger.debug('Recursion limit hit for key: {0:s}'.format(root))\n return\n if isinstance(recur_item, (list, tuple)):\n for recur in recur_item:\n for key in self._RecurseKey(recur, root, depth):\n yield key\n return\n if not hasattr(recur_item, 'iteritems'):\n return\n for key, value in iter(recur_item.items()):\n yield root, key, value\n if isinstance(value, dict):\n value = [value]\n if isinstance(value, list):\n for item in value:\n if isinstance(item, dict):\n for keyval in self._RecurseKey(\n item, root=root + '/' + key, depth=depth - 1):\n yield keyval", - "docstring": "Flattens nested dictionaries and lists by yielding their values.\n\n The hierarchy of a bencode file is a series of nested dictionaries and\n lists. This is a helper function helps plugins navigate the structure\n without having to reimplement their own recursive methods.\n\n This method implements an overridable depth limit to prevent processing\n extremely deeply nested dictionaries. If the limit is reached a debug\n message is logged indicating which key processing stopped on.\n\n Args:\n recur_item (object): object to be checked for additional nested items.\n root (str): the pathname of the current working key.\n depth (int): a counter to ensure we stop at the maximum recursion depth.\n\n Yields:\n tuple: containing:\n str: root\n str: key\n str: value" - }, - { - "code": "def check(self, gen):\n retval = Counter()\n name, _, block = next(gen, ('', 0, ''))\n if name in self.SAY_THINK:\n if self.is_blank(block.args[0]):\n retval[self.CORRECT] += 1\n else:\n name, _, block = next(gen, ('', 0, ''))\n if name == 'play sound %s until done':\n retval[self.CORRECT] += 1\n retval += self.check(gen)\n else:\n retval[self.INCORRECT] += 1\n else:\n retval[self.INCORRECT] += 1\n return retval", - "docstring": "Check that the last part of the chain matches.\n\n TODO: Fix to handle the following situation that appears to not work\n\n say 'message 1'\n play sound until done\n say 'message 2'\n say 'message 3'\n play sound until done\n say ''" - }, - { - "code": "def C_dict2array(C):\n return np.hstack([np.asarray(C[k]).ravel() for k in C_keys])", - "docstring": "Convert an OrderedDict containing C values to a 1D array." - }, - { - "code": "def scaledBy(self, scale):\n scaled = deepcopy(self)\n if type(scaled.value) in (int, float):\n scaled.value *= scale\n elif isinstance(scaled.value, numbers):\n scaled.value.values = tuple(v * scale for v in scaled.value.values)\n return scaled", - "docstring": "Return a new Value scaled by a given number for ints and floats." 
- }, - { - "code": "def _options_operation(self, api_interface, path, methods):\n if path.startswith(api_interface.path_prefix):\n path = path[len(api_interface.path_prefix):]\n methods = set(methods)\n methods.add(api.Method.OPTIONS)\n operation_decorator = api_interface.operation(\n path, api.Method.OPTIONS, middleware=[_MethodsMiddleware(methods)])\n operation = operation_decorator(self.cors_options)\n operation.operation_id = path.format(separator='.') + '.cors_options'", - "docstring": "Generate an options operation for the specified path" - }, - { - "code": "def fetch():\n mir, r, slackpkg_last_date = mirror(), \"\", \"\"\n count, upgraded = 0, []\n if mir:\n tar = urlopen(mir)\n try:\n r = tar.read()\n except AttributeError:\n print(\"sun: error: can't read mirror\")\n if os.path.isfile(var_lib_slackpkg + changelog_txt):\n slackpkg_last_date = read_file(\"{0}{1}\".format(\n var_lib_slackpkg, changelog_txt)).split(\"\\n\", 1)[0].strip()\n else:\n return [count, upgraded]\n for line in r.splitlines():\n if slackpkg_last_date == line.strip():\n break\n if (line.endswith(\"z: Upgraded.\") or line.endswith(\"z: Rebuilt.\") or\n line.endswith(\"z: Added.\") or line.endswith(\"z: Removed.\")):\n upgraded.append(line.split(\"/\")[-1])\n count += 1\n if (line.endswith(\"*: Upgraded.\") or line.endswith(\"*: Rebuilt.\") or\n line.endswith(\"*: Added.\") or line.endswith(\"*: Removed.\")):\n upgraded.append(line)\n count += 1\n return [count, upgraded]", - "docstring": "Get ChangeLog.txt file size and counts upgraded packages" - }, - { - "code": "def _set_shape_on_tensor(tensor, shape):\n if shape is not None:\n try:\n tensor.set_shape(shape)\n except ValueError:\n raise ValueError(\"Requested shape does not match tensor's shape: %s vs %s\"\n % (shape, tensor.get_shape()))\n elif tensor.get_shape().ndims is None:\n raise ValueError('Unknown shape on tensor: %s' % tensor)", - "docstring": "Convenience to set a shape or check it." - }, - { - "code": "def tryImportModule(self, name):\n self._name = name\n try:\n import importlib\n self._module = importlib.import_module(name)\n except ImportError:\n self._module = None\n self._version = ''\n self._packagePath = ''\n else:\n if self._versionAttribute:\n self._version = getattr(self._module, self._versionAttribute, '???')\n if self._pathAttribute:\n self._packagePath = getattr(self._module, self._pathAttribute, '???')", - "docstring": "Imports the module and sets version information\n If the module cannot be imported, the version is set to empty values." - }, - { - "code": "def build_D3treeStandard(old, MAX_DEPTH, level=1, toplayer=None):\n out = []\n if not old:\n old = toplayer\n for x in old:\n d = {}\n d['qname'] = x.qname\n d['name'] = x.bestLabel(quotes=False).replace(\"_\", \" \")\n d['objid'] = x.id\n if x.children() and level < MAX_DEPTH:\n d['size'] = len(x.children()) + 5\n d['realsize'] = len(x.children())\n d['children'] = build_D3treeStandard(x.children(), MAX_DEPTH,\n level + 1)\n else:\n d['size'] = 1\n d['realsize'] = 0\n out += [d]\n return out", - "docstring": "For d3s examples all we need is a json with name, children and size .. 
eg\n\n\t {\n\t \"name\": \"flare\",\n\t \"children\": [\n\t {\n\t \"name\": \"analytics\",\n\t \"children\": [\n\t\t{\n\t\t \"name\": \"cluster\",\n\t\t \"children\": [\n\t\t {\"name\": \"AgglomerativeCluster\", \"size\": 3938},\n\t\t {\"name\": \"CommunityStructure\", \"size\": 3812},\n\t\t {\"name\": \"HierarchicalCluster\", \"size\": 6714},\n\t\t {\"name\": \"MergeEdge\", \"size\": 743}\n\t\t ]\n\t\t},\n\t\tetc..." - }, - { - "code": "def run_continuously(self, interval=1):\n cease_continuous_run = threading.Event()\n class ScheduleThread(threading.Thread):\n @classmethod\n def run(cls):\n while not cease_continuous_run.is_set():\n self.run_pending()\n time.sleep(interval)\n continuous_thread = ScheduleThread()\n continuous_thread.start()\n return cease_continuous_run", - "docstring": "Continuously run, while executing pending jobs at each elapsed\n time interval.\n\n @return cease_continuous_run: threading.Event which can be set to\n cease continuous run.\n\n Please note that it is *intended behavior that run_continuously()\n does not run missed jobs*. For example, if you've registered a job\n that should run every minute and you set a continuous run interval\n of one hour then your job won't be run 60 times at each interval but\n only once." - }, - { - "code": "def _should_include_member(self, name, member):\n if _always_drop_symbol_re.match(name):\n return False\n if name in self._exclude_symbols:\n return False\n return True", - "docstring": "Returns True if this member should be included in the document." - }, - { - "code": "def get_all_tags_of_confirmation(self, confirmation_id):\n return self._iterate_through_pages(\n get_function=self.get_tags_of_confirmation_per_page,\n resource=CONFIRMATION_TAGS,\n **{'confirmation_id': confirmation_id}\n )", - "docstring": "Get all tags of confirmation\n This will iterate over all pages until it gets all elements.\n So if the rate limit exceeded it will throw an Exception and you will get nothing\n\n :param confirmation_id: the confirmation id\n :return: list" - }, - { - "code": "def get(self, key):\n group = self.get_node(key)\n if group is None:\n raise KeyError('No object named {key} in the file'.format(key=key))\n return self._read_group(group)", - "docstring": "Retrieve pandas object stored in file\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n obj : same type as object stored in file" - }, - { - "code": "def check_status_logfile(self, checker_func):\n self.status = checker_func(self.logfile)\n return self.status", - "docstring": "Check on the status of this particular job using the logfile" - }, - { - "code": "def toList(variable, types=(basestring, int, float, )):\n if isinstance(variable, types):\n return [variable]\n else:\n return variable", - "docstring": "Converts a variable of type string, int, float to a list, containing the\n variable as the only element.\n\n :param variable: any python object\n :type variable: (str, int, float, others)\n\n :returns: [variable] or variable" - }, - { - "code": "def add(self, payload=None):\n try:\n db = self._client[self.database]\n col = db[WORKFLOW_DATA_COLLECTION_NAME]\n return str(col.insert_one({\n DataStoreDocumentSection.Meta:\n payload if isinstance(payload, dict) else {},\n DataStoreDocumentSection.Data: {}\n }).inserted_id)\n except ConnectionFailure:\n raise DataStoreNotConnected()", - "docstring": "Adds a new document to the data store and returns its id.\n\n Args:\n payload (dict): Dictionary of initial data that should be stored\n in the new document in the meta 
section.\n\n Raises:\n DataStoreNotConnected: If the data store is not connected to the server.\n\n Returns:\n str: The id of the newly created document." - }, - { - "code": "def get_py_source(file):\n try:\n response = None\n pysource = \"\"\n if regexp_py.search(file) is None:\n response = {\"error\": \"Only Python source files are allowed. (*.py)\"}\n else:\n with open(file, 'r') as pyfile:\n pysource = pyfile.read()\n response = {\"data\": pysource}\n except Exception as e:\n response = {\"error\": str(e)}\n finally:\n return response", - "docstring": "Retrieves and returns the source code for any Python\n files requested by the UI via the host agent\n\n @param file [String] The fully qualified path to a file" - }, - { - "code": "def array_metadata_to_n5(array_metadata):\n for f, t in zarr_to_n5_keys:\n array_metadata[t] = array_metadata[f]\n del array_metadata[f]\n del array_metadata['zarr_format']\n try:\n dtype = np.dtype(array_metadata['dataType'])\n except TypeError:\n raise TypeError(\n \"data type %s not supported by N5\" % array_metadata['dataType'])\n array_metadata['dataType'] = dtype.name\n array_metadata['dimensions'] = array_metadata['dimensions'][::-1]\n array_metadata['blockSize'] = array_metadata['blockSize'][::-1]\n if 'fill_value' in array_metadata:\n if array_metadata['fill_value'] != 0 and array_metadata['fill_value'] is not None:\n raise ValueError(\"N5 only supports fill_value == 0 (for now)\")\n del array_metadata['fill_value']\n if 'order' in array_metadata:\n if array_metadata['order'] != 'C':\n raise ValueError(\"zarr N5 storage only stores arrays in C order (for now)\")\n del array_metadata['order']\n if 'filters' in array_metadata:\n if array_metadata['filters'] != [] and array_metadata['filters'] is not None:\n raise ValueError(\"N5 storage does not support zarr filters\")\n del array_metadata['filters']\n assert 'compression' in array_metadata\n compressor_config = array_metadata['compression']\n compressor_config = compressor_config_to_n5(compressor_config)\n array_metadata['compression'] = compressor_config\n return array_metadata", - "docstring": "Convert array metadata from zarr to N5 format." - }, - { - "code": "def register(util_type, registry=_utils):\n def marker(f):\n mark = util_type + '_'\n if not f.__name__.startswith(mark):\n raise TypeError(\n 'not registering %s to %s' % (f.__name__, util_type))\n registry[util_type][f.__name__[len(mark):]] = f\n return f\n return marker", - "docstring": "Crude, local registration decorator for a crude local registry of\n all utilities local to this module." - }, - { - "code": "def get_encoding_name(self, encoding):\n encoding = CodePages.get_encoding_name(encoding)\n if encoding not in self.codepages:\n raise ValueError((\n 'Encoding \"{}\" cannot be used for the current profile. '\n 'Valid encodings are: {}'\n ).format(encoding, ','.join(self.codepages.keys())))\n return encoding", - "docstring": "Given an encoding provided by the user, will return a\n canonical encoding name; and also validate that the encoding\n is supported.\n\n TODO: Support encoding aliases: pc437 instead of cp437." - }, - { - "code": "def filter_missing_X_and_y(X, y):\n y_nans = np.isnan(y)\n x_nans = np.isnan(X).any(axis=1)\n unioned_nans = np.logical_or(x_nans, y_nans)\n return X[~unioned_nans], y[~unioned_nans]", - "docstring": "Remove rows from X and y where either contains nans." 
- }, - { - "code": "def set_empty_for_all(self, row_column_list):\n for row, column in row_column_list:\n self.set_empty(row, column)", - "docstring": "Keep all specified subplots completely empty.\n\n :param row_column_list: a list containing (row, column) tuples to\n specify the subplots, or None to indicate *all* subplots.\n :type row_column_list: list or None" - }, - { - "code": "def get_distance_between_two_points(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", - "docstring": "Returns the distance between two XYPoints." - }, - { - "code": "def fit_quadratic(X, y):\n model = make_pipeline(\n PolynomialFeatures(2), linear_model.LinearRegression()\n )\n model.fit(X, y)\n return model", - "docstring": "Uses OLS with Polynomial order 2." - }, - { - "code": "def dePeriod(arr):\n diff= arr-nu.roll(arr,1,axis=1)\n w= diff < -6.\n addto= nu.cumsum(w.astype(int),axis=1)\n return arr+_TWOPI*addto", - "docstring": "make an array of periodic angles increase linearly" - }, - { - "code": "def get_content(path):\n with codecs.open(abs_path(path), encoding='utf-8') as f:\n return f.read()", - "docstring": "Get content of file." - }, - { - "code": "def short_repr(item, max_length=15):\n item = repr(item)\n if len(item) > max_length:\n item = '{}...{}'.format(item[:max_length - 3], item[-1])\n return item", - "docstring": "Short representation of item if it is too long" - }, - { - "code": "def product_name(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()", - "docstring": "Returns the product name of the connected J-Link.\n\n Args:\n self (JLink): the ``JLink`` instance\n\n Returns:\n Product name." - }, - { - "code": "def get_asset(self, name, **kwargs):\n if len(name.split(\".\")) == 3:\n return self.get_objects([name], **kwargs)[0]\n else:\n return self.lookup_asset_symbols([name], **kwargs)[0]", - "docstring": "Get full asset from name of id\n\n :param str name: Symbol name or asset id (e.g. 1.3.0)" - }, - { - "code": "def homeautoswitch(self, cmd, ain=None, param=None):\n assert self.sid, \"Not logged in\"\n params = {\n 'switchcmd': cmd,\n 'sid': self.sid,\n }\n if param is not None:\n params['param'] = param\n if ain:\n params['ain'] = ain\n url = self.base_url + '/webservices/homeautoswitch.lua'\n response = self.session.get(url, params=params, timeout=10)\n response.raise_for_status()\n return response.text.strip().encode('utf-8')", - "docstring": "Call a switch method.\n Should only be used by internal library functions." - }, - { - "code": "def config_sanity_check(self):\n if 'name' not in self.config:\n raise EventifyConfigError(\n)\n if 'publish_topic' not in self.config:\n raise EventifyConfigError(\n)\n if 'topic' not in self.config['publish_topic']:\n raise EventifyConfigError(\n)", - "docstring": "Base configuration sanity checks" - }, - { - "code": "def build_napp_package(napp_name):\n ignored_extensions = ['.swp', '.pyc', '.napp']\n ignored_dirs = ['__pycache__', '.git', '.tox']\n files = os.listdir()\n for filename in files:\n if os.path.isfile(filename) and '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ignored_extensions:\n files.remove(filename)\n elif os.path.isdir(filename) and filename in ignored_dirs:\n files.remove(filename)\n napp_file = tarfile.open(napp_name + '.napp', 'x:xz')\n for local_f in files:\n napp_file.add(local_f)\n napp_file.close()\n file_payload = open(napp_name + '.napp', 'rb')\n os.remove(napp_name + '.napp')\n return file_payload", - "docstring": "Build the .napp file to be sent to the napps server.\n\n Args:\n napp_identifier (str): Identifier formatted as\n /\n\n Return:\n file_payload (binary): The binary representation of the napp\n package that will be POSTed to the napp server." - }, - { - "code": "def custom(command, user=None, conf_file=None, bin_env=None):\n ret = __salt__['cmd.run_all'](\n _ctl_cmd(command, None, conf_file, bin_env),\n runas=user,\n python_shell=False,\n )\n return _get_return(ret)", - "docstring": "Run any custom supervisord command\n\n user\n user to run supervisorctl as\n conf_file\n path to supervisord config file\n bin_env\n path to supervisorctl bin or path to virtualenv with supervisor\n installed\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' supervisord.custom \"mstop '*gunicorn*'\"" - }, - { - "code": "def find_list_of_ids(self, resource, ids, client_projection=None):\n args = self._es_args(resource)\n return self._parse_hits(self.elastic(resource).mget(body={'ids': ids}, **args), resource)", - "docstring": "Find documents by ids." - }, - { - "code": "def delete_service(resource_root, name, cluster_name=\"default\"):\n return call(resource_root.delete,\n \"%s/%s\" % (SERVICES_PATH % (cluster_name,), name),\n ApiService)", - "docstring": "Delete a service by name\n @param resource_root: The root Resource object.\n @param name: Service name\n @param cluster_name: Cluster name\n @return: The deleted ApiService object" - }, - { - "code": "def matches(self, spec):\n if callable(spec) and not isinstance(spec, type): return spec(self)\n elif isinstance(spec, type): return isinstance(self, spec)\n specification = (self.__class__.__name__, self.group, self.label)\n split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec\n split_spec, nocompare = zip(*((None, True) if s == '*' or s is None else (s, False)\n for s in split_spec))\n if all(nocompare): return True\n match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc))\n self_spec = match_fn(split_spec)\n unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec\n if unescaped_match: return True\n sanitizers = [util.sanitize_identifier, util.group_sanitizer, util.label_sanitizer]\n identifier_specification = tuple(fn(ident, escape=False)\n for ident, fn in zip(specification, sanitizers))\n identifier_match = match_fn(identifier_specification[:len(split_spec)]) == self_spec\n return identifier_match", - "docstring": "Whether the spec applies to this object.\n\n Args:\n spec: A function, spec or type to check for a match\n * A 'type[[.group].label]' string which is compared\n against the type, group and label of this object\n * A function which is given the object and returns\n a boolean.\n * An object type matched using isinstance.\n\n Returns:\n bool: Whether the spec matched this object." 
- }, - { - "code": "async def article(\n self, title, description=None,\n *, url=None, thumb=None, content=None,\n id=None, text=None, parse_mode=(), link_preview=True,\n geo=None, period=60, contact=None, game=False, buttons=None\n ):\n result = types.InputBotInlineResult(\n id=id or '',\n type='article',\n send_message=await self._message(\n text=text, parse_mode=parse_mode, link_preview=link_preview,\n geo=geo, period=period,\n contact=contact,\n game=game,\n buttons=buttons\n ),\n title=title,\n description=description,\n url=url,\n thumb=thumb,\n content=content\n )\n if id is None:\n result.id = hashlib.sha256(bytes(result)).hexdigest()\n return result", - "docstring": "Creates new inline result of article type.\n\n Args:\n title (`str`):\n The title to be shown for this result.\n\n description (`str`, optional):\n Further explanation of what this result means.\n\n url (`str`, optional):\n The URL to be shown for this result.\n\n thumb (:tl:`InputWebDocument`, optional):\n The thumbnail to be shown for this result.\n For now it has to be a :tl:`InputWebDocument` if present.\n\n content (:tl:`InputWebDocument`, optional):\n The content to be shown for this result.\n For now it has to be a :tl:`InputWebDocument` if present." - }, - { - "code": "def write(self, fb):\n print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)\n print('class = {}'.format(fb.func_ins.name), file=self.file)\n print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)\n print('func_args = {}'.format(repr(fb.func_args)), file=self.file)\n print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=self.file)\n print('ext = ({}, {})'.format(\n repr(fb.ext), repr(fb.ext_kwargs)), file=self.file)\n if self.setup_stat is not None:\n self._write_a_stat('setup', self.setup_stat)\n if self.foward_stat is not None:\n self._write_a_stat('forward', self.forward_stat)\n if self.backward_stat is not None:\n self._write_a_stat('backward', self.backward_stat)", - "docstring": "Write a single function benchmark.\n\n Args:\n fb (FunctionBenchmark): FunctionBenchmark class instance.\n Before passing to this, you should call ``fb.benchmark()``." - }, - { - "code": "def task_factory(loop, coro):\n task = asyncio.Task(coro, loop=loop)\n if task._source_traceback:\n del task._source_traceback[-1]\n current_task = asyncio.Task.current_task(loop=loop)\n if current_task is not None and hasattr(current_task, 'context'):\n setattr(task, 'context', current_task.context)\n return task", - "docstring": "Task factory function\n\n Fuction closely mirrors the logic inside of\n asyncio.BaseEventLoop.create_task. Then if there is a current\n task and the current task has a context then share that context\n with the new task" - }, - { - "code": "def rgb(self):\n if not self.__rgb:\n rgb = bytearray(self.height * self.width * 3)\n raw = self.raw\n rgb[0::3] = raw[2::4]\n rgb[1::3] = raw[1::4]\n rgb[2::3] = raw[0::4]\n self.__rgb = bytes(rgb)\n return self.__rgb", - "docstring": "Compute RGB values from the BGRA raw pixels.\n\n :return bytes: RGB pixels." 
- }, - { - "code": "def _copy(self, other, copy_func):\n super(Choice, self)._copy(other, copy_func)\n self._choice = other._choice\n self._name = other._name\n self._parsed = copy_func(other._parsed)", - "docstring": "Copies the contents of another Choice object to itself\n\n :param object:\n Another instance of the same class\n\n :param copy_func:\n An reference of copy.copy() or copy.deepcopy() to use when copying\n lists, dicts and objects" - }, - { - "code": "def reload(self):\n try:\n if hasattr(self, 'href'):\n data = self._api.get(self.href, append_base=False).json()\n resource = self.__class__(api=self._api, **data)\n elif hasattr(self, 'id') and hasattr(self, '_URL') and \\\n 'get' in self._URL:\n data = self._api.get(\n self._URL['get'].format(id=self.id)).json()\n resource = self.__class__(api=self._api, **data)\n else:\n raise SbgError('Resource can not be refreshed!')\n query = {'id': self.id} if hasattr(self, 'id') else {}\n extra = {'resource': self.__class__.__name__, 'query': query}\n logger.info('Reloading {} resource.'.format(self), extra=extra)\n except Exception:\n raise SbgError('Resource can not be refreshed!')\n self._data = resource._data\n self._dirty = resource._dirty\n self._old = copy.deepcopy(self._data.data)\n return self", - "docstring": "Refreshes the resource with the data from the server." - }, - { - "code": "def urls(self):\n url_bases = self._url_module.url_bases\n unformatted_paths = self._url_module.url_paths\n urls = {}\n for url_base in url_bases:\n for url_path, handler in unformatted_paths.items():\n url = url_path.format(url_base)\n urls[url] = handler\n return urls", - "docstring": "A dictionary of the urls to be mocked with this service and the handlers\n that should be called in their place" - }, - { - "code": "def _get_snpeff_cmd(cmd_name, datadir, data, out_file):\n resources = config_utils.get_resources(\"snpeff\", data[\"config\"])\n jvm_opts = resources.get(\"jvm_opts\", [\"-Xms750m\", \"-Xmx3g\"])\n jvm_opts = config_utils.adjust_opts(jvm_opts, {\"algorithm\": {\"memory_adjust\":\n {\"direction\": \"increase\",\n \"maximum\": \"30000M\",\n \"magnitude\": max(2, dd.get_cores(data))}}})\n memory = \" \".join(jvm_opts)\n snpeff = config_utils.get_program(\"snpEff\", data[\"config\"])\n java_args = \"-Djava.io.tmpdir=%s\" % utils.safe_makedir(os.path.join(os.path.dirname(out_file), \"tmp\"))\n export = \"unset JAVA_HOME && export PATH=%s:\\\"$PATH\\\" && \" % (utils.get_java_binpath())\n cmd = \"{export} {snpeff} {memory} {java_args} {cmd_name} -dataDir {datadir}\"\n return cmd.format(**locals())", - "docstring": "Retrieve snpEff base command line." 
- }, - { - "code": "def serve_file(self, load):\n ret = {'data': '',\n 'dest': ''}\n if 'env' in load:\n load.pop('env')\n if 'path' not in load or 'loc' not in load or 'saltenv' not in load:\n return ret\n if not isinstance(load['saltenv'], six.string_types):\n load['saltenv'] = six.text_type(load['saltenv'])\n fnd = self.find_file(load['path'], load['saltenv'])\n if not fnd.get('back'):\n return ret\n fstr = '{0}.serve_file'.format(fnd['back'])\n if fstr in self.servers:\n return self.servers[fstr](load, fnd)\n return ret", - "docstring": "Serve up a chunk of a file" - }, - { - "code": "def vector_normalize(mat, max_vec_norm=1.):\n assert mat.flags.c_contiguous\n n, m = mat.shape\n vector_normalize_kernel.prepared_call(\n (m, 1, 1), (32, 1, 1),\n mat.gpudata,\n np.float32(max_vec_norm),\n np.int32(m),\n np.int32(n))", - "docstring": "Normalize each column vector in mat to length\n max_vec_norm if it is longer than max_vec_norm" - }, - { - "code": "def create(cls, card_id, name_on_card=None, pin_code=None, second_line=None,\n custom_headers=None):\n if custom_headers is None:\n custom_headers = {}\n request_map = {\n cls.FIELD_NAME_ON_CARD: name_on_card,\n cls.FIELD_PIN_CODE: pin_code,\n cls.FIELD_SECOND_LINE: second_line\n }\n request_map_string = converter.class_to_json(request_map)\n request_map_string = cls._remove_field_for_request(request_map_string)\n api_client = client.ApiClient(cls._get_api_context())\n request_bytes = request_map_string.encode()\n request_bytes = security.encrypt(cls._get_api_context(), request_bytes,\n custom_headers)\n endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(),\n card_id)\n response_raw = api_client.post(endpoint_url, request_bytes,\n custom_headers)\n return BunqResponseInt.cast_from_bunq_response(\n cls._process_for_id(response_raw)\n )", - "docstring": "Request a card replacement.\n\n :type user_id: int\n :type card_id: int\n :param name_on_card: The user's name as it will be on the card. Check\n 'card-name' for the available card names for a user.\n :type name_on_card: str\n :param pin_code: The plaintext pin code. Requests require encryption to\n be enabled.\n :type pin_code: str\n :param second_line: The second line on the card.\n :type second_line: str\n :type custom_headers: dict[str, str]|None\n\n :rtype: BunqResponseInt" - }, - { - "code": "def _device_to_sysfs_path(device):\n return '%s-%s' % (\n device.getBusNumber(),\n '.'.join([str(item) for item in device.GetPortNumberList()]))", - "docstring": "Convert device to corresponding sysfs path." 
- }, - { - "code": "def get_banks_by_item(self, *args, **kwargs):\n catalogs = self._get_provider_session('item_bank_session').get_banks_by_item(*args, **kwargs)\n cat_list = []\n for cat in catalogs:\n cat_list.append(Bank(self._provider_manager, cat, self._runtime, self._proxy))\n return BankList(cat_list)", - "docstring": "Pass through to provider ItemBankSession.get_banks_by_item" - }, - { - "code": "def rename(self, new_name):\n yield from self._hypervisor.send(\"nio rename {name} {new_name}\".format(name=self._name, new_name=new_name))\n log.info(\"NIO {name} renamed to {new_name}\".format(name=self._name, new_name=new_name))\n self._name = new_name", - "docstring": "Renames this NIO\n\n :param new_name: new NIO name" - }, - { - "code": "def expandEntitiesFromEmail(e):\n email = {}\n email[\"type\"] = \"i3visio.email\"\n email[\"value\"] = e\n email[\"attributes\"] = []\n alias = {}\n alias[\"type\"] = \"i3visio.alias\"\n alias[\"value\"] = e.split(\"@\")[0]\n alias[\"attributes\"] = []\n domain= {}\n domain[\"type\"] = \"i3visio.domain\"\n domain[\"value\"] = e.split(\"@\")[1]\n domain[\"attributes\"] = []\n return [email, alias, domain]", - "docstring": "Method that receives an email an creates linked entities\n\n Args:\n -----\n e: Email to verify.\n\n Returns:\n --------\n Three different values: email, alias and domain in a list." - }, - { - "code": "def decode_body(cls, header, f):\n assert header.packet_type == MqttControlPacketType.publish\n dupe = bool(header.flags & 0x08)\n retain = bool(header.flags & 0x01)\n qos = ((header.flags & 0x06) >> 1)\n if qos == 0 and dupe:\n raise DecodeError(\"Unexpected dupe=True for qos==0 message [MQTT-3.3.1-2].\")\n decoder = mqtt_io.FileDecoder(mqtt_io.LimitReader(f, header.remaining_len))\n num_bytes_consumed, topic_name = decoder.unpack_utf8()\n if qos != 0:\n packet_id, = decoder.unpack(mqtt_io.FIELD_PACKET_ID)\n else:\n packet_id = 0\n payload_len = header.remaining_len - decoder.num_bytes_consumed\n payload = decoder.read(payload_len)\n return decoder.num_bytes_consumed, MqttPublish(packet_id, topic_name, payload, dupe, qos, retain)", - "docstring": "Generates a `MqttPublish` packet given a\n `MqttFixedHeader`. This method asserts that header.packet_type\n is `publish`.\n\n Parameters\n ----------\n header: MqttFixedHeader\n f: file\n Object with a read method.\n\n Raises\n ------\n DecodeError\n When there are extra bytes at the end of the packet.\n\n Returns\n -------\n int\n Number of bytes consumed from ``f``.\n MqttPublish\n Object extracted from ``f``." 
- }, - { - "code": "def _filter_response(self, response_dict):\n filtered_dict = {}\n for key, value in response_dict.items():\n if key == \"_jsns\":\n continue\n if key == \"xmlns\":\n continue\n if type(value) == list and len(value) == 1:\n filtered_dict[key] = value[0]\n elif type(value) == dict and len(value.keys()) == 1 and \"_content\" \\\n in value.keys():\n filtered_dict[key] = value[\"_content\"]\n elif type(value) == dict:\n tmp_dict = self._filter_response(value)\n filtered_dict[key] = tmp_dict\n else:\n filtered_dict[key] = value\n return filtered_dict", - "docstring": "Add additional filters to the response dictionary\n\n Currently the response dictionary is filtered like this:\n\n * If a list only has one item, the list is replaced by that item\n * Namespace-Keys (_jsns and xmlns) are removed\n\n :param response_dict: the pregenerated, but unfiltered response dict\n :type response_dict: dict\n :return: The filtered dictionary\n :rtype: dict" - }, - { - "code": "def get_linear_address(self, segment, address):\n hThread = self.get_handle(win32.THREAD_QUERY_INFORMATION)\n selector = self.get_register(segment)\n ldt = win32.GetThreadSelectorEntry(hThread, selector)\n BaseLow = ldt.BaseLow\n BaseMid = ldt.HighWord.Bytes.BaseMid << 16\n BaseHi = ldt.HighWord.Bytes.BaseHi << 24\n Base = BaseLow | BaseMid | BaseHi\n LimitLow = ldt.LimitLow\n LimitHi = ldt.HighWord.Bits.LimitHi << 16\n Limit = LimitLow | LimitHi\n if address > Limit:\n msg = \"Address %s too large for segment %s (selector %d)\"\n msg = msg % (HexDump.address(address, self.get_bits()),\n segment, selector)\n raise ValueError(msg)\n return Base + address", - "docstring": "Translates segment-relative addresses to linear addresses.\n\n Linear addresses can be used to access a process memory,\n calling L{Process.read} and L{Process.write}.\n\n @type segment: str\n @param segment: Segment register name.\n\n @type address: int\n @param address: Segment relative memory address.\n\n @rtype: int\n @return: Linear memory address.\n\n @raise ValueError: Address is too large for selector.\n\n @raise WindowsError:\n The current architecture does not support selectors.\n Selectors only exist in x86-based systems." - }, - { - "code": "def config_prefix(prefix):\n global register_option, get_option, set_option, reset_option\n def wrap(func):\n def inner(key, *args, **kwds):\n pkey = '{prefix}.{key}'.format(prefix=prefix, key=key)\n return func(pkey, *args, **kwds)\n return inner\n _register_option = register_option\n _get_option = get_option\n _set_option = set_option\n set_option = wrap(set_option)\n get_option = wrap(get_option)\n register_option = wrap(register_option)\n yield None\n set_option = _set_option\n get_option = _get_option\n register_option = _register_option", - "docstring": "contextmanager for multiple invocations of API with a common prefix\n\n supported API functions: (register / get / set )__option\n\n Warning: This is not thread - safe, and won't work properly if you import\n the API functions into your module using the \"from x import y\" construct.\n\n Example:\n\n import pandas._config.config as cf\n with cf.config_prefix(\"display.font\"):\n cf.register_option(\"color\", \"red\")\n cf.register_option(\"size\", \" 5 pt\")\n cf.set_option(size, \" 6 pt\")\n cf.get_option(size)\n ...\n\n etc'\n\n will register options \"display.font.color\", \"display.font.size\", set the\n value of \"display.font.size\"... and so on." 
- }, - { - "code": "def _func(self, volume, params):\n e0, b0, b1, v0 = tuple(params)\n eta = (v0 / volume) ** (1. / 3.)\n return (e0 +\n 9. * b0 * v0 / 16. * (eta ** 2 - 1)**2 *\n (6 + b1 * (eta ** 2 - 1.) - 4. * eta ** 2))", - "docstring": "BirchMurnaghan equation from PRB 70, 224107" - }, - { - "code": "def undefinedImageType(self):\n if self._undefinedImageType is None:\n ctx = SparkContext._active_spark_context\n self._undefinedImageType = \\\n ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()\n return self._undefinedImageType", - "docstring": "Returns the name of undefined image type for the invalid image.\n\n .. versionadded:: 2.3.0" - }, - { - "code": "def service(self):\n if self._service is not None:\n return self._service\n if self._input_definition is None:\n return None\n splunkd_uri = self._input_definition.metadata[\"server_uri\"]\n session_key = self._input_definition.metadata[\"session_key\"]\n splunkd = urlsplit(splunkd_uri, allow_fragments=False)\n self._service = Service(\n scheme=splunkd.scheme,\n host=splunkd.hostname,\n port=splunkd.port,\n token=session_key,\n )\n return self._service", - "docstring": "Returns a Splunk service object for this script invocation.\n\n The service object is created from the Splunkd URI and session key\n passed to the command invocation on the modular input stream. It is\n available as soon as the :code:`Script.stream_events` method is\n called.\n\n :return: :class:splunklib.client.Service. A value of None is returned,\n if you call this method before the :code:`Script.stream_events` method\n is called." - }, - { - "code": "def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,\n label=None, convention='start', kind=None, loffset=None,\n limit=None, base=0, on=None, level=None):\n from pandas.core.resample import (resample,\n _maybe_process_deprecations)\n axis = self._get_axis_number(axis)\n r = resample(self, freq=rule, label=label, closed=closed,\n axis=axis, kind=kind, loffset=loffset,\n convention=convention,\n base=base, key=on, level=level)\n return _maybe_process_deprecations(r,\n how=how,\n fill_method=fill_method,\n limit=limit)", - "docstring": "Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : str\n The offset string or object representing target conversion.\n how : str\n Method for down/re-sampling, default to 'mean' for downsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).mean()``, or\n ``.resample(...).apply()``\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n fill_method : str, default None\n Filling method for upsampling.\n\n .. deprecated:: 0.18.0\n The new syntax is ``.resample(...).()``,\n e.g. ``.resample(...).pad()``\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. 
The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n limit : int, default None\n Maximum size gap when reindexing with `fill_method`.\n\n .. deprecated:: 0.18.0\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n `_\n for more.\n\n To learn more about the offset strings, please see `this link\n `__.\n\n Examples\n --------\n\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. 
For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... 
freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... ))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample('D', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90" - }, - { - "code": "def getrefnames(idf, objname):\n iddinfo = idf.idd_info\n dtls = idf.model.dtls\n index = dtls.index(objname)\n fieldidds = iddinfo[index]\n for fieldidd in fieldidds:\n if 'field' in fieldidd:\n if fieldidd['field'][0].endswith('Name'):\n if 'reference' in fieldidd:\n return fieldidd['reference']\n else:\n return []", - "docstring": "get the reference names for this object" - }, - { - "code": "def get_spline_values(filt, inp, nr_per_dec=None):\n r\n if nr_per_dec == 0:\n return filt.base/inp[:, None], inp\n outmax = filt.base[-1]/inp.min()\n outmin = filt.base[0]/inp.max()\n if nr_per_dec < 0:\n pts_per_dec = 1/np.log(filt.factor)\n nout = int(np.ceil(np.log(outmax/outmin)*pts_per_dec) + 1)\n else:\n pts_per_dec = nr_per_dec\n nout = int(np.ceil(np.log10(outmax/outmin)*pts_per_dec) + 1)\n if nr_per_dec < 0:\n if nout-filt.base.size < 3:\n nout = filt.base.size+3\n else:\n if nout < 4:\n nout = 4\n if nr_per_dec < 0:\n out = np.exp(np.arange(np.log(outmin), np.log(outmin) +\n nout/pts_per_dec, 1/pts_per_dec))\n new_inp = inp.max()*np.exp(-np.arange(nout - filt.base.size + 1) /\n pts_per_dec)\n else:\n out = 10**np.arange(np.log10(outmin), np.log10(outmin) +\n nout/pts_per_dec, 1/pts_per_dec)\n new_inp = filt.base/inp[:, None]\n return np.atleast_2d(out), new_inp", - "docstring": "r\"\"\"Return required calculation points." - }, - { - "code": "def send(self, output_name, frame):\n for input_method in self._component_connections[output_name]:\n input_method(frame)", - "docstring": "Send an output frame.\n\n The frame is sent to each input the output is connected to. If\n there are no connections this is a null operation with little\n overhead.\n\n :param str output_name: the output to use. Must be a member of\n :py:attr:`~Component.outputs`.\n\n :param Frame frame: the frame to send." 
- }, - { - "code": "def record(self, chunk_size = None,\n dfmt = \"f\",\n channels = 1,\n rate = DEFAULT_SAMPLE_RATE,\n **kwargs\n ):\n if chunk_size is None:\n chunk_size = chunks.size\n if hasattr(self, \"api\"):\n kwargs.setdefault(\"input_device_index\", self.api[\"defaultInputDevice\"])\n channels = kwargs.pop(\"nchannels\", channels)\n input_stream = RecStream(self,\n self._pa.open(format=_STRUCT2PYAUDIO[dfmt],\n channels=channels,\n rate=rate,\n frames_per_buffer=chunk_size,\n input=True,\n **kwargs),\n chunk_size,\n dfmt\n )\n self._recordings.append(input_stream)\n return input_stream", - "docstring": "Records audio from device into a Stream.\n\n Parameters\n ----------\n chunk_size :\n Number of samples per chunk (block sent to device).\n dfmt :\n Format, as in chunks(). Default is \"f\" (Float32).\n channels :\n Channels in audio stream (serialized).\n rate :\n Sample rate (same input used in sHz).\n\n Returns\n -------\n Endless Stream instance that gather data from the audio input device." - }, - { - "code": "def get_column(columns, column_tys, index):\n weld_obj = WeldObject(encoder_, decoder_)\n columns_var = weld_obj.update(columns, tys=WeldVec(column_tys), override=False)\n if isinstance(columns, WeldObject):\n columns_var = columns.obj_id\n weld_obj.dependencies[columns_var] = columns\n weld_template =\n weld_obj.weld_code = weld_template % {\"columns\": columns_var,\n \"ty\": column_tys,\n \"index\": index}\n return weld_obj", - "docstring": "Get column corresponding to passed-in index from ptr returned\n by groupBySum.\n\n Args:\n columns (List): List of columns as WeldObjects\n column_tys (List): List of each column data ty\n index (int): index of selected column\n\n Returns:\n A WeldObject representing this computation" - }, - { - "code": "def _splitGenoGeneWindow(self,annotation_file=None,cis=1e4,funct='protein_coding',minSnps=1.,maxSnps=SP.inf):\n assert annotation_file is not None, 'Splitter:: specify annotation file'\n try:\n f = h5py.File(annotation_file,'r')\n geneID = f['geneID'][:]\n gene_chrom = f['chrom'][:]\n gene_start = f['start'][:]\n gene_end = f['end'][:]\n gene_strand = f['strand'][:]\n gene_function = f['function'][:]\n f.close()\n except:\n print('Splitter:: format annotation file not valid')\n if funct is not None and funct!=list: funct=[funct]\n windows = []\n nSnps = []\n Igene = []\n for gene_i in range(geneID.shape[0]):\n if funct is not None:\n if gene_function[gene_i] not in funct:\n Igene.append(False)\n continue\n wnd = [gene_chrom[gene_i],gene_start[gene_i]-cis,gene_end[gene_i]+cis]\n Ir = (self.chrom==wnd[0])*(self.pos>=wnd[1])*(self.pos<=wnd[2])\n _nSnps = Ir.sum()\n if _nSnps>=minSnps and _nSnps<=maxSnps:\n windows.append(wnd)\n nSnps.append(_nSnps)\n Igene.append(True)\n else:\n Igene.append(False)\n Igene = SP.array(Igene)\n self.info['nSnps'] = SP.array(nSnps)\n self.info['geneID'] = geneID[Igene]\n self.info['gene_start'] = gene_start[Igene]\n self.info['gene_end'] = gene_end[Igene]\n self.info['gene_strand'] = gene_strand[Igene]\n self.info['gene_function'] = gene_function[Igene]\n return SP.array(windows)", - "docstring": "split into windows based on genes" - }, - { - "code": "def log_correction(self, event, action):\n action = str(action)\n self.history.info(action)\n self._corrections.append(dict(\n event=event.as_dict(),\n action=action,\n ))", - "docstring": "This method should be called once we have fixed the problem associated to this event.\n It adds a new entry in the correction history of the node.\n\n Args:\n event: 
:class:`AbinitEvent` that triggered the correction.\n action (str): Human-readable string with info on the action perfomed to solve the problem." - }, - { - "code": "def authorize(login, password, scopes, note='', note_url='', client_id='',\n client_secret='', two_factor_callback=None):\n gh = GitHub()\n gh.login(two_factor_callback=two_factor_callback)\n return gh.authorize(login, password, scopes, note, note_url, client_id,\n client_secret)", - "docstring": "Obtain an authorization token for the GitHub API.\n\n :param str login: (required)\n :param str password: (required)\n :param list scopes: (required), areas you want this token to apply to,\n i.e., 'gist', 'user'\n :param str note: (optional), note about the authorization\n :param str note_url: (optional), url for the application\n :param str client_id: (optional), 20 character OAuth client key for which\n to create a token\n :param str client_secret: (optional), 40 character OAuth client secret for\n which to create the token\n :param func two_factor_callback: (optional), function to call when a\n Two-Factor Authentication code needs to be provided by the user.\n :returns: :class:`Authorization `" - }, - { - "code": "def add_papyrus_routes(self, route_name_prefix, base_url):\n route_name = route_name_prefix + '_read_many'\n self.add_route(route_name, base_url, request_method='GET')\n route_name = route_name_prefix + '_read_one'\n self.add_route(route_name, base_url + '/{id}', request_method='GET')\n route_name = route_name_prefix + '_count'\n self.add_route(route_name, base_url + '/count', request_method='GET')\n route_name = route_name_prefix + '_create'\n self.add_route(route_name, base_url, request_method='POST')\n route_name = route_name_prefix + '_update'\n self.add_route(route_name, base_url + '/{id}', request_method='PUT')\n route_name = route_name_prefix + '_delete'\n self.add_route(route_name, base_url + '/{id}', request_method='DELETE')", - "docstring": "A helper method that adds routes to view callables that, together,\n implement the MapFish HTTP interface.\n\n Example::\n\n import papyrus\n config.include(papyrus)\n config.add_papyrus_routes('spots', '/spots')\n config.scan()\n\n Arguments:\n\n ``route_name_prefix' The prefix used for the route names\n passed to ``config.add_route``.\n\n ``base_url`` The web service's base URL, e.g. ``/spots``. No\n trailing slash!" - }, - { - "code": "def copy(self):\n copy = MOC(name=self.name, mocid=self.id,\n origin=self.origin, moctype=self.type)\n copy += self\n return copy", - "docstring": "Return a copy of a MOC.\n\n >>> p = MOC(4, (5, 6))\n >>> q = p.copy()\n >>> repr(q)\n ''" - }, - { - "code": "def samples(self, anystring, limit=None, offset=None, sortby=None):\n uri = self._uris['samples'].format(anystring)\n params = {'limit': limit, 'offset': offset, 'sortby': sortby}\n return self.get_parse(uri, params)", - "docstring": "Return an object representing the samples identified by the input domain, IP, or URL" - }, - { - "code": "def selectis(table, field, value, complement=False):\n return selectop(table, field, value, operator.is_, complement=complement)", - "docstring": "Select rows where the given field `is` the given value." 
- }, - { - "code": "def is_label_dataframe(label, df):\n setdiff = set(label) - set(df.columns.tolist())\n if len(setdiff) == 0:\n return True\n else:\n return False", - "docstring": "check column label existance" - }, - { - "code": "def update_command(self, command, args=None):\n if command is None:\n self._command = lambda: None\n else:\n if args is None:\n self._command = command\n else:\n self._command = utils.with_args(command, *args)", - "docstring": "Updates the callback command which is called when the ButtonGroup\n changes.\n\n Setting to `None` stops the callback.\n\n :param callback command:\n The callback function to call.\n\n :param callback args:\n A list of arguments to pass to the widgets `command`, defaults to\n `None`." - }, - { - "code": "def pretty_format_args(*args, **kwargs):\n args = list([repr(a) for a in args])\n for key, value in kwargs.items():\n args.append(\"%s=%s\" % (key, repr(value)))\n return \"(%s)\" % \", \".join([a for a in args])", - "docstring": "Take the args, and kwargs that are passed them and format in a\n prototype style." - }, - { - "code": "def lsl(hdfs_path, user=None, recursive=False):\n host, port, path_ = path.split(hdfs_path, user)\n fs = hdfs(host, port, user)\n if not recursive:\n dir_list = fs.list_directory(path_)\n else:\n treewalk = fs.walk(path_)\n top = next(treewalk)\n if top['kind'] == 'directory':\n dir_list = list(treewalk)\n else:\n dir_list = [top]\n fs.close()\n return dir_list", - "docstring": "Return a list of dictionaries of file properties.\n\n If ``hdfs_path`` is a file, there is only one item corresponding to\n the file itself; if it is a directory and ``recursive`` is\n :obj:`False`, each list item corresponds to a file or directory\n contained by it; if it is a directory and ``recursive`` is\n :obj:`True`, the list contains one item for every file or directory\n in the tree rooted at ``hdfs_path``." 
- }, - { - "code": "def field_cache_to_index_pattern(self, field_cache):\n mapping_dict = {}\n mapping_dict['customFormats'] = \"{}\"\n mapping_dict['title'] = self.index_pattern\n mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))\n mapping_str = json.dumps(mapping_dict, separators=(',', ':'))\n return mapping_str", - "docstring": "Return a .kibana index-pattern doc_type" - }, - { - "code": "def minor_min_width(G):\n adj = {v: set(G[v]) for v in G}\n lb = 0\n while len(adj) > 1:\n v = min(adj, key=lambda v: len(adj[v]))\n neighbors = adj[v]\n if not neighbors:\n del adj[v]\n continue\n def neighborhood_degree(u):\n Gu = adj[u]\n return sum(w in Gu for w in neighbors)\n u = min(neighbors, key=neighborhood_degree)\n new_lb = len(adj[v])\n if new_lb > lb:\n lb = new_lb\n adj[v] = adj[v].union(n for n in adj[u] if n != v)\n for n in adj[v]:\n adj[n].add(v)\n for n in adj[u]:\n adj[n].discard(u)\n del adj[u]\n return lb", - "docstring": "Computes a lower bound for the treewidth of graph G.\n\n Parameters\n ----------\n G : NetworkX graph\n The graph on which to compute a lower bound on the treewidth.\n\n Returns\n -------\n lb : int\n A lower bound on the treewidth.\n\n Examples\n --------\n This example computes a lower bound for the treewidth of the :math:`K_7`\n complete graph.\n\n >>> import dwave_networkx as dnx\n >>> import networkx as nx\n >>> K_7 = nx.complete_graph(7)\n >>> dnx.minor_min_width(K_7)\n 6\n\n References\n ----------\n Based on the algorithm presented in [GD]_" - }, - { - "code": "def get_irradiance_value_for_hoy(self, hoy):\n count = int(hoy * self.timestep)\n return self.direct_normal_irradiance[count], \\\n self.diffuse_horizontal_irradiance[count]", - "docstring": "Get direct and diffuse irradiance values for an hoy." - }, - { - "code": "def _AdjustForTimeZoneOffset(\n self, year, month, day_of_month, hours, minutes, time_zone_offset):\n hours_from_utc, minutes_from_utc = divmod(time_zone_offset, 60)\n minutes += minutes_from_utc\n if minutes >= 60:\n minutes -= 60\n hours += 1\n hours += hours_from_utc\n if hours < 0:\n hours += 24\n day_of_month -= 1\n elif hours >= 24:\n hours -= 24\n day_of_month += 1\n days_per_month = self._GetDaysPerMonth(year, month)\n if day_of_month < 1:\n month -= 1\n if month < 1:\n month = 12\n year -= 1\n day_of_month += self._GetDaysPerMonth(year, month)\n elif day_of_month > days_per_month:\n month += 1\n if month > 12:\n month = 1\n year += 1\n day_of_month -= days_per_month\n return year, month, day_of_month, hours, minutes", - "docstring": "Adjusts the date and time values for a time zone offset.\n\n Args:\n year (int): year e.g. 1970.\n month (int): month, where 1 represents January.\n day_of_month (int): day of the month, where 1 represents the first day.\n hours (int): hours.\n minutes (int): minutes.\n time_zone_offset (int): time zone offset in number of minutes from UTC.\n\n Returns:\n tuple[int, int, int, int, int, int]: time zone correct year, month,\n day_of_month, hours and minutes values." 
- }, - { - "code": "def random_variants(\n count,\n genome_name=\"GRCh38\",\n deletions=True,\n insertions=True,\n random_seed=None):\n rng = random.Random(random_seed)\n ensembl = genome_for_reference_name(genome_name)\n if ensembl in _transcript_ids_cache:\n transcript_ids = _transcript_ids_cache[ensembl]\n else:\n transcript_ids = ensembl.transcript_ids()\n _transcript_ids_cache[ensembl] = transcript_ids\n variants = []\n for _ in range(count * 100):\n if len(variants) < count:\n transcript_id = rng.choice(transcript_ids)\n transcript = ensembl.transcript_by_id(transcript_id)\n if not transcript.complete:\n continue\n exon = rng.choice(transcript.exons)\n base1_genomic_position = rng.randint(exon.start, exon.end)\n transcript_offset = transcript.spliced_offset(base1_genomic_position)\n seq = transcript.sequence\n ref = str(seq[transcript_offset])\n if transcript.on_backward_strand:\n ref = reverse_complement(ref)\n alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref]\n if insertions:\n nucleotide_pairs = [\n x + y\n for x in STANDARD_NUCLEOTIDES\n for y in STANDARD_NUCLEOTIDES\n ]\n alt_nucleotides.extend(nucleotide_pairs)\n if deletions:\n alt_nucleotides.append(\"\")\n alt = rng.choice(alt_nucleotides)\n variant = Variant(\n transcript.contig,\n base1_genomic_position,\n ref=ref,\n alt=alt,\n ensembl=ensembl)\n variants.append(variant)\n else:\n return VariantCollection(variants)\n raise ValueError(\n (\"Unable to generate %d random variants, \"\n \"there may be a problem with PyEnsembl\") % count)", - "docstring": "Generate a VariantCollection with random variants that overlap\n at least one complete coding transcript." - }, - { - "code": "def maximum_address(self):\n maximum_address = self._segments.maximum_address\n if maximum_address is not None:\n maximum_address //= self.word_size_bytes\n return maximum_address", - "docstring": "The maximum address of the data, or ``None`` if the file is empty." - }, - { - "code": "def add_documents(self, documents):\n result, errors = [], []\n for document in documents:\n try:\n result.append(self.add_document(document))\n except RuntimeError as exc:\n errors.append((document, exc))\n return result, errors", - "docstring": "Adds more than one document using the same API call\n\n Returns two lists: the first one contains the successfully uploaded\n documents, and the second one tuples with documents that failed to be\n uploaded and the exceptions raised." - }, - { - "code": "def build_pdf(source, texinputs=[], builder=None):\n if builder is None:\n builders = PREFERRED_BUILDERS\n elif builder not in BUILDERS:\n raise RuntimeError('Invalid Builder specified')\n else:\n builders = (builder, )\n for bld in builders:\n bld_cls = BUILDERS[bld]\n builder = bld_cls()\n if not builder.is_available():\n continue\n return builder.build_pdf(source, texinputs)\n else:\n raise RuntimeError('No available builder could be instantiated. '\n 'Please make sure LaTeX is installed.')", - "docstring": "Builds a LaTeX source to PDF.\n\n Will automatically instantiate an available builder (or raise a\n :class:`exceptions.RuntimeError` if none are available) and build the\n supplied source with it.\n\n Parameters are passed on to the builder's\n :meth:`~latex.build.LatexBuilder.build_pdf` function.\n\n :param builder: Specify which builder should be used - ``latexmk``,\n ``pdflatex`` or ``xelatexmk``." 
- }, - { - "code": "def column_alias(cell, names):\n column = slugify(cell.column or '', sep='_')\n column = column.strip('_')\n column = 'column' if not len(column) else column\n name, i = column, 2\n while name in names:\n name = '%s_%s' % (name, i)\n i += 1\n return name", - "docstring": "Generate a normalized version of the column name." - }, - { - "code": "def kcore_bd(CIJ, k, peel=False):\n if peel:\n peelorder, peellevel = ([], [])\n iter = 0\n CIJkcore = CIJ.copy()\n while True:\n id, od, deg = degrees_dir(CIJkcore)\n ff, = np.where(np.logical_and(deg < k, deg > 0))\n if ff.size == 0:\n break\n iter += 1\n CIJkcore[ff, :] = 0\n CIJkcore[:, ff] = 0\n if peel:\n peelorder.append(ff)\n if peel:\n peellevel.append(iter * np.ones((len(ff),)))\n kn = np.sum(deg > 0)\n if peel:\n return CIJkcore, kn, peelorder, peellevel\n else:\n return CIJkcore, kn", - "docstring": "The k-core is the largest subnetwork comprising nodes of degree at\n least k. This function computes the k-core for a given binary directed\n connection matrix by recursively peeling off nodes with degree lower\n than k, until no such nodes remain.\n\n Parameters\n ----------\n CIJ : NxN np.ndarray\n binary directed adjacency matrix\n k : int\n level of k-core\n peel : bool\n If True, additionally calculates peelorder and peellevel. Defaults to\n False.\n\n Returns\n -------\n CIJkcore : NxN np.ndarray\n connection matrix of the k-core. This matrix only contains nodes of\n degree at least k.\n kn : int\n size of k-core\n peelorder : Nx1 np.ndarray\n indices in the order in which they were peeled away during k-core\n decomposition. only returned if peel is specified.\n peellevel : Nx1 np.ndarray\n corresponding level - nodes in at the same level have been peeled\n away at the same time. only return if peel is specified\n\n Notes\n -----\n 'peelorder' and 'peellevel' are similar the the k-core sub-shells\n described in Modha and Singh (2010)." - }, - { - "code": "def read_first_available_value(filename, field_name):\n if not os.path.exists(filename):\n return None\n with open(filename, 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n value = row.get(field_name)\n if value:\n return value\n return None", - "docstring": "Reads the first assigned value of the given field in the CSV table." - }, - { - "code": "def plot(self, xmin, xmax, idx_input=0, idx_output=0, points=100,\n **kwargs) -> None:\n for toy, ann_ in self:\n ann_.plot(xmin, xmax,\n idx_input=idx_input, idx_output=idx_output,\n points=points,\n label=str(toy),\n **kwargs)\n pyplot.legend()", - "docstring": "Call method |anntools.ANN.plot| of all |anntools.ANN| objects\n handled by the actual |anntools.SeasonalANN| object." - }, - { - "code": "def remove_entry(self, anime_id):\n r = self._query_('libraries/%s/remove' % anime_id, 'POST')\n if not r.status_code == 200:\n raise ValueError", - "docstring": "Removes an entry from the user's library.\n\n :param anime_id: The Anime ID or slug." 
- }, - { - "code": "def price(pc, service, attrib, sku):\n pc.service = service.lower()\n pc.sku = sku\n pc.add_attributes(attribs=attrib)\n click.echo(\"Service Alias: {0}\".format(pc.service_alias))\n click.echo(\"URL: {0}\".format(pc.service_url))\n click.echo(\"Region: {0}\".format(pc.region))\n click.echo(\"Product Terms: {0}\".format(pc.terms))\n click.echo(\"Filtering Attributes: {0}\".format(pc.attributes))\n prices = pyutu.get_prices(pc)\n for p in prices:\n click.echo(\"Rate Code: {0} price: {1}\".format(\n p, json.dumps(prices[p], indent=2, sort_keys=True))\n )\n click.echo(\"Total Prices Found: {0}\".format(len(prices)))\n if sys.version_info >= (3, 3):\n click.echo(\"Time: {0} secs\".format(time.process_time()))", - "docstring": "Get a list of a service's prices.\n The list will be in the given region, matching the specific terms and\n any given attribute filters or a SKU." - }, - { - "code": "def _pour(opener, flags=0, *args, **kwargs):\n with _enumerator(opener,\n *args,\n entry_cls=_ArchiveEntryItState,\n **kwargs) as r:\n ext = libarchive.calls.archive_write.c_archive_write_disk_new()\n libarchive.calls.archive_write.c_archive_write_disk_set_options(\n ext,\n flags\n )\n for state in r:\n yield state\n if state.selected is False:\n continue\n r = libarchive.calls.archive_write.c_archive_write_header(\n ext,\n state.entry_res)\n buff = ctypes.c_void_p()\n size = ctypes.c_size_t()\n offset = ctypes.c_longlong()\n while 1:\n r = libarchive.calls.archive_read.\\\n c_archive_read_data_block(\n state.reader_res,\n ctypes.byref(buff),\n ctypes.byref(size),\n ctypes.byref(offset))\n if r == libarchive.constants.archive.ARCHIVE_EOF:\n break\n elif r != libarchive.constants.archive.ARCHIVE_OK:\n message = c_archive_error_string(state.reader_res)\n raise libarchive.exception.ArchiveError(\n \"Pour failed: (%d) [%s]\" % (r, message))\n r = libarchive.calls.archive_write.c_archive_write_data_block(\n ext,\n buff,\n size,\n offset)\n r = libarchive.calls.archive_write.\\\n c_archive_write_finish_entry(ext)", - "docstring": "A flexible pouring facility that knows how to enumerate entry data." - }, - { - "code": "def _load_library(library_names, library_file_extensions,\n library_search_paths, version_check_callback):\n candidates = _find_library_candidates(library_names,\n library_file_extensions,\n library_search_paths)\n library_versions = []\n for filename in candidates:\n version = version_check_callback(filename)\n if version is not None and version >= (3, 0, 0):\n library_versions.append((version, filename))\n if not library_versions:\n return None\n library_versions.sort()\n return ctypes.CDLL(library_versions[-1][1])", - "docstring": "Finds, loads and returns the most recent version of the library." 
- }, - { - "code": "def deserialize_frame(stream, header, verifier=None):\n _LOGGER.debug(\"Starting frame deserialization\")\n frame_data = {}\n final_frame = False\n (sequence_number,) = unpack_values(\">I\", stream, verifier)\n if sequence_number == SequenceIdentifier.SEQUENCE_NUMBER_END.value:\n _LOGGER.debug(\"Deserializing final frame\")\n (sequence_number,) = unpack_values(\">I\", stream, verifier)\n final_frame = True\n else:\n _LOGGER.debug(\"Deserializing frame sequence number %d\", int(sequence_number))\n frame_data[\"final_frame\"] = final_frame\n frame_data[\"sequence_number\"] = sequence_number\n (frame_iv,) = unpack_values(\">{iv_len}s\".format(iv_len=header.algorithm.iv_len), stream, verifier)\n frame_data[\"iv\"] = frame_iv\n if final_frame is True:\n (content_length,) = unpack_values(\">I\", stream, verifier)\n if content_length >= header.frame_length:\n raise SerializationError(\n \"Invalid final frame length: {final} >= {normal}\".format(\n final=content_length, normal=header.frame_length\n )\n )\n else:\n content_length = header.frame_length\n (frame_content, frame_tag) = unpack_values(\n \">{content_len}s{auth_len}s\".format(content_len=content_length, auth_len=header.algorithm.auth_len),\n stream,\n verifier,\n )\n frame_data[\"ciphertext\"] = frame_content\n frame_data[\"tag\"] = frame_tag\n return MessageFrameBody(**frame_data), final_frame", - "docstring": "Deserializes a frame from a body.\n\n :param stream: Source data stream\n :type stream: io.BytesIO\n :param header: Deserialized header\n :type header: aws_encryption_sdk.structures.MessageHeader\n :param verifier: Signature verifier object (optional)\n :type verifier: aws_encryption_sdk.internal.crypto.Verifier\n :returns: Deserialized frame and a boolean stating if this is the final frame\n :rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool" - }, - { - "code": "def covariance(self,pt0,pt1):\n x = np.array([pt0[0],pt1[0]])\n y = np.array([pt0[1],pt1[1]])\n names = [\"n1\",\"n2\"]\n return self.covariance_matrix(x,y,names=names).x[0,1]", - "docstring": "get the covarince between two points implied by Vario2d\n\n Parameters\n ----------\n pt0 : (iterable of len 2)\n first point x and y\n pt1 : (iterable of len 2)\n second point x and y\n\n Returns\n -------\n cov : float\n covariance between pt0 and pt1" - }, - { - "code": "def _strip_value(value, lookup='exact'):\n if lookup == 'in':\n stripped_value = [_strip_object(el) for el in value]\n else:\n stripped_value = _strip_object(value)\n return stripped_value", - "docstring": "Helper function to remove the branch and version information from the given value,\n which could be a single object or a list." 
- }, - { - "code": "def dot(self, other_tf):\n if other_tf.to_frame != self.from_frame:\n raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))\n pose_tf = self.matrix.dot(other_tf.matrix)\n rotation, translation = RigidTransform.rotation_and_translation_from_matrix(pose_tf)\n if isinstance(other_tf, SimilarityTransform):\n return SimilarityTransform(self.rotation, self.translation, scale=1.0,\n from_frame=self.from_frame,\n to_frame=self.to_frame) * other_tf\n return RigidTransform(rotation, translation,\n from_frame=other_tf.from_frame,\n to_frame=self.to_frame)", - "docstring": "Compose this rigid transform with another.\n\n This transform is on the left-hand side of the composition.\n\n Parameters\n ----------\n other_tf : :obj:`RigidTransform`\n The other RigidTransform to compose with this one.\n\n Returns\n -------\n :obj:`RigidTransform`\n A RigidTransform that represents the composition.\n\n Raises\n ------\n ValueError\n If the to_frame of other_tf is not identical to this transform's\n from_frame." - }, - { - "code": "def difference(self, key, *others):\n if not isinstance(key, str):\n raise ValueError(\"String expected.\")\n self.db.sdiffstore(key, [self.key] + [o.key for o in others])\n return Set(key)", - "docstring": "Return a new set with elements in the set that are not in the others." - }, - { - "code": "def touch(filename, timestamp):\n if timestamp is not None:\n timestamp = (timestamp, timestamp)\n from os import utime\n utime(filename, timestamp)", - "docstring": "set the mtime of a file" - }, - { - "code": "def __start_thread(self):\n with self.__lock:\n if self.__nb_threads >= self._max_threads:\n return False\n if self._done_event.is_set():\n return False\n name = \"{0}-{1}\".format(self._logger.name, self._thread_id)\n self._thread_id += 1\n thread = threading.Thread(target=self.__run, name=name)\n thread.daemon = True\n try:\n self.__nb_threads += 1\n thread.start()\n self._threads.append(thread)\n return True\n except (RuntimeError, OSError):\n self.__nb_threads -= 1\n return False", - "docstring": "Starts a new thread, if possible" - }, - { - "code": "def get_filetypes2wildcards(filetypes):\n def is_available(filetype):\n return filetype not in FILETYPE_AVAILABILITY or \\\n FILETYPE_AVAILABILITY[filetype]\n available_filetypes = filter(is_available, filetypes)\n return OrderedDict((ft, FILETYPE2WILDCARD[ft])\n for ft in available_filetypes)", - "docstring": "Returns OrderedDict of filetypes to wildcards\n\n The filetypes that are provided in the filetypes parameter are checked for\n availability. Only available filetypes are inluded in the return ODict.\n\n Parameters\n ----------\n filetypes: Iterable of strings\n \\tFiletype list" - }, - { - "code": "def unscale_dict_wet(C):\n return {k: _scale_dict[k] * v for k, v in C.items()}", - "docstring": "Undo the scaling applied in `scale_dict_wet`." 
- }, - { - "code": "def show(self):\n bytecode._Print(\"MAP_LIST SIZE\", self.size)\n for i in self.map_item:\n if i.item != self:\n i.show()", - "docstring": "Print with a pretty display the MapList object" - }, - { - "code": "def setup(self, target=None, strict=False, minify=False, line_numbers=False, keep_lines=False, no_tco=False):\n if target is None:\n target = \"\"\n else:\n target = str(target).replace(\".\", \"\")\n if target in pseudo_targets:\n target = pseudo_targets[target]\n if target not in targets:\n raise CoconutException(\n \"unsupported target Python version \" + ascii(target),\n extra=\"supported targets are \" + ', '.join(ascii(t) for t in specific_targets) + \", or leave blank for universal\",\n )\n logger.log_vars(\"Compiler args:\", locals())\n self.target, self.strict, self.minify, self.line_numbers, self.keep_lines, self.no_tco = (\n target, strict, minify, line_numbers, keep_lines, no_tco,\n )", - "docstring": "Initializes parsing parameters." - }, - { - "code": "def flatten(data):\n if not data:\n return data\n if type(data[0]) in (list, tuple):\n return list(flatten(data[0])) + list(flatten(data[1:]))\n return list(data[:1]) + list(flatten(data[1:]))", - "docstring": "Returns a flattened version of a list.\n\n Courtesy of https://stackoverflow.com/a/12472564\n\n Args:\n data (`tuple` or `list`): Input data\n\n Returns:\n `list`" - }, - { - "code": "def visit_field(self, _, children):\n filters = children[0]\n return self.Field(getattr(filters[0], 'name', None), filters=filters)", - "docstring": "A simple field.\n\n Arguments\n ---------\n _ (node) : parsimonious.nodes.Node.\n children : list\n - 0: for ``FILTERS``: list of instances of ``.resources.Field``.\n\n Returns\n -------\n .resources.Field\n An instance of ``.resources.Field`` with the correct name.\n\n Example\n -------\n\n >>> DataQLParser(r'foo', default_rule='FIELD').data\n \n >>> DataQLParser(r'foo(1)', default_rule='FIELD').data\n \n >>> DataQLParser(r'foo.bar()', default_rule='FIELD').data\n " - }, - { - "code": "def run_action(self, feature, action,\n run_if_error=False,\n raise_exception=True):\n if len(self._error_dict[feature]) > 0 and not run_if_error:\n return\n error = None\n instance = self.features[feature]\n try:\n getattr(instance, action)()\n except Exception as e:\n e = sys.exc_info()[1]\n self.logger.info(\"An exception occurred with action %s in feature %s!\" %\n (action, feature))\n self.logger.debug(\"Exception\", exc_info=sys.exc_info())\n error = str(e)\n self.log_feature_error(feature, str(e))\n if error is not None and raise_exception:\n exception_msg = \"%s action failed for feature %s: %s\" % (action, feature, error)\n if self.phase == PHASE.REMOVE:\n raise FormulaException(exception_msg)\n else:\n raise SprinterException(exception_msg)\n return error", - "docstring": "Run an action, and log it's output in case of errors" - }, - { - "code": "def get_one(self):\n self.default_val = None\n o = self.default_val\n d = self._query('get_one')\n if d:\n o = self.orm_class(d, hydrate=True)\n return o", - "docstring": "get one row from the db" - }, - { - "code": "def ramp_function(data_type, attr, fname, xinstfeature, xNNifeature):\r\n diff = 0\r\n mmdiff = attr[fname][3]\n rawfd = abs(xinstfeature - xNNifeature)\n if data_type == 'mixed':\n standDev = attr[fname][4]\r\n if rawfd > standDev:\n diff = 1\r\n else:\r\n diff = abs(xinstfeature - xNNifeature) / mmdiff\r\n else:\n diff = abs(xinstfeature - xNNifeature) / mmdiff\r\n return diff", - "docstring": "Our own user simplified 
variation of the ramp function suggested by Hong 1994, 1997. Hong's method requires the user to specifiy two thresholds\r\n that indicate the max difference before a score of 1 is given, as well a min difference before a score of 0 is given, and any in the middle get a\r\n score that is the normalized difference between the two continuous feature values. This was done because when discrete and continuous features were mixed,\r\n continuous feature scores were underestimated. Towards simplicity, automation, and a dataset adaptable approach,\r\n here we simply check whether the difference is greater than the standard deviation for the given feature; if so we assign a score of 1, otherwise we\r\n assign the normalized feature score difference. This should help compensate for the underestimation." - }, - { - "code": "def component_basis_str(basis, elements=None):\n s = \"Description: \" + basis['description'] + '\\n'\n eldata = basis['elements']\n if elements is None:\n elements = list(eldata.keys())\n else:\n elements = expand_elements(elements, True)\n for z in elements:\n s += element_data_str(z, eldata[z]) + '\\n'\n return s", - "docstring": "Print a component basis set\n\n If elements is not None, only the specified elements will be printed\n (see :func:`bse.misc.expand_elements`)" - }, - { - "code": "def setup_colorbars(self, plot_call_sign):\n self.fig.colorbar(plot_call_sign, cax=self.cbar_ax,\n ticks=self.cbar_ticks, orientation=self.cbar_orientation)\n (getattr(self.cbar_ax, 'set_' + self.cbar_var + 'ticklabels')\n (self.cbar_tick_labels, fontsize=self.cbar_ticks_fontsize))\n (getattr(self.cbar_ax, 'set_' + self.cbar_var + 'label')\n (self.cbar_label, fontsize=self.cbar_label_fontsize, labelpad=self.cbar_label_pad))\n return", - "docstring": "Setup colorbars for each type of plot.\n\n Take all of the optional performed during ``__init__`` method and makes the colorbar.\n\n Args:\n plot_call_sign (obj): Plot instance of ax.contourf with colormapping to\n add as a colorbar." 
- }, - { - "code": "def linkify_with_contacts(self, contacts):\n for i in self:\n if not hasattr(i, 'contacts'):\n continue\n links_list = strip_and_uniq(i.contacts)\n new = []\n for name in [e for e in links_list if e]:\n contact = contacts.find_by_name(name)\n if contact is not None and contact.uuid not in new:\n new.append(contact.uuid)\n else:\n i.add_error(\"the contact '%s' defined for '%s' is unknown\"\n % (name, i.get_name()))\n i.contacts = new", - "docstring": "Link items with contacts items\n\n :param contacts: all contacts object\n :type contacts: alignak.objects.contact.Contacts\n :return: None" - }, - { - "code": "def add_list_member(self, list_id, user_id):\n return List(tweepy_list_to_json(self._client.add_list_member(list_id=list_id, user_id=user_id)))", - "docstring": "Add a user to list\n\n :param list_id: list ID number\n :param user_id: user ID number\n :return: :class:`~responsebot.models.List` object" - }, - { - "code": "def _on_connected(self, future):\n if future.exception():\n self._connect_future.set_exception(future.exception())\n return\n conn = future.result()\n LOGGER.debug('Connected to %s (%r, %r, %r)', conn.name,\n self._clustering, self._discovery, self._connected)\n if self._clustering:\n self._cluster[conn.name] = conn\n if not self._discovery:\n self.io_loop.add_future(self.cluster_nodes(),\n self._on_cluster_discovery)\n elif self.ready:\n LOGGER.debug('Cluster nodes all connected')\n if not self._connect_future.done():\n self._connect_future.set_result(True)\n self._connected.set()\n else:\n def on_selected(sfuture):\n LOGGER.debug('Initial setup and selection processed')\n if sfuture.exception():\n self._connect_future.set_exception(sfuture.exception())\n else:\n self._connect_future.set_result(True)\n self._connected.set()\n select_future = concurrent.Future()\n self.io_loop.add_future(select_future, on_selected)\n self._connection = conn\n cmd = Command(\n self._build_command(['SELECT', str(conn.database)]),\n self._connection, None, None)\n cmd.connection.execute(cmd, select_future)", - "docstring": "Invoked when connections have been established. If the client is\n in clustering mode, it will kick of the discovery step if needed. If\n not, it will select the configured database.\n\n :param future: The connection future\n :type future: tornado.concurrent.Future" - }, - { - "code": "def all(self):\n result = self.invoke()\n if self.resolve_links:\n result.resolve_links()\n return result", - "docstring": "Attempt to retrieve all available resources matching this request.\n\n :return: Result instance as returned by the :class:`.Dispatcher`." 
- }, - { - "code": "def schedule(self, api_call, bundle_id, bundle_desc, bundling_request,\n kwargs=None):\n kwargs = kwargs or dict()\n bundle = self._bundle_for(api_call, bundle_id, bundle_desc,\n bundling_request, kwargs)\n elts = getattr(bundling_request, bundle_desc.bundled_field)\n event = bundle.extend(elts)\n count_threshold = self._options.element_count_threshold\n if count_threshold > 0 and bundle.element_count >= count_threshold:\n self._run_now(bundle.bundle_id)\n size_threshold = self._options.request_byte_threshold\n if size_threshold > 0 and bundle.request_bytesize >= size_threshold:\n self._run_now(bundle.bundle_id)\n return event", - "docstring": "Schedules bundle_desc of bundling_request as part of bundle_id.\n\n The returned value an :class:`Event` that\n\n * has a ``result`` attribute that will eventually be set to the result\n the api call\n * will be used to wait for the response\n * holds the canceller function for canceling this part of the bundle\n\n Args:\n api_call (callable[[object], object]): the scheduled API call.\n bundle_id (str): identifies the bundle on which the API call should be\n made.\n bundle_desc (gax.BundleDescriptor): describes the structure of the\n bundled call.\n bundling_request (object): the request instance to use in the API\n call.\n kwargs (dict): optional, the keyword arguments passed to the API call.\n\n Returns:\n Event: the scheduled event." - }, - { - "code": "def full_rule(self):\n return join(self.bp_prefix, self.rule, trailing_slash=self.rule.endswith('/'))", - "docstring": "The full url rule for this route, including any blueprint prefix." - }, - { - "code": "def currencies(self) -> CurrenciesAggregate:\n if not self.__currencies_aggregate:\n self.__currencies_aggregate = CurrenciesAggregate(self.book)\n return self.__currencies_aggregate", - "docstring": "Returns the Currencies aggregate" - }, - { - "code": "def annotated(self):\n edge_annotations = {}\n for edge in self.edges:\n if edge not in edge_annotations:\n referrer = self._tail[edge]\n known_refs = annotated_references(referrer)\n for out_edge in self._out_edges[referrer]:\n referent = self._head[out_edge]\n if known_refs[referent]:\n annotation = known_refs[referent].pop()\n else:\n annotation = None\n edge_annotations[out_edge] = annotation\n annotated_vertices = [\n AnnotatedVertex(\n id=id(vertex),\n annotation=object_annotation(vertex),\n )\n for vertex in self.vertices\n ]\n annotated_edges = [\n AnnotatedEdge(\n id=edge,\n annotation=edge_annotations[edge],\n head=id(self._head[edge]),\n tail=id(self._tail[edge]),\n )\n for edge in self.edges\n ]\n return AnnotatedGraph(\n vertices=annotated_vertices,\n edges=annotated_edges,\n )", - "docstring": "Annotate this graph, returning an AnnotatedGraph object\n with the same structure." 
- }, - { - "code": "def arg_spec(cls, mtd_name):\n mtd = getattr(cls, mtd_name)\n required_params = []\n optional_params = []\n if hasattr(inspect, 'signature'):\n params = inspect.signature(mtd).parameters\n for k in params.keys():\n if params[k].default == inspect.Parameter.empty:\n if not (params[k].name == 'self' or params[k].name == 'cls'):\n required_params.append(k)\n else:\n optional_params.append(k)\n else:\n params = inspect.getargspec(mtd)\n num = len(params[0]) if params[0] else 0\n n_opt = len(params[3]) if params[3] else 0\n n_req = (num - n_opt) if n_opt <= num else 0\n for i in range(0, n_req):\n required_params.append(params[0][i])\n for i in range(n_req, num):\n optional_params.append(params[0][i])\n if inspect.isroutine(getattr(cls, mtd_name)):\n bound_mtd = cls.__dict__[mtd_name]\n if not isinstance(bound_mtd, staticmethod):\n del required_params[0]\n return required_params, optional_params", - "docstring": "Cross-version argument signature inspection\n\n Parameters\n ----------\n cls : class\n mtd_name : str\n Name of the method to be inspected\n\n Returns\n -------\n required_params : list of str\n List of required, positional parameters\n optional_params : list of str\n List of optional parameters, i.e. parameters with a default value" - }, - { - "code": "def convert_regex_to_flask_path(url_path):\n for token in [\"$\"]:\n url_path = url_path.replace(token, \"\")\n def caller(reg):\n match_name, match_pattern = reg.groups()\n return ''.format(match_pattern, match_name)\n url_path = re.sub(\"\\(\\?P<(.*?)>(.*?)\\)\", caller, url_path)\n if url_path.endswith(\"/?\"):\n url_path = url_path.rstrip(\"/?\")\n return url_path", - "docstring": "Converts a regex matching url to one that can be used with flask" - }, - { - "code": "def update_key_bundle(key_bundle, diff):\n try:\n _add = diff['add']\n except KeyError:\n pass\n else:\n key_bundle.extend(_add)\n try:\n _del = diff['del']\n except KeyError:\n pass\n else:\n _now = time.time()\n for k in _del:\n k.inactive_since = _now", - "docstring": "Apply a diff specification to a KeyBundle.\n The keys that are to be added are added.\n The keys that should be deleted are marked as inactive.\n\n :param key_bundle: The original KeyBundle\n :param diff: The difference specification\n :return: An updated key_bundle" - }, - { - "code": "def fetch(self):\n since = time.mktime(self.options.since.datetime.timetuple())\n until = time.mktime(self.options.until.datetime.timetuple())\n log.info(\"Searching for links saved by {0}\".format(self.user))\n self.stats = self.parent.bitly.user_link_history(created_after=since,\n created_before=until)", - "docstring": "Bit.ly API expect unix timestamps" - }, - { - "code": "def traverseJobGraph(self, rootJob, jobsToReport=None, foundJobStoreIDs=None):\n if jobsToReport is None:\n jobsToReport = []\n if foundJobStoreIDs is None:\n foundJobStoreIDs = set()\n if rootJob.jobStoreID in foundJobStoreIDs:\n return jobsToReport\n foundJobStoreIDs.add(rootJob.jobStoreID)\n jobsToReport.append(rootJob)\n for jobs in rootJob.stack:\n for successorJobStoreID in [x.jobStoreID for x in jobs]:\n if successorJobStoreID not in foundJobStoreIDs and self.jobStore.exists(successorJobStoreID):\n self.traverseJobGraph(self.jobStore.load(successorJobStoreID), jobsToReport, foundJobStoreIDs)\n for jobs in rootJob.services:\n for serviceJobStoreID in [x.jobStoreID for x in jobs]:\n if self.jobStore.exists(serviceJobStoreID):\n if serviceJobStoreID in foundJobStoreIDs:\n raise RuntimeError('Service job was unexpectedly 
found while traversing ')\n foundJobStoreIDs.add(serviceJobStoreID)\n jobsToReport.append(self.jobStore.load(serviceJobStoreID))\n return jobsToReport", - "docstring": "Find all current jobs in the jobStore and return them as an Array.\n\n :param jobNode rootJob: The root job of the workflow.\n :param list jobsToReport: A list of jobNodes to be added to and returned.\n :param set foundJobStoreIDs: A set of jobStoreIDs used to keep track of jobStoreIDs encountered in traversal.\n :returns jobsToReport: The list of jobs currently in the job graph." - }, - { - "code": "def _generate_phrases(self, sentences):\n phrase_list = set()\n for sentence in sentences:\n word_list = [word.lower() for word in wordpunct_tokenize(sentence)]\n phrase_list.update(self._get_phrase_list_from_words(word_list))\n return phrase_list", - "docstring": "Method to generate contender phrases given the sentences of the text\n document.\n\n :param sentences: List of strings where each string represents a\n sentence which forms the text.\n :return: Set of string tuples where each tuple is a collection\n of words forming a contender phrase." - }, - { - "code": "def split_authority(authority):\n if '@' in authority:\n userinfo, hostport = authority.split('@', 1)\n else:\n userinfo, hostport = None, authority\n if userinfo and ':' in userinfo:\n user, passwd = userinfo.split(':', 1)\n else:\n user, passwd = userinfo, None\n if hostport and ':' in hostport:\n host, port = hostport.split(':', 1)\n else:\n host, port = hostport, None\n if not host:\n host = None\n return (user, passwd, host, port)", - "docstring": "Basic authority parser that splits authority into component parts\n\n >>> split_authority(\"user:password@host:port\")\n ('user', 'password', 'host', 'port')" - }, - { - "code": "def schedule_vkontakte_message(message, to, sender=None, priority=None):\n schedule_messages(message, recipients('vk', to), sender=sender, priority=priority)", - "docstring": "Schedules VKontakte message for delivery.\n\n :param str message: text or URL to publish on wall.\n :param list|str|unicode to: recipients addresses or Django User model heir instances with `vk` attributes.\n :param User sender: User model heir instance\n :param int priority: number describing message priority. If set overrides priority provided with message type." - }, - { - "code": "def setupnode(overwrite=False):\n if not port_is_open():\n if not skip_disable_root():\n disable_root()\n port_changed = change_ssh_port()\n if server_state('setupnode-incomplete'):\n env.overwrite=True\n else: set_server_state('setupnode-incomplete')\n upload_ssh_key()\n restrict_ssh()\n add_repositories()\n upgrade_packages()\n setup_ufw()\n uninstall_packages()\n install_packages()\n upload_etc()\n post_install_package()\n setup_ufw_rules()\n set_timezone()\n set_server_state('setupnode-incomplete',delete=True)\n for s in webserver_list():\n stop_webserver(s)\n start_webserver(s)", - "docstring": "Install a baseline host. 
Can be run multiple times" - }, - { - "code": "def _get_template_text_from_package(self, template_name):\n if self._package_name is None:\n raise self.MustacheError('No package specified for template loading.')\n path = os.path.join('templates', template_name + '.mustache')\n template_text = pkgutil.get_data(self._package_name, path)\n if template_text is None:\n raise self.MustacheError(\n 'could not find template {} in package {}'.format(path, self._package_name))\n return template_text.decode('utf8')", - "docstring": "Load the named template embedded in our package." - }, - { - "code": "def _get_globals():\n if _get_globals_callback is not None:\n return _get_globals_callback()\n else:\n try:\n from __main__ import __dict__ as namespace\n except ImportError:\n try:\n import __main__\n namespace = __main__.__dict__\n except:\n namespace\n shell = namespace.get('__ipythonshell__')\n if shell is not None and hasattr(shell, 'user_ns'):\n return shell.user_ns\n else:\n return namespace\n return namespace", - "docstring": "Return current Python interpreter globals namespace" - }, - { - "code": "def configure_root():\n root_logger = logging.getLogger()\n for hdlr in root_logger.handlers:\n if isinstance(hdlr, logging.StreamHandler):\n root_logger.removeHandler(hdlr)\n root_logger.setLevel(ROOT_LOG_LEVEL)\n hdlr = logging.StreamHandler(ROOT_LOG_STREAM) \n formatter = colorlog.ColoredFormatter(\n '%(purple)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s',\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red,bg_white',\n } \n ) \n hdlr.setFormatter(formatter)\n root_logger.addHandler(hdlr)", - "docstring": "Configure the root logger." - }, - { - "code": "def MessageToJson(message,\n including_default_value_fields=False,\n preserving_proto_field_name=False):\n printer = _Printer(including_default_value_fields,\n preserving_proto_field_name)\n return printer.ToJsonString(message)", - "docstring": "Converts protobuf message to JSON format.\n\n Args:\n message: The protocol buffers message instance to serialize.\n including_default_value_fields: If True, singular primitive fields,\n repeated fields, and map fields will always be serialized. If\n False, only serialize non-empty fields. Singular message fields\n and oneof fields are not affected by this option.\n preserving_proto_field_name: If True, use the original proto field\n names as defined in the .proto file. If False, convert the field\n names to lowerCamelCase.\n\n Returns:\n A string containing the JSON formatted protocol buffer message." - }, - { - "code": "def add_client(self, client_identifier):\n if client_identifier in self.clients:\n _LOGGER.error('%s already in group %s', client_identifier, self.identifier)\n return\n new_clients = self.clients\n new_clients.append(client_identifier)\n yield from self._server.group_clients(self.identifier, new_clients)\n _LOGGER.info('added %s to %s', client_identifier, self.identifier)\n self._server.client(client_identifier).callback()\n self.callback()", - "docstring": "Add a client." - }, - { - "code": "def at_least_libvips(x, y):\n major = version(0)\n minor = version(1)\n return major > x or (major == x and minor >= y)", - "docstring": "Is this at least libvips x.y?" 
- }, - { - "code": "def fire(self, state):\n if self.action is None or self.action == BP_IPDB:\n import ipdb; ipdb.set_trace()\n elif self.action == BP_IPYTHON:\n import IPython\n shell = IPython.terminal.embed.InteractiveShellEmbed()\n shell.mainloop(display_banner=\"This is an ipython shell for you to happily debug your state!\\n\" + \\\n \"The state can be accessed through the variable 'state'. You can\\n\" +\\\n \"make modifications, then exit this shell to resume your analysis.\")\n else:\n self.action(state)", - "docstring": "Trigger the breakpoint.\n\n :param state: The state." - }, - { - "code": "def u32(self, name, value=None, align=None):\n self.uint(4, name, value, align)", - "docstring": "Add an unsigned 4 byte integer field to template.\n\n This is an convenience method that simply calls `Uint` keyword with predefined length." - }, - { - "code": "def by_filter(cls, session, opts, **kwargs):\n where = []\n if opts.get('local_only'):\n where.append(cls.local == True)\n if opts.get('names'):\n where.append(cls.name.in_(opts['names']))\n if opts.get('classifiers'):\n ids = [c.id for c in opts.get('classifiers')]\n cls_pkg = classifier__package\n qry = session.query(cls_pkg.c.package_id,\n func.count('*'))\n qry = qry.filter(cls_pkg.c.classifier_id.in_(ids))\n qry = qry.group_by(cls_pkg.c.package_id)\n qry = qry.having(func.count('*') >= len(ids))\n where.append(cls.id.in_([r[0] for r in qry.all()]))\n return cls.find(session, where=where, **kwargs)", - "docstring": "Get packages from given filters.\n\n :param session: SQLAlchemy session\n :type session: :class:`sqlalchemy.Session`\n\n :param opts: filtering options\n :type opts: `dict\n\n :return: package instances\n :rtype: generator of :class:`pyshop.models.Package`" - }, - { - "code": "def handle_time(msg):\n return msg.copy(ack=0, payload=calendar.timegm(time.localtime()))", - "docstring": "Process an internal time request message." - }, - { - "code": "def _compute_weights(self):\n n = self.n\n k = self.kappa\n self.Wm = np.full(2*n+1, .5 / (n + k))\n self.Wm[0] = k / (n+k)\n self.Wc = self.Wm", - "docstring": "Computes the weights for the unscented Kalman filter. In this\n formulation the weights for the mean and covariance are the same." - }, - { - "code": "def copy(self):\n other = ProtoFeed()\n for key in cs.PROTOFEED_ATTRS:\n value = getattr(self, key)\n if isinstance(value, pd.DataFrame):\n value = value.copy()\n setattr(other, key, value)\n return other", - "docstring": "Return a copy of this ProtoFeed, that is, a feed with all the\n same attributes." 
- }, - { - "code": "def register_forward_model(self, pid_mag, pid_pha):\n self.register_magnitude_model(pid_mag)\n self.register_phase_model(pid_pha)", - "docstring": "Register parameter sets as the forward models for magnitude and\n phase\n\n Parameters\n ----------\n pid_mag: int\n parameter id corresponding to the magnitude model\n pid_pha: int\n parameter id corresponding to the phase model" - }, - { - "code": "def quality(self):\n from udata.models import Discussion\n result = {}\n if not self.id:\n return result\n if self.next_update:\n result['frequency'] = self.frequency\n result['update_in'] = -(self.next_update - datetime.now()).days\n if self.tags:\n result['tags_count'] = len(self.tags)\n if self.description:\n result['description_length'] = len(self.description)\n if self.resources:\n result['has_resources'] = True\n result['has_only_closed_or_no_formats'] = all(\n resource.closed_or_no_format for resource in self.resources)\n result['has_unavailable_resources'] = not all(\n self.check_availability())\n discussions = Discussion.objects(subject=self)\n if discussions:\n result['discussions'] = len(discussions)\n result['has_untreated_discussions'] = not all(\n discussion.person_involved(self.owner)\n for discussion in discussions)\n result['score'] = self.compute_quality_score(result)\n return result", - "docstring": "Return a dict filled with metrics related to the inner\n\n quality of the dataset:\n\n * number of tags\n * description length\n * and so on" - }, - { - "code": "def findHTMLMeta(stream):\n parser = YadisHTMLParser()\n chunks = []\n while 1:\n chunk = stream.read(CHUNK_SIZE)\n if not chunk:\n break\n chunks.append(chunk)\n try:\n parser.feed(chunk)\n except HTMLParseError, why:\n chunks.append(stream.read())\n break\n except ParseDone, why:\n uri = why[0]\n if uri is None:\n chunks.append(stream.read())\n break\n else:\n return uri\n content = ''.join(chunks)\n raise MetaNotFound(content)", - "docstring": "Look for a meta http-equiv tag with the YADIS header name.\n\n @param stream: Source of the html text\n @type stream: Object that implements a read() method that works\n like file.read\n\n @return: The URI from which to fetch the XRDS document\n @rtype: str\n\n @raises MetaNotFound: raised with the content that was\n searched as the first parameter." - }, - { - "code": "def save_to_file(self, fp, sep='\\n'):\n n = 0\n m = self.read()\n while m:\n n += 1\n fp.write(m.get_body())\n if sep:\n fp.write(sep)\n self.delete_message(m)\n m = self.read()\n return n", - "docstring": "Read all messages from the queue and persist them to file-like object.\n Messages are written to the file and the 'sep' string is written\n in between messages. Messages are deleted from the queue after\n being written to the file.\n Returns the number of messages saved." 
- }, - { - "code": "def _get_min_max_value(min, max, value=None, step=None):\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else:\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n if value == 0:\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value", - "docstring": "Return min, max, value given input values with possible None." - }, - { - "code": "def add_reads(self, reads):\n if len(reads) == 0:\n return self\n new_reads = self.reads.union(reads)\n if len(new_reads) > len(self.reads):\n return VariantSequence(\n prefix=self.prefix,\n alt=self.alt,\n suffix=self.suffix,\n reads=new_reads)\n else:\n return self", - "docstring": "Create another VariantSequence with more supporting reads." - }, - { - "code": "def __apply_nested_option(self, option_name, field_names, set_operation):\n nested_fields = [name.split('.', 1) for name in field_names if '.' in name]\n nested_options = defaultdict(list)\n for parent, nested_names in nested_fields:\n nested_options[parent].append(nested_names)\n for key, options in iter(nested_options.items()):\n new_options = self.set_class(options)\n original_options = getattr(self.declared_fields[key], option_name, ())\n if original_options:\n if set_operation == 'union':\n new_options |= self.set_class(original_options)\n if set_operation == 'intersection':\n new_options &= self.set_class(original_options)\n setattr(self.declared_fields[key], option_name, new_options)", - "docstring": "Apply nested options to nested fields" - }, - { - "code": "def adjustMask(self):\r\n if self.currentMode() == XPopupWidget.Mode.Dialog:\r\n self.clearMask()\r\n return\r\n path = self.borderPath()\r\n bitmap = QBitmap(self.width(), self.height())\r\n bitmap.fill(QColor('white'))\r\n with XPainter(bitmap) as painter:\r\n painter.setRenderHint(XPainter.Antialiasing)\r\n pen = QPen(QColor('black'))\r\n pen.setWidthF(0.75)\r\n painter.setPen(pen)\r\n painter.setBrush(QColor('black'))\r\n painter.drawPath(path)\r\n self.setMask(bitmap)", - "docstring": "Updates the alpha mask for this popup widget." 
- }, - { - "code": "def mutate(self, context, handler, args, kw):\n\t\tdef cast(arg, val):\n\t\t\tif arg not in annotations:\n\t\t\t\treturn\n\t\t\tcast = annotations[key]\n\t\t\ttry:\n\t\t\t\tval = cast(val)\n\t\t\texcept (ValueError, TypeError) as e:\n\t\t\t\tparts = list(e.args)\n\t\t\t\tparts[0] = parts[0] + \" processing argument '{}'\".format(arg)\n\t\t\t\te.args = tuple(parts)\n\t\t\t\traise\n\t\t\treturn val\n\t\tannotations = getattr(handler.__func__ if hasattr(handler, '__func__') else handler, '__annotations__', None)\n\t\tif not annotations:\n\t\t\treturn\n\t\targspec = getfullargspec(handler)\n\t\targlist = list(argspec.args)\n\t\tif ismethod(handler):\n\t\t\tdel arglist[0]\n\t\tfor i, value in enumerate(list(args)):\n\t\t\tkey = arglist[i]\n\t\t\tif key in annotations:\n\t\t\t\targs[i] = cast(key, value)\n\t\tfor key, value in list(items(kw)):\n\t\t\tif key in annotations:\n\t\t\t\tkw[key] = cast(key, value)", - "docstring": "Inspect and potentially mutate the given handler's arguments.\n\t\t\n\t\tThe args list and kw dictionary may be freely modified, though invalid arguments to the handler will fail." - }, - { - "code": "def handle_wf_finalization(self):\n if ((not self.current.flow_enabled or (\n self.current.task_type.startswith('End') and not self.are_we_in_subprocess())) and\n 'token' in self.current.output):\n del self.current.output['token']", - "docstring": "Removes the ``token`` key from ``current.output`` if WF is over." - }, - { - "code": "def remove_trailing_string(content, trailing):\n if content.endswith(trailing) and content != trailing:\n return content[:-len(trailing)]\n return content", - "docstring": "Strip trailing component `trailing` from `content` if it exists.\n Used when generating names from view classes." - }, - { - "code": "def _getStyle(node):\n u\n if node.nodeType == Node.ELEMENT_NODE and len(node.getAttribute('style')) > 0:\n styleMap = {}\n rawStyles = node.getAttribute('style').split(';')\n for style in rawStyles:\n propval = style.split(':')\n if len(propval) == 2:\n styleMap[propval[0].strip()] = propval[1].strip()\n return styleMap\n else:\n return {}", - "docstring": "u\"\"\"Returns the style attribute of a node as a dictionary." 
- }, - { - "code": "def evaluate(\n model,\n data_path,\n gpu_id=-1,\n gold_preproc=False,\n displacy_path=None,\n displacy_limit=25,\n return_scores=False,\n):\n msg = Printer()\n util.fix_random_seed()\n if gpu_id >= 0:\n util.use_gpu(gpu_id)\n util.set_env_log(False)\n data_path = util.ensure_path(data_path)\n displacy_path = util.ensure_path(displacy_path)\n if not data_path.exists():\n msg.fail(\"Evaluation data not found\", data_path, exits=1)\n if displacy_path and not displacy_path.exists():\n msg.fail(\"Visualization output directory not found\", displacy_path, exits=1)\n corpus = GoldCorpus(data_path, data_path)\n nlp = util.load_model(model)\n dev_docs = list(corpus.dev_docs(nlp, gold_preproc=gold_preproc))\n begin = timer()\n scorer = nlp.evaluate(dev_docs, verbose=False)\n end = timer()\n nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)\n results = {\n \"Time\": \"%.2f s\" % (end - begin),\n \"Words\": nwords,\n \"Words/s\": \"%.0f\" % (nwords / (end - begin)),\n \"TOK\": \"%.2f\" % scorer.token_acc,\n \"POS\": \"%.2f\" % scorer.tags_acc,\n \"UAS\": \"%.2f\" % scorer.uas,\n \"LAS\": \"%.2f\" % scorer.las,\n \"NER P\": \"%.2f\" % scorer.ents_p,\n \"NER R\": \"%.2f\" % scorer.ents_r,\n \"NER F\": \"%.2f\" % scorer.ents_f,\n }\n msg.table(results, title=\"Results\")\n if displacy_path:\n docs, golds = zip(*dev_docs)\n render_deps = \"parser\" in nlp.meta.get(\"pipeline\", [])\n render_ents = \"ner\" in nlp.meta.get(\"pipeline\", [])\n render_parses(\n docs,\n displacy_path,\n model_name=model,\n limit=displacy_limit,\n deps=render_deps,\n ents=render_ents,\n )\n msg.good(\"Generated {} parses as HTML\".format(displacy_limit), displacy_path)\n if return_scores:\n return scorer.scores", - "docstring": "Evaluate a model. To render a sample of parses in a HTML file, set an\n output directory as the displacy_path argument." - }, - { - "code": "def _convert_query(self, query):\n query = self.dictionary.doc2bow(self._tokenize_latex(query))\n sims = self.index[query]\n neighbors = sorted(sims, key=lambda item: -item[1])\n neighbors = {\"neighbors\":[{self.columns[0]: {\"data\": self.docs[n[0]], \"fmt\": \"math\"}, self.columns[1]: {\"data\": float(n[1])}} for n in neighbors]} if neighbors else {\"neighbors\": []}\n return neighbors", - "docstring": "Convert query into an indexable string." 
- }, - { - "code": "def get_benchmark_from_name(root, name, extra_params=None):\n if '-' in name:\n try:\n name, param_idx = name.split('-', 1)\n param_idx = int(param_idx)\n except ValueError:\n raise ValueError(\"Benchmark id %r is invalid\" % (name,))\n else:\n param_idx = None\n update_sys_path(root)\n benchmark = None\n parts = name.split('.')\n for i in [1, 2]:\n path = os.path.join(root, *parts[:-i]) + '.py'\n if not os.path.isfile(path):\n continue\n modname = '.'.join([os.path.basename(root)] + parts[:-i])\n module = import_module(modname)\n try:\n module_attr = getattr(module, parts[-i])\n except AttributeError:\n break\n if i == 1 and inspect.isfunction(module_attr):\n benchmark = _get_benchmark(parts[-i], module, None, module_attr)\n break\n elif i == 2 and inspect.isclass(module_attr):\n try:\n class_attr = getattr(module_attr, parts[-1])\n except AttributeError:\n break\n if (inspect.isfunction(class_attr) or\n inspect.ismethod(class_attr)):\n benchmark = _get_benchmark(parts[-1], module, module_attr,\n class_attr)\n break\n if benchmark is None:\n for benchmark in disc_benchmarks(root):\n if benchmark.name == name:\n break\n else:\n raise ValueError(\n \"Could not find benchmark '{0}'\".format(name))\n if param_idx is not None:\n benchmark.set_param_idx(param_idx)\n if extra_params:\n class ExtraBenchmarkAttrs:\n pass\n for key, value in extra_params.items():\n setattr(ExtraBenchmarkAttrs, key, value)\n benchmark._attr_sources.insert(0, ExtraBenchmarkAttrs)\n return benchmark", - "docstring": "Create a benchmark from a fully-qualified benchmark name.\n\n Parameters\n ----------\n root : str\n Path to the root of a benchmark suite.\n\n name : str\n Fully-qualified name to a specific benchmark." - }, - { - "code": "def peak_signal_to_noise_ratio(true, pred):\n return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)", - "docstring": "Image quality metric based on maximal signal power vs. power of the noise.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n peak signal to noise ratio (PSNR)" - }, - { - "code": "def queuedb_row_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", - "docstring": "Dict row factory" - }, - { - "code": "def save(self, commit=True):\n if self.errors:\n raise ValueError(\"The %s could not be saved because the data didn't\"\n \" validate.\" % self.instance._meta.object_name)\n instance = super(BaseDynamicEntityForm, self).save(commit=False)\n for name in instance.get_schema_names():\n value = self.cleaned_data.get(name)\n setattr(instance, name, value)\n if commit:\n instance.save()\n return instance", - "docstring": "Saves this ``form``'s cleaned_data into model instance ``self.instance``\n and related EAV attributes.\n\n Returns ``instance``." - }, - { - "code": "def setTargets(self, targets):\n if not self.verifyArguments(targets) and not self.patterned:\n raise NetworkError('setTargets() requires [[...],[...],...] or [{\"layerName\": [...]}, ...].', targets)\n self.targets = targets", - "docstring": "Sets the targets." 
- }, - { - "code": "def out(self, obj, formatter=None, out_file=None):\n if not isinstance(obj, CommandResultItem):\n raise TypeError('Expected {} got {}'.format(CommandResultItem.__name__, type(obj)))\n import platform\n import colorama\n if platform.system() == 'Windows':\n out_file = colorama.AnsiToWin32(out_file).stream\n output = formatter(obj)\n try:\n print(output, file=out_file, end='')\n except IOError as ex:\n if ex.errno == errno.EPIPE:\n pass\n else:\n raise\n except UnicodeEncodeError:\n print(output.encode('ascii', 'ignore').decode('utf-8', 'ignore'),\n file=out_file, end='')", - "docstring": "Produces the output using the command result.\n The method does not return a result as the output is written straight to the output file.\n\n :param obj: The command result\n :type obj: knack.util.CommandResultItem\n :param formatter: The formatter we should use for the command result\n :type formatter: function\n :param out_file: The file to write output to\n :type out_file: file-like object" - }, - { - "code": "def __createInspectorActionGroup(self, parent):\n actionGroup = QtWidgets.QActionGroup(parent)\n actionGroup.setExclusive(True)\n sortedItems = sorted(self.argosApplication.inspectorRegistry.items,\n key=lambda item: item.identifier)\n shortCutNr = 1\n for item in sortedItems:\n logger.debug(\"item: {}\".format(item.identifier))\n setAndDrawFn = partial(self.setAndDrawInspectorById, item.identifier)\n action = QtWidgets.QAction(item.name, self, triggered=setAndDrawFn, checkable=True)\n action.setData(item.identifier)\n if shortCutNr <= 9 and \"debug\" not in item.identifier:\n action.setShortcut(QtGui.QKeySequence(\"Ctrl+{}\".format(shortCutNr)))\n shortCutNr += 1\n actionGroup.addAction(action)\n return actionGroup", - "docstring": "Creates an action group with 'set inspector' actions for all installed inspector." - }, - { - "code": "def open(self, mode=None):\n if mode is None:\n mode = self.mode\n elif mode not in ['r', 'w', 'a']:\n raise ValueError('Invalid mode! Modes: [\\'a\\', \\'r\\', \\'w\\']')\n if self._file is None:\n self._file = h5py.File(self.path, mode=mode)", - "docstring": "Open the container file.\n\n Args:\n mode (str): Either 'r' for read-only, 'w' for truncate and write or\n 'a' for append. (default: 'a').\n If ``None``, uses ``self.mode``." - }, - { - "code": "def __get_method_abbrev(self):\n abbrevs = {\n 'furthest': 'fn',\n 'nearest': 'nn',\n 'average': 'an',\n }\n if self.Parameters['method'].isOn():\n method = self.Parameters['method'].Value\n else:\n method = self.Parameters['method'].Default\n return abbrevs[method]", - "docstring": "Abbreviated form of clustering method parameter.\n\n Used to guess output filenames for MOTHUR." 
- }, - { - "code": "def portCnt(port):\n if port.children:\n return sum(map(lambda p: portCnt(p), port.children))\n else:\n return 1", - "docstring": "recursively count number of ports without children" - }, - { - "code": "def deploy_custom_domain(awsclient, api_name, api_target_stage,\n api_base_path, domain_name, route_53_record,\n cert_name, cert_arn, hosted_zone_id, ensure_cname):\n api_base_path = _basepath_to_string_if_null(api_base_path)\n api = _api_by_name(awsclient, api_name)\n if not api:\n print(\"Api %s does not exist, aborting...\" % api_name)\n return 1\n domain = _custom_domain_name_exists(awsclient, domain_name)\n if not domain:\n response = _create_custom_domain(awsclient, domain_name, cert_name, cert_arn)\n cloudfront_distribution = response['distributionDomainName']\n else:\n response = _update_custom_domain(awsclient, domain_name, cert_name, cert_arn)\n cloudfront_distribution = response['distributionDomainName']\n if _base_path_mapping_exists(awsclient, domain_name, api_base_path):\n _ensure_correct_base_path_mapping(awsclient, domain_name,\n api_base_path, api['id'],\n api_target_stage)\n else:\n _create_base_path_mapping(awsclient, domain_name, api_base_path,\n api_target_stage, api['id'])\n if ensure_cname:\n record_exists, record_correct = \\\n _record_exists_and_correct(awsclient, hosted_zone_id,\n route_53_record,\n cloudfront_distribution)\n if record_correct:\n print('Route53 record correctly set: %s --> %s' % (route_53_record,\n cloudfront_distribution))\n else:\n _ensure_correct_route_53_record(awsclient, hosted_zone_id,\n record_name=route_53_record,\n record_value=cloudfront_distribution)\n print('Route53 record set: %s --> %s' % (route_53_record,\n cloudfront_distribution))\n else:\n print('Skipping creating and checking DNS record')\n return 0", - "docstring": "Add custom domain to your API.\n\n :param api_name:\n :param api_target_stage:\n :param api_base_path:\n :param domain_name:\n :param route_53_record:\n :param ssl_cert:\n :param cert_name:\n :param cert_arn:\n :param hosted_zone_id:\n :return: exit_code" - }, - { - "code": "def to_dict_hook(obj):\n if hasattr(obj, 'to_dict'):\n result = obj.to_dict()\n assert isinstance(result, dict), 'to_dict must return a dictionary'\n result['_type'] = f'{obj.__module__}.{obj.__class__.__name__}'\n result['_version'] = 0\n return result\n raise TypeError(\n f'Object of type {obj.__class__.__name__} is not JSON serializable',\n )", - "docstring": "Convert internal objects to a serializable representation.\n\n During serialization if the object has the hook method `to_dict` it will be\n automatically called and metadata for decoding will be added. This allows\n for the translation of objects trees of arbitrary depth. E.g.:\n\n >>> class Root:\n >>> def __init__(self, left, right):\n >>> self.left = left\n >>> self.right = right\n >>> def to_dict(self):\n >>> return {\n >>> 'left': left,\n >>> 'right': right,\n >>> }\n >>> class Node:\n >>> def to_dict(self):\n >>> return {'value': 'node'}\n >>> root = Root(left=None(), right=None())\n >>> json.dumps(root, default=to_dict_hook)\n '{\n \"_type\": \"Root\",\n \"left\": {\"_type\": \"Node\", \"value\": \"node\"},\n \"right\": {\"_type\": \"Node\", \"value\": \"node\"}\n }'" - }, - { - "code": "def _make_readline_peeker(self):\n counter = itertools.count(0)\n def readline():\n try:\n return self._peek_buffer(next(counter))\n except StopIteration:\n return ''\n return readline", - "docstring": "Make a readline-like function which peeks into the source." 
- }, - { - "code": "def get_wordlist(stanzas):\n return sorted(list(set().union(*[stanza.words for stanza in stanzas])))", - "docstring": "Get an iterable of all final words in all stanzas" - }, - { - "code": "def _sin(x):\n return 0. if np.isclose(np.mod(x, np.pi), 0.) else np.sin(x)", - "docstring": "sine with case for pi multiples" - }, - { - "code": "def extrapolate_error(self):\n if self.numvars.idx_method > 2:\n self.numvars.extrapolated_error = modelutils.exp(\n modelutils.log(self.numvars.error) +\n (modelutils.log(self.numvars.error) -\n modelutils.log(self.numvars.last_error)) *\n (self.numconsts.nmb_methods-self.numvars.idx_method))\n else:\n self.numvars.extrapolated_error = -999.9", - "docstring": "Estimate the numerical error to be expected when applying all\n methods available based on the results of the current and the\n last method.\n\n Note that this expolation strategy cannot be applied on the first\n method. If the current method is the first one, `-999.9` is returned.\n\n >>> from hydpy.models.test_v1 import *\n >>> parameterstep()\n >>> model.numvars.error = 1e-2\n >>> model.numvars.last_error = 1e-1\n >>> model.numvars.idx_method = 10\n >>> model.extrapolate_error()\n >>> from hydpy import round_\n >>> round_(model.numvars.extrapolated_error)\n 0.01\n >>> model.numvars.idx_method = 9\n >>> model.extrapolate_error()\n >>> round_(model.numvars.extrapolated_error)\n 0.001" - }, - { - "code": "def sort(args):\n p = OptionParser(sort.__doc__)\n p.add_option(\"--sizes\", default=False, action=\"store_true\",\n help=\"Sort by decreasing size [default: %default]\")\n opts, args = p.parse_args(args)\n if len(args) != 1:\n sys.exit(p.print_help())\n fastafile, = args\n sortedfastafile = fastafile.rsplit(\".\", 1)[0] + \".sorted.fasta\"\n f = Fasta(fastafile, index=False)\n fw = must_open(sortedfastafile, \"w\")\n if opts.sizes:\n sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))\n logging.debug(\"Sort by size: max: {0}, min: {1}\".\\\n format(sortlist[0], sortlist[-1]))\n sortlist = [x for x, s in sortlist]\n else:\n sortlist = sorted(f.iterkeys())\n for key in sortlist:\n rec = f[key]\n SeqIO.write([rec], fw, \"fasta\")\n logging.debug(\"Sorted file written to `{0}`.\".format(sortedfastafile))\n fw.close()\n return sortedfastafile", - "docstring": "%prog sort fastafile\n\n Sort a list of sequences and output with sorted IDs, etc." - }, - { - "code": "def instance(self, other):\n assert '/' not in str(other)\n return Key(str(self) + ':' + str(other))", - "docstring": "Returns an instance Key, by appending a name to the namespace." - }, - { - "code": "def pearson_correlation(self):\n x, y, dt = self.data\n X, Y = np.array(x), np.array(y)\n X -= X.mean(0)\n Y -= Y.mean(0)\n X /= X.std(0)\n Y /= Y.std(0)\n return (np.mean(X*Y) ** 2) * 100", - "docstring": "Compute Pearson Correlation Coefficient." 
- }, - { - "code": "def __read(self):\n self._socket.setblocking(0)\n while not self._stop_event.is_set():\n ready = select.select([self._socket], [], [], 1)\n if ready[0]:\n data, sender = self._socket.recvfrom(1024)\n try:\n self._handle_heartbeat(sender, data)\n except Exception as ex:\n _logger.exception(\"Error handling the heart beat: %s\", ex)", - "docstring": "Reads packets from the socket" - }, - { - "code": "def read(self):\n if not self.ready_to_read():\n return None\n data = self._read()\n if data is None:\n return None\n return self._parse_message(data)", - "docstring": "If there is data available to be read from the transport, reads the data and tries to parse it as a protobuf message. If the parsing succeeds, return a protobuf object.\n Otherwise, returns None." - }, - { - "code": "def convert_roipooling(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n pooled_shape = convert_string_to_list(attrs.get('pooled_size'))\n scale = float(attrs.get(\"spatial_scale\"))\n node = onnx.helper.make_node(\n 'MaxRoiPool',\n input_nodes,\n [name],\n pooled_shape=pooled_shape,\n spatial_scale=scale,\n name=name\n )\n return [node]", - "docstring": "Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool\n operator and return the created node." - }, - { - "code": "def tuple_replace(tup, *pairs):\n tuple_list = list(tup)\n for index, value in pairs:\n tuple_list[index] = value\n return tuple(tuple_list)", - "docstring": "Return a copy of a tuple with some elements replaced.\n\n :param tup: The tuple to be copied.\n :param pairs: Any number of (index, value) tuples where index is the index\n of the item to replace and value is the new value of the item." - }, - { - "code": "def calc_check_digit(digits):\n ints = [int(d) for d in digits]\n l = len(ints)\n odds = slice((l - 1) % 2, l, 2)\n even = slice(l % 2, l, 2)\n checksum = 3 * sum(ints[odds]) + sum(ints[even])\n return str(-checksum % 10)", - "docstring": "Calculate and return the GS1 check digit." - }, - { - "code": "def cudaDriverGetVersion():\n version = ctypes.c_int()\n status = _libcudart.cudaDriverGetVersion(ctypes.byref(version))\n cudaCheckStatus(status)\n return version.value", - "docstring": "Get installed CUDA driver version.\n\n Return the version of the installed CUDA driver as an integer. If\n no driver is detected, 0 is returned.\n\n Returns\n -------\n version : int\n Driver version." - }, - { - "code": "def resources(self):\n resources = []\n for endpoint in self.endpoints:\n resource_type = endpoint.Meta.resource_type\n table = endpoint.Meta.table\n url = endpoint.name\n resources.append((resource_type, {'table': table, 'url': url}))\n return resources", - "docstring": "Return list of all registered resources." - }, - { - "code": "def entry_point():\n signal.signal(signal.SIGINT, lambda *_: getattr(os, '_exit')(0))\n config = get_arguments()\n setup_logging(config['verbose'])\n try:\n main(config)\n except HandledError:\n if config['raise']:\n raise\n logging.critical('Failure.')\n sys.exit(0 if config['ignore_errors'] else 1)", - "docstring": "Entry-point from setuptools." - }, - { - "code": "def intel_extractor(url, response):\r\n for rintel in rintels:\r\n res = re.sub(r'<(script).*?(?s)', '', response)\r\n res = re.sub(r'<[^<]+?>', '', res)\r\n matches = rintel[0].findall(res)\r\n if matches:\r\n for match in matches:\r\n verb('Intel', match)\r\n bad_intel.add((match, rintel[1], url))", - "docstring": "Extract intel from the response body." 
- }, - { - "code": "def has_method(obj, method_name):\n if method_name in dir(obj):\n return True\n log.error('Method \\'%s\\' not yet supported!', method_name)\n return False", - "docstring": "Find if the provided object has a specific method" - }, - { - "code": "def setup_url(self, url):\n r\n self.url = bytes(url, \"utf8\")\n res = librtmp.RTMP_SetupURL(self.rtmp, self.url)\n if res < 1:\n raise RTMPError(\"Unable to parse URL\")", - "docstring": "r\"\"\"Attempt to parse a RTMP URL.\n\n Additional options may be specified by appending space-separated\n key=value pairs to the URL. Special characters in values may need\n to be escaped to prevent misinterpretation by the option parser.\n The escape encoding uses a backslash followed by two hexadecimal\n digits representing the ASCII value of the character. E.g., spaces\n must be escaped as `\\\\20` and backslashes must be escaped as `\\\\5c`.\n\n :param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`\n\n Raises :exc:`RTMPError` if URL parsing fails." - }, - { - "code": "def is_file(dirname):\n if not os.path.isfile(dirname):\n msg = \"{0} is not an existing file\".format(dirname)\n raise argparse.ArgumentTypeError(msg)\n else:\n return dirname", - "docstring": "Checks if a path is an actual file that exists" - }, - { - "code": "def UploadSeparatePatches(issue, rpc_server, patchset, data, options):\n\tpatches = SplitPatch(data)\n\trv = []\n\tfor patch in patches:\n\t\tset_status(\"uploading patch for \" + patch[0])\n\t\tif len(patch[1]) > MAX_UPLOAD_SIZE:\n\t\t\tprint (\"Not uploading the patch for \" + patch[0] +\n\t\t\t\t\" because the file is too large.\")\n\t\t\tcontinue\n\t\tform_fields = [(\"filename\", patch[0])]\n\t\tif not options.download_base:\n\t\t\tform_fields.append((\"content_upload\", \"1\"))\n\t\tfiles = [(\"data\", \"data.diff\", patch[1])]\n\t\tctype, body = EncodeMultipartFormData(form_fields, files)\n\t\turl = \"/%d/upload_patch/%d\" % (int(issue), int(patchset))\n\t\tprint \"Uploading patch for \" + patch[0]\n\t\tresponse_body = rpc_server.Send(url, body, content_type=ctype)\n\t\tlines = response_body.splitlines()\n\t\tif not lines or lines[0] != \"OK\":\n\t\t\tStatusUpdate(\" --> %s\" % response_body)\n\t\t\tsys.exit(1)\n\t\trv.append([lines[1], patch[0]])\n\treturn rv", - "docstring": "Uploads a separate patch for each file in the diff output.\n\n\tReturns a list of [patch_key, filename] for each file." - }, - { - "code": "def _multi_permission_mask(mode):\n def compose(f, g):\n return lambda *args, **kwargs: g(f(*args, **kwargs))\n return functools.reduce(compose, map(_permission_mask, mode.split(',')))", - "docstring": "Support multiple, comma-separated Unix chmod symbolic modes.\n\n >>> _multi_permission_mask('a=r,u+w')(0) == 0o644\n True" - }, - { - "code": "def _get_ll_pointer_type(self, target_data, context=None):\n from . import Module, GlobalVariable\n from ..binding import parse_assembly\n if context is None:\n m = Module()\n else:\n m = Module(context=context)\n foo = GlobalVariable(m, self, name=\"foo\")\n with parse_assembly(str(m)) as llmod:\n return llmod.get_global_variable(foo.name).type", - "docstring": "Convert this type object to an LLVM type." 
- }, - { - "code": "def get_facts(self):\n output = self.device.facts\n uptime = self.device.uptime or -1\n interfaces = junos_views.junos_iface_table(self.device)\n interfaces.get()\n interface_list = interfaces.keys()\n return {\n 'vendor': u'Juniper',\n 'model': py23_compat.text_type(output['model']),\n 'serial_number': py23_compat.text_type(output['serialnumber']),\n 'os_version': py23_compat.text_type(output['version']),\n 'hostname': py23_compat.text_type(output['hostname']),\n 'fqdn': py23_compat.text_type(output['fqdn']),\n 'uptime': uptime,\n 'interface_list': interface_list\n }", - "docstring": "Return facts of the device." - }, - { - "code": "def qteCloseQtmacs(self):\n msgObj = QtmacsMessage()\n msgObj.setSignalName('qtesigCloseQtmacs')\n self.qtesigCloseQtmacs.emit(msgObj)\n for appName in self.qteGetAllAppletIDs():\n self.qteKillApplet(appName)\n self._qteFocusManager()\n for window in self._qteWindowList:\n window.close()\n self._qteFocusManager()\n self.deleteLater()", - "docstring": "Close Qtmacs.\n\n First kill all applets, then shut down Qtmacs.\n\n |Args|\n\n * **None**\n\n |Returns|\n\n * **None**\n\n |Raises|\n\n * **None**" - }, - { - "code": "def check_indexes_all_same(indexes, message=\"Indexes are not equal.\"):\n iterator = iter(indexes)\n first = next(iterator)\n for other in iterator:\n same = (first == other)\n if not same.all():\n bad_loc = np.flatnonzero(~same)[0]\n raise ValueError(\n \"{}\\nFirst difference is at index {}: \"\n \"{} != {}\".format(\n message, bad_loc, first[bad_loc], other[bad_loc]\n ),\n )", - "docstring": "Check that a list of Index objects are all equal.\n\n Parameters\n ----------\n indexes : iterable[pd.Index]\n Iterable of indexes to check.\n\n Raises\n ------\n ValueError\n If the indexes are not all the same." - }, - { - "code": "def get_ips_from_string(ip_str):\n ip_list = []\n for ip in ip_str.split(','):\n clean_ip = ip.strip().lower()\n if clean_ip:\n ip_list.append(clean_ip)\n ip_count = len(ip_list)\n if ip_count > 0:\n if is_valid_ip(ip_list[0]) and is_valid_ip(ip_list[-1]):\n return ip_list, ip_count\n return [], 0", - "docstring": "Given a string, it returns a list of one or more valid IP addresses" - }, - { - "code": "def announce(self, number):\n self.client.publish(self.keys.internal, self.keys.key(number))\n self.message('{} granted'.format(number))", - "docstring": "Announce an indicator change on both channels." - }, - { - "code": "def is_code_unit(self):\n for atom in self.expr.atoms():\n if not (str(atom).startswith(\"code\") or atom.is_Number):\n return False\n return True", - "docstring": "Is this a \"code\" unit?\n\n Returns\n -------\n True if the unit consists of atom units that being with \"code\".\n False otherwise" - }, - { - "code": "def load(self, infile):\n model = pickle.load(infile)\n self.__dict__.update(model.__dict__)", - "docstring": "Deserialize a model from a stored file.\n\n By default, unpickle an entire object. If `dump` is overridden to\n use a different storage format, `load` should be as well.\n\n :param file outfile: A file-like object from which to retrieve the\n serialized model." 
- }, - { - "code": "def bind(self, instance, auto=False):\n methods = [\n (m, cls.__dict__[m])\n for cls in inspect.getmro(type(instance))\n for m in cls.__dict__ if inspect.isfunction(cls.__dict__[m])\n ]\n try:\n deps_of_endpoints = [(method_ptr, self.entrypoint_deps(method_ptr))\n for (method_name, method_ptr) in methods]\n for (method_ptr, method_deps) in deps_of_endpoints:\n if len(method_deps) > 0:\n method_ptr(instance, **method_deps)\n except KeyError:\n pass\n if auto and instance not in self.current_scope.get_auto_bind_list():\n self.current_scope.auto_bind(instance)\n return instance", - "docstring": "Bind deps to instance\n\n :param instance:\n :param auto: follow update of DI and refresh binds once we will get something new\n :return:" - }, - { - "code": "def provide(self, name):\n rv = self[name]\n return rv(self) if callable(rv) else rv", - "docstring": "Gets the value registered with ``name`` and determines whether the\n value is a provider or a configuration setting. The ``KeyError`` is\n raised when the ``name`` is not found.\n\n The registered value is interpreted as a provider if it's callable. The\n provider is called with a single argument, the current\n :class:`Container` object. Returns the return value of a provider or\n the value itself in case the value is not callable.\n\n :param name:\n The name of the provider or configuration setting." - }, - { - "code": "def tokens(self):\n if self._tokens is None:\n self._tokens = TokenList(self._version, account_sid=self._solution['sid'], )\n return self._tokens", - "docstring": "Access the tokens\n\n :returns: twilio.rest.api.v2010.account.token.TokenList\n :rtype: twilio.rest.api.v2010.account.token.TokenList" - }, - { - "code": "def add_subcommands(self, command, *args, **kwargs):\n subcommands = kwargs.pop('subcommands', None)\n try:\n cmd = self[command]\n except KeyError:\n if 'formatter_class' not in kwargs:\n kwargs['formatter_class'] = self.formatter_class\n cmd = self.add_parser(command, *args, **kwargs)\n args, kwargs = tuple(), dict()\n if subcommands is not None:\n kwargs = subcommands\n child = CommandDecorator(\n argparser = self.argparser,\n commands = cmd.add_subparsers(*args, **kwargs),\n parent = self,\n name = command,\n )\n self.children.append(child)\n return child", - "docstring": "add subcommands.\n\n If command already defined, pass args and kwargs to add_subparsers()\n method, else to add_parser() method. 
This behaviour is for convenience,\n because I mostly use the sequence:\n\n >>> p = parser.add_parser('foo', help=\"some help\")\n >>> subparser = p.add_subparsers()\n\n If you want to configure your sub_parsers, you can do it with:\n\n >>> command.add_subcommands('cmd',\n help = \"cmd help\"\n subcommands = dict(\n title = \"title\"\n description = \"subcommands description\"\n )\n )" - }, - { - "code": "def _chunk_iter_progress(it, log, prefix):\n n_variants = 0\n before_all = time.time()\n before_chunk = before_all\n for chunk, chunk_length, chrom, pos in it:\n after_chunk = time.time()\n elapsed_chunk = after_chunk - before_chunk\n elapsed = after_chunk - before_all\n n_variants += chunk_length\n chrom = text_type(chrom, 'utf8')\n message = (\n '%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)' %\n (prefix, n_variants, elapsed, elapsed_chunk,\n int(chunk_length // elapsed_chunk))\n )\n if chrom:\n message += '; %s:%s' % (chrom, pos)\n print(message, file=log)\n log.flush()\n yield chunk, chunk_length, chrom, pos\n before_chunk = after_chunk\n after_all = time.time()\n elapsed = after_all - before_all\n print('%s all done (%s rows/s)' %\n (prefix, int(n_variants // elapsed)), file=log)\n log.flush()", - "docstring": "Wrap a chunk iterator for progress logging." - }, - { - "code": "def probe(*devices):\n for device in devices:\n _validate_device(device)\n cmd = 'partprobe -- {0}'.format(\" \".join(devices))\n out = __salt__['cmd.run'](cmd).splitlines()\n return out", - "docstring": "Ask the kernel to update its local partition data. When no args are\n specified all block devices are tried.\n\n Caution: Generally only works on devices with no mounted partitions and\n may take a long time to return if specified devices are in use.\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt '*' partition.probe\n salt '*' partition.probe /dev/sda\n salt '*' partition.probe /dev/sda /dev/sdb" - }, - { - "code": "def adduser(app, username, password):\n with app.app_context():\n create_user(username=username, password=password)\n click.echo('user created!')", - "docstring": "Add new user with admin access" - }, - { - "code": "def as_tokens(ctx: List[ParserRuleContext]) -> List[str]:\n return [as_token(e) for e in ctx]", - "docstring": "Return a stringified list of identifiers in ctx\n\n :param ctx: JSG parser item with a set of identifiers\n :return:" - }, - { - "code": "def _prepare_full_scan(self, **kwargs):\n self.db.open(new=True)\n ignored_fs = set()\n ignored_fs |= set(self.IGNORE_PATHS)\n mounts = salt.utils.fsutils._get_mounts()\n for device, data in mounts.items():\n if device in self.IGNORE_MOUNTS:\n for mpt in data:\n ignored_fs.add(mpt['mount_point'])\n continue\n for mpt in data:\n if mpt['type'] in self.IGNORE_FS_TYPES:\n ignored_fs.add(mpt['mount_point'])\n ignored_all = list()\n for entry in sorted(list(ignored_fs)):\n valid = True\n for e_entry in ignored_all:\n if entry.startswith(e_entry):\n valid = False\n break\n if valid:\n ignored_all.append(entry)\n for ignored_dir in ignored_all:\n dir_obj = IgnoredDir()\n dir_obj.path = ignored_dir\n self.db.store(dir_obj)\n allowed = [elm for elm in kwargs.get(\"filter\", \"\").split(\",\") if elm]\n for allowed_dir in allowed:\n dir_obj = AllowedDir()\n dir_obj.path = allowed_dir\n self.db.store(dir_obj)\n return ignored_all", - "docstring": "Prepare full system scan by setting up the database etc." 
- }, - { - "code": "def _parse_coroutine(self):\n while True:\n d = yield\n if d == int2byte(0):\n pass\n elif d == IAC:\n d2 = yield\n if d2 == IAC:\n self.received_data(d2)\n elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):\n self.command_received(d2, None)\n elif d2 in (DO, DONT, WILL, WONT):\n d3 = yield\n self.command_received(d2, d3)\n elif d2 == SB:\n data = []\n while True:\n d3 = yield\n if d3 == IAC:\n d4 = yield\n if d4 == SE:\n break\n else:\n data.append(d4)\n else:\n data.append(d3)\n self.negotiate(b''.join(data))\n else:\n self.received_data(d)", - "docstring": "Parser state machine.\n Every 'yield' expression returns the next byte." - }, - { - "code": "def load_ia_module(cmd):\n try:\n if cmd in list(cmd_aliases.keys()) + list(cmd_aliases.values()):\n _module = 'internetarchive.cli.ia_{0}'.format(cmd)\n return __import__(_module, fromlist=['internetarchive.cli'])\n else:\n _module = 'ia_{0}'.format(cmd)\n for ep in iter_entry_points('internetarchive.cli.plugins'):\n if ep.name == _module:\n return ep.load()\n raise ImportError\n except (ImportError, DistributionNotFound):\n print(\"error: '{0}' is not an ia command! See 'ia help'\".format(cmd),\n file=sys.stderr)\n matches = '\\t'.join(difflib.get_close_matches(cmd, cmd_aliases.values()))\n if matches:\n print('\\nDid you mean one of these?\\n\\t{0}'.format(matches))\n sys.exit(127)", - "docstring": "Dynamically import ia module." - }, - { - "code": "def update_failover_dns_record(env, zone_id, **kwargs):\n client = boto3.Session(profile_name=env).client('route53')\n response = {}\n hosted_zone_info = client.get_hosted_zone(Id=zone_id)\n zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')\n dns_name = kwargs.get('dns_name')\n failover_state = kwargs.get('failover_state')\n if failover_state.lower() != 'primary':\n primary_record = find_existing_record(env, zone_id, dns_name, check_key='Failover', check_value='PRIMARY')\n if not primary_record:\n raise PrimaryDNSRecordNotFound(\"Primary Failover DNS record not found: {}\".format(dns_name))\n if dns_name and dns_name.endswith(zone_name):\n dns_json = get_template(template_file='infrastructure/dns_failover_upsert.json.j2', **kwargs)\n LOG.info('Attempting to create DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name,\n kwargs['elb_aws_dns'], zone_id, zone_name)\n try:\n delete_existing_cname(env, zone_id, dns_name)\n response = client.change_resource_record_sets(\n HostedZoneId=zone_id,\n ChangeBatch=json.loads(dns_json), )\n LOG.info('Upserted DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'],\n zone_id, zone_name)\n except botocore.exceptions.ClientError as error:\n LOG.info('Error creating DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name,\n kwargs['elb_aws_dns'], zone_id, zone_name)\n LOG.debug(error)\n else:\n LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)\n LOG.debug('Route53 JSON Response: \\n%s', pformat(response))", - "docstring": "Create a Failover Route53 alias record in _env_ zone.\n\n Args:\n env (str): Deployment environment.\n zone_id (str): Route53 zone id.\n\n Keyword Args:\n dns_name (str): FQDN of application's dns entry to add/update.\n dns_ttl (int): DNS time-to-live (ttl)\n elb_aws_dns (str): DNS A Record of ELB from AWS\n elb_dns_zone_id (str): Zone ID of ELB DNS\n failover_state (str): if the record is primary or secondary\n primary_region (str): Primary AWS region for DNS" - }, - { - "code": "def multimatch(self, 
origin=None, rel=None, target=None, attrs=None, include_ids=False):\n raise NotImplementedError\n origin = origin if origin is None or isinstance(origin, set) else set([origin])\n rel = rel if rel is None or isinstance(rel, set) else set([rel])\n target = target if target is None or isinstance(target, set) else set([target])\n for index, curr_rel in enumerate(self._relationships):\n matches = True\n if origin and curr_rel[ORIGIN] not in origin:\n matches = False\n if rel and curr_rel[RELATIONSHIP] not in rel:\n matches = False\n if target and curr_rel[TARGET] not in target:\n matches = False\n if attrs:\n for k, v in attrs.items():\n if k not in curr_rel[ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:\n matches = False\n if matches:\n if include_ids:\n yield index, (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())\n else:\n yield (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())\n return", - "docstring": "Iterator over relationship IDs that match a pattern of components, with multiple options provided for each component\n\n origin - (optional) origin of the relationship (similar to an RDF subject), or set of values. If omitted any origin will be matched.\n rel - (optional) type IRI of the relationship (similar to an RDF predicate), or set of values. If omitted any relationship will be matched.\n target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object, or set of values. If omitted any target will be matched.\n attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).\n include_ids - If true include statement IDs with yield values" - }, - { - "code": "def clear_events(self, event_name):\n self.lock.acquire()\n try:\n q = self.get_event_q(event_name)\n q.queue.clear()\n except queue.Empty:\n return\n finally:\n self.lock.release()", - "docstring": "Clear all events of a particular name.\n\n Args:\n event_name: Name of the events to be popped." 
- }, - { - "code": "def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):\n if wave_min is not None:\n self.wave_min = wave_min\n if wave_max is not None:\n self.wave_max = wave_max\n raw_wave = self.raw[0]\n whr = np.logical_and(raw_wave * q.AA >= self.wave_min,\n raw_wave * q.AA <= self.wave_max)\n self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)\n self.throughput = self.raw[1][whr]\n print('Bandpass trimmed to',\n '{} - {}'.format(self.wave_min, self.wave_max))\n pts = len(self.wave)\n if isinstance(pixels_per_bin, int):\n self.pixels_per_bin = pixels_per_bin\n self.n_bins = int(pts/self.pixels_per_bin)\n elif isinstance(n_bins, int):\n self.n_bins = n_bins\n self.pixels_per_bin = int(pts/self.n_bins)\n else:\n raise ValueError(\"Please specify 'n_bins' OR 'pixels_per_bin' as integers.\")\n print('{} bins of {} pixels each.'.format(self.n_bins,\n self.pixels_per_bin))\n new_len = self.n_bins * self.pixels_per_bin\n start = (pts - new_len) // 2\n self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)\n self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)", - "docstring": "Break the filter up into bins and apply a throughput to each bin,\n useful for G141, G102, and other grisms\n\n Parameters\n ----------\n n_bins: int\n The number of bins to dice the throughput curve into\n pixels_per_bin: int (optional)\n The number of channels per bin, which will be used\n to calculate n_bins\n wave_min: astropy.units.quantity (optional)\n The minimum wavelength to use\n wave_max: astropy.units.quantity (optional)\n The maximum wavelength to use" - }, - { - "code": "def __distinguished_name(self, type, fname=None, lname=None,\n username=None):\n if username is None:\n uid = \"uid={}\".format(self.username)\n else:\n uid = \"uid={}\".format(username)\n dn_list = [\n uid,\n \"ou={}\".format(self.__organizational_unit(type)),\n self.client.basedn,\n ]\n return ','.join(dn_list)", - "docstring": "Assemble the DN of the user." - }, - { - "code": "def get_attached_instruments(\n self, expected: Dict[types.Mount, str])\\\n -> Dict[types.Mount, Dict[str, Optional[str]]]:\n to_return: Dict[types.Mount, Dict[str, Optional[str]]] = {}\n for mount in types.Mount:\n expected_instr = expected.get(mount, None)\n init_instr = self._attached_instruments.get(mount, {})\n found_model = init_instr.get('model', '')\n if expected_instr and found_model\\\n and not found_model.startswith(expected_instr):\n if self._strict_attached:\n raise RuntimeError(\n 'mount {}: expected instrument {} but got {}'\n .format(mount.name, expected_instr, init_instr))\n else:\n to_return[mount] = {\n 'model': find_config(expected_instr),\n 'id': None}\n elif found_model and expected_instr:\n to_return[mount] = init_instr\n elif found_model:\n to_return[mount] = init_instr\n elif expected_instr:\n to_return[mount] = {\n 'model': find_config(expected_instr),\n 'id': None}\n else:\n to_return[mount] = {\n 'model': None,\n 'id': None}\n return to_return", - "docstring": "Update the internal cache of attached instruments.\n\n This method allows after-init-time specification of attached simulated\n instruments. The method will return\n - the instruments specified at init-time, or if those do not exists,\n - the instruments specified in expected, or if that is not passed,\n - nothing\n\n :param expected: A mapping of mount to instrument model prefixes. 
When\n loading instruments from a prefix, we return the\n lexically-first model that matches the prefix. If the\n models specified in expected do not match the models\n specified in the `attached_instruments` argument of\n :py:meth:`__init__`, :py:attr:`RuntimeError` is\n raised.\n :raises RuntimeError: If an instrument is expected but not found.\n :returns: A dict of mount to either instrument model names or `None`." - }, - { - "code": "def spectra(self, alpha=None, nmax=None, convention='power', unit='per_l',\n base=10.):\n if alpha is None:\n if nmax is None:\n nmax = self.nmax\n spectra = _np.zeros((self.lmax+1, nmax))\n for iwin in range(nmax):\n coeffs = self.to_array(iwin)\n spectra[:, iwin] = _spectrum(coeffs, normalization='4pi',\n convention=convention, unit=unit,\n base=base)\n else:\n coeffs = self.to_array(alpha)\n spectra = _spectrum(coeffs, normalization='4pi',\n convention=convention, unit=unit, base=base)\n return spectra", - "docstring": "Return the spectra of one or more Slepian functions.\n\n Usage\n -----\n spectra = x.spectra([alpha, nmax, convention, unit, base])\n\n Returns\n -------\n spectra : ndarray, shape (lmax+1, nmax)\n A matrix with each column containing the spectrum of a Slepian\n function, and where the functions are arranged with increasing\n concentration factors. If alpha is set, only a single vector is\n returned, whereas if nmax is set, the first nmax spectra are\n returned.\n\n Parameters\n ----------\n alpha : int, optional, default = None\n The function number of the output spectrum, where alpha=0\n corresponds to the best concentrated Slepian function.\n nmax : int, optional, default = 1\n The number of best concentrated Slepian function power spectra\n to return.\n convention : str, optional, default = 'power'\n The type of spectrum to return: 'power' for power spectrum,\n 'energy' for energy spectrum, and 'l2norm' for the l2 norm\n spectrum.\n unit : str, optional, default = 'per_l'\n If 'per_l', return the total contribution to the spectrum for each\n spherical harmonic degree l. If 'per_lm', return the average\n contribution to the spectrum for each coefficient at spherical\n harmonic degree l. If 'per_dlogl', return the spectrum per log\n interval dlog_a(l).\n base : float, optional, default = 10.\n The logarithm base when calculating the 'per_dlogl' spectrum.\n\n Description\n -----------\n This function returns either the power spectrum, energy spectrum, or\n l2-norm spectrum of one or more of the Slepian funtions. Total power\n is defined as the integral of the function squared over all space,\n divided by the area the function spans. If the mean of the function is\n zero, this is equivalent to the variance of the function. The total\n energy is the integral of the function squared over all space and is\n 4pi times the total power. The l2-norm is the sum of the magnitude of\n the coefficients squared.\n\n The output spectrum can be expresed using one of three units. 'per_l'\n returns the contribution to the total spectrum from all angular orders\n at degree l. 'per_lm' returns the average contribution to the total\n spectrum from a single coefficient at degree l. The 'per_lm' spectrum\n is equal to the 'per_l' spectrum divided by (2l+1). 'per_dlogl' returns\n the contribution to the total spectrum from all angular orders over an\n infinitessimal logarithmic degree band. 
The contrubution in the band\n dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,\n and where spectrum(l, 'per_dlogl) is equal to\n spectrum(l, 'per_l')*l*log(a)." - }, - { - "code": "def fit(self, linear_predictor, event, time):\n risk_score = numpy.exp(linear_predictor)\n order = numpy.argsort(time, kind=\"mergesort\")\n risk_score = risk_score[order]\n uniq_times, n_events, n_at_risk = _compute_counts(event, time, order)\n divisor = numpy.empty(n_at_risk.shape, dtype=numpy.float_)\n value = numpy.sum(risk_score)\n divisor[0] = value\n k = 0\n for i in range(1, len(n_at_risk)):\n d = n_at_risk[i - 1] - n_at_risk[i]\n value -= risk_score[k:(k + d)].sum()\n k += d\n divisor[i] = value\n assert k == n_at_risk[0] - n_at_risk[-1]\n y = numpy.cumsum(n_events / divisor)\n self.cum_baseline_hazard_ = StepFunction(uniq_times, y)\n self.baseline_survival_ = StepFunction(self.cum_baseline_hazard_.x,\n numpy.exp(- self.cum_baseline_hazard_.y))\n return self", - "docstring": "Compute baseline cumulative hazard function.\n\n Parameters\n ----------\n linear_predictor : array-like, shape = (n_samples,)\n Linear predictor of risk: `X @ coef`.\n\n event : array-like, shape = (n_samples,)\n Contains binary event indicators.\n\n time : array-like, shape = (n_samples,)\n Contains event/censoring times.\n\n Returns\n -------\n self" - }, - { - "code": "def fetch(self, tickers, fields=None, date=None, date_from=None, date_to=None,\n freq='D', only_data=True, static=False):\n if static:\n query = self.construct_request(tickers, fields, date, freq='REP')\n else:\n query = self.construct_request(tickers, fields, date, date_from, date_to, freq)\n raw = self.request(query)\n if static:\n data, metadata = self.parse_record_static(raw)\n elif isinstance(tickers, basestring) or len(tickers) == 1:\n data, metadata = self.parse_record(raw)\n elif hasattr(tickers, '__len__'):\n metadata = pd.DataFrame()\n data = {}\n for indx in range(len(tickers)):\n dat, meta = self.parse_record(raw, indx)\n data[tickers[indx]] = dat\n metadata = metadata.append(meta, ignore_index=False)\n data = pd.concat(data)\n else:\n raise DatastreamException(('First argument should be either ticker or '\n 'list of tickers'))\n if only_data:\n return data\n else:\n return data, metadata", - "docstring": "Fetch data from TR DWE.\n\n tickers - ticker or list of tickers\n fields - list of fields.\n date - date for a single-date query\n date_from, date_to - date range (used only if \"date\" is not specified)\n freq - frequency of data: daily('D'), weekly('W') or monthly('M')\n only_data - if True then metadata will not be returned\n static - if True \"static\" request is created (i.e. not a series).\n In this case 'date_from', 'date_to' and 'freq' are ignored\n\n In case list of tickers is requested, a MultiIndex-dataframe is returned.\n\n Some of available fields:\n P - adjusted closing price\n PO - opening price\n PH - high price\n PL - low price\n VO - volume, which is expressed in 1000's of shares.\n UP - unadjusted price\n OI - open interest\n\n MV - market value\n EPS - earnings per share\n DI - dividend index\n MTVB - market to book value\n PTVB - price to book value\n ...\n\n The full list of data fields is available at http://dtg.tfn.com/." 
- }, - { - "code": "def phase_angle(ephemeris, body, t):\n earth = ephemeris['earth']\n sun = ephemeris['sun']\n body = ephemeris[body]\n pe = earth.at(t).observe(body)\n pe.position.au *= -1\n t2 = t.ts.tt_jd(t.tt - pe.light_time)\n ps = body.at(t2).observe(sun)\n return pe.separation_from(ps)", - "docstring": "Compute the phase angle of a body viewed from Earth.\n\n The ``body`` should be an integer or string that can be looked up in\n the given ``ephemeris``, which will also be asked to provide\n positions for the Earth and Sun. The return value will be an\n :class:`~skyfield.units.Angle` object." - }, - { - "code": "def detect_iter(self, det_iter, show_timer=False):\n num_images = det_iter._size\n if not isinstance(det_iter, mx.io.PrefetchingIter):\n det_iter = mx.io.PrefetchingIter(det_iter)\n start = timer()\n detections = self.mod.predict(det_iter).asnumpy()\n time_elapsed = timer() - start\n if show_timer:\n logging.info(\"Detection time for {} images: {:.4f} sec\".format(\n num_images, time_elapsed))\n result = Detector.filter_positive_detections(detections)\n return result", - "docstring": "detect all images in iterator\n\n Parameters:\n ----------\n det_iter : DetIter\n iterator for all testing images\n show_timer : Boolean\n whether to print out detection exec time\n\n Returns:\n ----------\n list of detection results" - }, - { - "code": "def _get_user(self, user):\n return ' '.join([user.username, user.first_name, user.last_name])", - "docstring": "Generate user filtering tokens." - }, - { - "code": "def text(el, strip=True):\n if not el:\n return \"\"\n text = el.text\n if strip:\n text = text.strip()\n return text", - "docstring": "Return the text of a ``BeautifulSoup`` element" - }, - { - "code": "def get_service_packages(self):\n api = self._get_api(billing.DefaultApi)\n package_response = api.get_service_packages()\n packages = []\n for state in PACKAGE_STATES:\n items = getattr(package_response, state) or []\n for item in ensure_listable(items):\n params = item.to_dict()\n params['state'] = state\n packages.append(ServicePackage(params))\n return packages", - "docstring": "Get all service packages" - }, - { - "code": "def load(self):\n if VERBOSE_PREF:\n print('[pref.load()]')\n fpath = self.get_fpath()\n try:\n with open(fpath, 'rb') as f:\n if VERBOSE_PREF:\n print('load: %r' % fpath)\n pref_dict = pickle.load(f)\n except EOFError as ex1:\n util_dbg.printex(ex1, 'did not load pref fpath=%r correctly' % fpath, iswarning=True)\n raise\n except ImportError as ex2:\n util_dbg.printex(ex2, 'did not load pref fpath=%r correctly' % fpath, iswarning=True)\n raise\n if not util_type.is_dict(pref_dict):\n raise Exception('Preference file is corrupted')\n self.add_dict(pref_dict)\n return True", - "docstring": "Read pref dict stored on disk. Overwriting current values." 
- }, - { - "code": "def quantile_for_single_value(self, **kwargs):\n if self._is_transposed:\n kwargs[\"axis\"] = kwargs.get(\"axis\", 0) ^ 1\n return self.transpose().quantile_for_single_value(**kwargs)\n axis = kwargs.get(\"axis\", 0)\n q = kwargs.get(\"q\", 0.5)\n assert type(q) is float\n def quantile_builder(df, **kwargs):\n try:\n return pandas.DataFrame.quantile(df, **kwargs)\n except ValueError:\n return pandas.Series()\n func = self._build_mapreduce_func(quantile_builder, **kwargs)\n result = self._full_axis_reduce(axis, func)\n if axis == 0:\n result.index = [q]\n else:\n result.columns = [q]\n return result", - "docstring": "Returns quantile of each column or row.\n\n Returns:\n A new QueryCompiler object containing the quantile of each column or row." - }, - { - "code": "def results(self):\n \"Returns a dict of outputs from the GPTask execution.\"\n if self._results is None:\n results = self._json_struct['results']\n def result_iterator():\n for result in results:\n datatype = None\n conversion = None\n for param in self.parent.parameters:\n if param['name'] == result['paramName']:\n datatype = param['datatype']\n if datatype is None:\n conversion = str\n else:\n conversion = datatype.fromJson\n dt = result['paramName']\n val = conversion(result['value'])\n yield (dt, val)\n self._results = dict(res for res in result_iterator())\n return self._results", - "docstring": "Returns a dict of outputs from the GPTask execution." - }, - { - "code": "def build_geometry_by_stop(\n feed: \"Feed\",\n stop_ids: Optional[List[str]] = None,\n *,\n use_utm: bool = False,\n) -> Dict:\n d = {}\n stops = feed.stops.copy()\n if stop_ids is not None:\n stops = stops[stops[\"stop_id\"].isin(stop_ids)]\n stops = stops[stops.stop_lat.notna() & stops.stop_lon.notna()]\n if use_utm:\n for stop, group in stops.groupby(\"stop_id\"):\n lat, lon = group[[\"stop_lat\", \"stop_lon\"]].values[0]\n d[stop] = sg.Point(utm.from_latlon(lat, lon)[:2])\n else:\n for stop, group in stops.groupby(\"stop_id\"):\n lat, lon = group[[\"stop_lat\", \"stop_lon\"]].values[0]\n d[stop] = sg.Point([lon, lat])\n return d", - "docstring": "Return a dictionary with the structure\n stop_id -> Shapely Point with coordinates of the stop.\n\n Parameters\n ----------\n feed : Feed\n use_utm : boolean\n If ``True``, then return each point in UTM coordinates\n appropriate to the region; otherwise use the default WGS84\n coordinates\n stop_ids : list\n Stop IDs (strings) from ``feed.stops`` to restrict output to\n\n Returns\n -------\n dictionary\n Each key is a stop ID and each value is a Shapely Point with\n coordinates of the stop\n\n Notes\n -----\n Assume the following feed attributes are not ``None``:\n\n - ``feed.stops``" - }, - { - "code": "def get_body(name):\n try:\n body, propag = _bodies[name.lower()]\n body.propagate = propag.propagate\n except KeyError as e:\n raise UnknownBodyError(e.args[0])\n return body", - "docstring": "Retrieve a given body orbits and parameters\n\n Args:\n name (str): Object name\n Return:\n Body:" - }, - { - "code": "def default(self, value):\n if isinstance(value, messages.Enum):\n return str(value)\n if six.PY3 and isinstance(value, bytes):\n return value.decode('utf8')\n if isinstance(value, messages.Message):\n result = {}\n for field in value.all_fields():\n item = value.get_assigned_value(field.name)\n if item not in (None, [], ()):\n result[field.name] = (\n self.__protojson_protocol.encode_field(field, item))\n for unknown_key in value.all_unrecognized_fields():\n unrecognized_field, _ = 
value.get_unrecognized_field_info(\n unknown_key)\n result[unknown_key] = unrecognized_field\n return result\n return super(MessageJSONEncoder, self).default(value)", - "docstring": "Return dictionary instance from a message object.\n\n Args:\n value: Value to get dictionary for. If not encodable, will\n call superclasses default method." - }, - { - "code": "def append(self, key, samples, sampling_rate):\n if not np.issubdtype(samples.dtype, np.floating):\n raise ValueError('Samples are required as np.float32!')\n if len(samples.shape) > 1:\n raise ValueError('Only single channel supported!')\n existing = self.get(key, mem_map=True)\n samples = (samples * MAX_INT16_VALUE).astype(np.int16)\n if existing is not None:\n existing_samples, existing_sr = existing\n if existing_sr != sampling_rate:\n raise ValueError('Different sampling-rate than existing data!')\n num_existing = existing_samples.shape[0]\n self._file[key].resize(num_existing + samples.shape[0], 0)\n self._file[key][num_existing:] = samples\n else:\n dset = self._file.create_dataset(key, data=samples,\n chunks=True, maxshape=(None,))\n dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate", - "docstring": "Append the given samples to the data that already exists\n in the container for the given key.\n\n Args:\n key (str): A key to store the data for.\n samples (numpy.ndarray): 1-D array of audio samples (int-16).\n sampling_rate (int): The sampling-rate of the audio samples.\n\n Note:\n The container has to be opened in advance.\n For appending to existing data the HDF5-Dataset has to be chunked,\n so it is not allowed to first add data via ``set``." - }, - { - "code": "def _get_compositor_prereqs(self, parent, prereq_names, skip=False,\n **dfilter):\n prereq_ids = []\n unknowns = set()\n for prereq in prereq_names:\n n, u = self._find_dependencies(prereq, **dfilter)\n if u:\n unknowns.update(u)\n if skip:\n u_str = \", \".join([str(x) for x in u])\n LOG.debug('Skipping optional %s: Unknown dataset %s',\n str(prereq), u_str)\n else:\n prereq_ids.append(n)\n self.add_child(parent, n)\n return prereq_ids, unknowns", - "docstring": "Determine prerequisite Nodes for a composite.\n\n Args:\n parent (Node): Compositor node to add these prerequisites under\n prereq_names (sequence): Strings (names), floats (wavelengths), or\n DatasetIDs to analyze.\n skip (bool, optional): If True, prerequisites are considered\n optional if they can't be found and a\n debug message is logged. If False (default),\n the missing prerequisites are not logged\n and are expected to be handled by the\n caller." - }, - { - "code": "def generate_transpose(node_name, in_name, out_name, axes, base_name, func_counter):\n trans = nnabla_pb2.Function()\n trans.type = \"Transpose\"\n set_function_name(trans, node_name, base_name, func_counter)\n trans.input.extend([in_name])\n trans.output.extend([out_name])\n tp = trans.transpose_param\n tp.axes.extend(axes)\n return trans", - "docstring": "Generate a Transpose operator to transpose the specified buffer." - }, - { - "code": "def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None):\n if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace,\n schema_agreement_wait=max_schema_agreement_wait, force=True):\n raise DriverException(\"Keyspace metadata was not refreshed. See log for details.\")", - "docstring": "Synchronously refresh keyspace metadata. This applies to keyspace-level information such as replication\n and durability settings. 
It does not refresh tables, types, etc. contained in the keyspace.\n\n See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior" - }, - { - "code": "def compute_files_to_download(client_hashes, server_hashes):\n to_dload, to_delete = [], []\n for filename in server_hashes:\n if filename not in client_hashes:\n to_dload.append(filename)\n continue\n if client_hashes[filename] != server_hashes[filename]:\n to_dload.append(filename)\n for filename in client_hashes:\n if filename not in server_hashes:\n to_delete.append(filename)\n return [to_dload, to_delete]", - "docstring": "Given a dictionary of file hashes from the client and the\n server, specify which files should be downloaded from the server\n\n :param client_hashes: a dictionary where the filenames are keys and the\n values are md5 hashes as strings\n :param server_hashes: a dictionary where the filenames are keys and the\n values are md5 hashes as strings\n :return: a list of 2 lists -> [to_dload, to_delete]\n to_dload- a list of filenames to get from the server\n to_delete- a list of filenames to delete from the folder\n\n Note: we will get a file from the server if a) it is not on the\n client or b) the md5 differs between the client and server\n\n Note: we will mark a file for deletion if it is not available on\n the server" - }, - { - "code": "def _create_worker(self, worker):\n worker.sig_started.connect(self._start)\n self._workers.append(worker)", - "docstring": "Common worker setup." - }, - { - "code": "def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):\n q_samples = np.array(q_samples)\n if not (q_samples.shape[0] == self.samples_int and\n q_samples.shape[1] == self.samples_prob):\n raise ValueError('Shape of q_samples should be [M_int, M_prob]')\n if grad_samples is not None:\n grad_samples = np.array(grad_samples)\n if not (grad_samples.shape[0] == self.samples_int and\n grad_samples.shape[1] == self.samples_prob):\n raise ValueError(\n)\n if method is None:\n method = self.method\n if method.lower() == 'empirical':\n return self._evalMetricEmpirical(q_samples, grad_samples)\n elif method.lower() == 'kernel':\n return self._evalMetricKernel(q_samples, grad_samples)\n else:\n raise ValueError('Unsupported metric evalation method')", - "docstring": "Evaluates the horsetail matching metric from given samples of the quantity\n of interest and gradient instead of evaluating them at a design.\n\n :param np.ndarray q_samples: samples of the quantity of interest,\n size (M_int, M_prob)\n :param np.ndarray grad_samples: samples of the gradien,\n size (M_int, M_prob, n_x)\n\n :return: metric_value - value of the metric\n\n :rtype: float" - }, - { - "code": "def _run_task_internal(self, task):\n hosts = self._list_available_hosts()\n self.inventory.restrict_to(hosts)\n runner = cirruscluster.ext.ansible.runner.Runner(\n pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name,\n module_args=task.module_args, forks=self.forks,\n remote_pass=self.remote_pass, module_path=self.module_path,\n timeout=self.timeout, remote_user=task.play.remote_user,\n remote_port=task.play.remote_port, module_vars=task.module_vars,\n private_key_file=self.private_key_file,\n private_key=self.private_key,\n setup_cache=self.SETUP_CACHE, basedir=task.play.basedir,\n conditional=task.only_if, callbacks=self.runner_callbacks,\n sudo=task.sudo, sudo_user=task.sudo_user,\n transport=task.transport, sudo_pass=task.sudo_pass, is_playbook=True\n )\n if 
task.async_seconds == 0:\n results = runner.run()\n else:\n results, poller = runner.run_async(task.async_seconds)\n self.stats.compute(results)\n if task.async_poll_interval > 0:\n results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)\n contacted = results.get('contacted',{})\n dark = results.get('dark', {})\n self.inventory.lift_restriction()\n if len(contacted.keys()) == 0 and len(dark.keys()) == 0:\n return None\n return results", - "docstring": "run a particular module step in a playbook" - }, - { - "code": "def drawSquiggle(self, p1, p2, breadth = 2):\n p1 = Point(p1)\n p2 = Point(p2)\n S = p2 - p1\n rad = abs(S)\n cnt = 4 * int(round(rad / (4 * breadth), 0))\n if cnt < 4:\n raise ValueError(\"points too close\")\n mb = rad / cnt\n matrix = TOOLS._hor_matrix(p1, p2)\n i_mat = ~matrix\n k = 2.4142135623765633\n points = []\n for i in range (1, cnt): \n if i % 4 == 1:\n p = Point(i, -k) * mb\n elif i % 4 == 3:\n p = Point(i, k) * mb\n else:\n p = Point(i, 0) * mb\n points.append(p * i_mat)\n points = [p1] + points + [p2]\n cnt = len(points)\n i = 0\n while i + 2 < cnt:\n self.drawCurve(points[i], points[i+1], points[i+2])\n i += 2\n return p2", - "docstring": "Draw a squiggly line from p1 to p2." - }, - { - "code": "def get_asset(self):\n import predix.data.asset\n asset = predix.data.asset.Asset()\n return asset", - "docstring": "Returns an instance of the Asset Service." - }, - { - "code": "def _scalar_coef_op_right(func):\r\n @wraps(func)\r\n def verif(self, scoef):\r\n if isinstance(scoef, numbers.Number):\r\n return ScalarCoefs(func(self, self._vec, scoef),\r\n self.nmax, self.mmax)\r\n else:\r\n raise TypeError(err_msg['no_combi_SC'])\r\n return verif", - "docstring": "decorator for operator overloading when ScalarCoef is on the\r\n right" - }, - { - "code": "def get(self, key):\n key = self._service_key(key)\n return self._service_ops['get'](key)", - "docstring": "Return the object in `service` named by `key` or None.\n\n Args:\n key: Key naming the object to retrieve.\n\n Returns:\n object or None" - }, - { - "code": "def teams(self):\n teams = self._teamlist.teams()\n current_teams = set(self._teamobjects.keys())\n new_teams = set(teams.keys())\n added = new_teams - current_teams\n removed = current_teams - new_teams\n for team in removed:\n del self._teamobjects[team]\n for team in added:\n self._teamobjects[team] = GitHubTeam(\n self._api, self._env, self._org, teams[team], team)\n return self._teamobjects.values()", - "docstring": "Return a sequence of `GitHubTeam` objects, one for each team in this\n org." 
- }, - { - "code": "def handle_initialize(self, data):\n self.tuner.update_search_space(data)\n send(CommandType.Initialized, '')\n return True", - "docstring": "data is search space" - }, - { - "code": "def setup_addon_register(self, harpoon):\n self.addon_getter = AddonGetter()\n self.addon_getter.add_namespace(\"harpoon.crosshairs\", Result.FieldSpec(), Addon.FieldSpec())\n register = Register(self.addon_getter, self)\n if \"addons\" in harpoon:\n addons = harpoon[\"addons\"]\n if type(addons) in (MergedOptions, dict) or getattr(addons, \"is_dict\", False):\n spec = sb.dictof(sb.string_spec(), sb.listof(sb.string_spec()))\n meta = Meta(harpoon, []).at(\"addons\")\n for namespace, adns in spec.normalise(meta, addons).items():\n register.add_pairs(*[(namespace, adn) for adn in adns])\n register.recursive_import_known()\n register.recursive_resolve_imported()\n return register", - "docstring": "Setup our addon register" - }, - { - "code": "def to_toml(value, pretty=False):\n if not toml:\n raise NotImplementedError('No supported TOML library available')\n return toml.dumps(make_toml_friendly(value)).rstrip()", - "docstring": "Serializes the given value to TOML.\n\n :param value: the value to serialize\n :param pretty:\n this argument is ignored, as no TOML libraries support this type of\n operation\n :type pretty: bool\n :rtype: str" - }, - { - "code": "def power_on(env, identifier):\n mgr = SoftLayer.HardwareManager(env.client)\n hw_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'hardware')\n env.client['Hardware_Server'].powerOn(id=hw_id)", - "docstring": "Power on a server." - }, - { - "code": "def bethe_find_crystalfield(populations, hopping):\n zero = lambda orb: [bethe_filling_zeroT(-em, tz) - pop \\\n for em, tz, pop in zip(orb, hopping, populations)]\n return fsolve(zero, np.zeros(len(populations)))", - "docstring": "Return the orbital energies to have the system populates as\n desired by the given individual populations" - }, - { - "code": "def validate(self):\n if self.access_token is None:\n raise ConfigurationError('No access token provided. '\n 'Set your access token during client initialization using: '\n '\"basecrm.Client(access_token= )\"')\n if re.search(r'\\s', self.access_token):\n raise ConfigurationError('Provided access token is invalid '\n 'as it contains disallowed characters. '\n 'Please double-check you access token.')\n if len(self.access_token) != 64:\n raise ConfigurationError('Provided access token is invalid '\n 'as it has invalid length. '\n 'Please double-check your access token.')\n if not self.base_url or not re.match(self.URL_REGEXP, self.base_url):\n raise ConfigurationError('Provided base url is invalid '\n 'as it not a valid URI. '\n 'Please make sure it incldues the schema part, '\n 'both http and https are accepted, '\n 'and the hierarchical part')\n return True", - "docstring": "Validates whether a configuration is valid.\n\n :rtype: bool\n :raises ConfigurationError: if no ``access_token`` provided.\n :raises ConfigurationError: if provided ``access_token`` is invalid - contains disallowed characters.\n :raises ConfigurationError: if provided ``access_token`` is invalid - has invalid length.\n :raises ConfigurationError: if provided ``base_url`` is invalid." 
- }, - { - "code": "def noisy_operation(self, operation: 'cirq.Operation') -> 'cirq.OP_TREE':\n if not hasattr(self.noisy_moments, '_not_overridden'):\n return self.noisy_moments([ops.Moment([operation])],\n operation.qubits)\n if not hasattr(self.noisy_moment, '_not_overridden'):\n return self.noisy_moment(ops.Moment([operation]), operation.qubits)\n assert False, 'Should be unreachable.'", - "docstring": "Adds noise to an individual operation.\n\n Args:\n operation: The operation to make noisy.\n\n Returns:\n An OP_TREE corresponding to the noisy operations implementing the\n noisy version of the given operation." - }, - { - "code": "def create(self, port, value, timestamp=None):\n session = self._session\n datapoint_class = self._datapoint_class\n attributes = {\n 'port': port,\n 'value': value,\n }\n if timestamp is not None:\n attributes['timestamp'] = to_iso_date(timestamp)\n attributes = build_request_body('data-point', None,\n attributes=attributes)\n def _process(json):\n data = json.get('data')\n return datapoint_class(data, session)\n return session.post(self._base_url, CB.json(201, _process),\n json=attributes)", - "docstring": "Post a new reading to a timeseries.\n\n A reading is comprised of a `port`, a `value` and a timestamp.\n\n A port is like a tag for the given reading and gives an\n indication of the meaning of the value.\n\n The value of the reading can be any valid json value.\n\n The timestamp is considered the time the reading was taken, as\n opposed to the `created` time of the data-point which\n represents when the data-point was stored in the Helium\n API. If the timestamp is not given the server will construct a\n timestemp upon receiving the new reading.\n\n Args:\n\n port(string): The port to use for the new data-point\n value: The value for the new data-point\n\n Keyword Args:\n\n timestamp(:class:`datetime`): An optional :class:`datetime` object" - }, - { - "code": "def set_prefix(self, elt, pyobj):\n if isinstance(pyobj, tuple):\n namespaceURI,localName = pyobj\n self.prefix = elt.getPrefix(namespaceURI)", - "docstring": "use this method to set the prefix of the QName,\n method looks in DOM to find prefix or set new prefix.\n This method must be called before get_formatted_content." - }, - { - "code": "def write_yara(self, output_file):\n fout = open(output_file, 'wb')\n fout.write('\\n')\n for iocid in self.yara_signatures:\n signature = self.yara_signatures[iocid]\n fout.write(signature)\n fout.write('\\n')\n fout.close()\n return True", - "docstring": "Write out yara signatures to a file." - }, - { - "code": "def lock_exists_in_either_channel_side(\n channel_state: NettingChannelState,\n secrethash: SecretHash,\n) -> bool:\n lock = get_lock(channel_state.our_state, secrethash)\n if not lock:\n lock = get_lock(channel_state.partner_state, secrethash)\n return lock is not None", - "docstring": "Check if the lock with `secrethash` exists in either our state or the partner's state" - }, - { - "code": "def decompress_messages(self, partitions_offmsgs):\n for pomsg in partitions_offmsgs:\n if pomsg['message']:\n pomsg['message'] = self.decompress_fun(pomsg['message'])\n yield pomsg", - "docstring": "Decompress pre-defined compressed fields for each message." 
- }, - { - "code": "def _parse_volumes(volume_values: dict) -> str:\n for v_values in volume_values:\n for v_key, v_value in v_values.items():\n if v_key == 'source':\n if v_value == '.':\n source = os.path.dirname(\n os.path.abspath(__file__))\n else:\n source = v_value\n if v_key == 'target':\n target = v_value\n volume_spec = [source + ':' + target]\n return volume_spec", - "docstring": "Parse volumes key.\n\n Args:\n volume_values (dict): volume configuration values\n\n Returns:\n string, volume specification with mount source and container path" - }, - { - "code": "def clone_to_path(gh_token, folder, sdk_git_id, branch_or_commit=None, *, pr_number=None):\n _LOGGER.info(\"Clone SDK repository %s\", sdk_git_id)\n url_parsing = urlsplit(sdk_git_id)\n sdk_git_id = url_parsing.path\n if sdk_git_id.startswith(\"/\"):\n sdk_git_id = sdk_git_id[1:]\n credentials_part = ''\n if gh_token:\n login = user_from_token(gh_token).login\n credentials_part = '{user}:{token}@'.format(\n user=login,\n token=gh_token\n )\n else:\n _LOGGER.warning('Will clone the repo without writing credentials')\n https_authenticated_url = 'https://{credentials}github.com/{sdk_git_id}.git'.format(\n credentials=credentials_part,\n sdk_git_id=sdk_git_id\n )\n _git_clone_to_path(https_authenticated_url, folder)\n if pr_number:\n try:\n checkout_with_fetch(folder, \"pull/{}/merge\".format(pr_number))\n return\n except Exception:\n pass\n checkout_with_fetch(folder, \"pull/{}/head\".format(pr_number))\n if branch_or_commit:\n repo = Repo(str(folder))\n repo.git.checkout(branch_or_commit)", - "docstring": "Clone the given repo_id to the folder.\n\n If PR number is specified fetch the magic branches\n pull//head or pull//merge from Github. \"merge\" is tried first, and fallback to \"head\".\n Beware that pr_number implies detached head, and then no push is possible.\n\n If branch is specified, checkout this branch or commit finally.\n\n :param str branch_or_commit: If specified, switch to this branch/commit.\n :param int pr_number: PR number." - }, - { - "code": "def editTileService(self,\n serviceDefinition=None,\n minScale=None,\n maxScale=None,\n sourceItemId=None,\n exportTilesAllowed=False,\n maxExportTileCount=100000):\n params = {\n \"f\" : \"json\",\n }\n if not serviceDefinition is None:\n params[\"serviceDefinition\"] = serviceDefinition\n if not minScale is None:\n params['minScale'] = float(minScale)\n if not maxScale is None:\n params['maxScale'] = float(maxScale)\n if not sourceItemId is None:\n params[\"sourceItemId\"] = sourceItemId\n if not exportTilesAllowed is None:\n params[\"exportTilesAllowed\"] = exportTilesAllowed\n if not maxExportTileCount is None:\n params[\"maxExportTileCount\"] = int(maxExportTileCount)\n url = self._url + \"/edit\"\n return self._post(url=url,\n param_dict=params,\n securityHandler=self._securityHandler,\n proxy_url=self._securityHandler.proxy_url,\n proxy_port=self._securityHandler.proxy_port)", - "docstring": "This post operation updates a Tile Service's properties\n\n Inputs:\n serviceDefinition - updates a service definition\n minScale - sets the services minimum scale for caching\n maxScale - sets the service's maximum scale for caching\n sourceItemId - The Source Item ID is the GeoWarehouse Item ID of the map service\n exportTilesAllowed - sets the value to let users export tiles\n maxExportTileCount - sets the maximum amount of tiles to be exported\n from a single call." 
- }, - { - "code": "def _expand_pattern_lists(pattern, **mappings):\n expanded_patterns = []\n f = string.Formatter()\n for (_, field_name, _, _) in f.parse(pattern):\n if field_name is None:\n continue\n (value, _) = f.get_field(field_name, None, mappings)\n if isinstance(value, list):\n token = '{{{0}}}'.format(field_name)\n expanded = [pattern.replace(token, six.text_type(elem)) for elem in value]\n for expanded_item in expanded:\n result = _expand_pattern_lists(expanded_item, **mappings)\n expanded_patterns += result\n return expanded_patterns\n return [pattern]", - "docstring": "Expands the pattern for any list-valued mappings, such that for any list of\n length N in the mappings present in the pattern, N copies of the pattern are\n returned, each with an element of the list substituted.\n\n pattern:\n A pattern to expand, for example ``by-role/{grains[roles]}``\n\n mappings:\n A dictionary of variables that can be expanded into the pattern.\n\n Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains\n\n .. code-block:: yaml\n\n grains:\n roles:\n - web\n - database\n\n This function will expand into two patterns,\n ``[by-role/web, by-role/database]``.\n\n Note that this method does not expand any non-list patterns." - }, - { - "code": "def wait(objects, count=None, timeout=None):\n for obj in objects:\n if not hasattr(obj, 'add_done_callback'):\n raise TypeError('Expecting sequence of waitable objects')\n if count is None:\n count = len(objects)\n if count < 0 or count > len(objects):\n raise ValueError('count must be between 0 and len(objects)')\n if count == 0:\n return [], objects\n pending = list(objects)\n done = []\n try:\n for obj in _wait(pending, timeout):\n done.append(obj)\n if len(done) == count:\n break\n except Timeout:\n pass\n return done, list(filter(bool, pending))", - "docstring": "Wait for one or more waitable objects.\n\n This method waits until *count* elements from the sequence of waitable\n objects *objects* have become ready. If *count* is ``None`` (the default),\n then wait for all objects to become ready.\n\n What \"ready\" is means depends on the object type. A waitable object is a\n objects that implements the ``add_done_callback()`` and\n ``remove_done_callback`` methods. This currently includes:\n\n * :class:`~gruvi.Event` - an event is ready when its internal flag is set.\n * :class:`~gruvi.Future` - a future is ready when its result is set.\n * :class:`~gruvi.Fiber` - a fiber is ready when has terminated.\n * :class:`~gruvi.Process` - a process is ready when the child has exited." - }, - { - "code": "def _dom_class(self, obj1, obj2):\n if isinstance(obj1, Double) or isinstance(obj2, Double):\n return Double\n if isinstance(obj1, Float) or isinstance(obj2, Float):\n return Float", - "docstring": "Return the dominating numeric class between the two\n\n :obj1: TODO\n :obj2: TODO\n :returns: TODO" - }, - { - "code": "def validate(option, value):\n lower = option.lower()\n validator = VALIDATORS.get(lower, raise_config_error)\n value = validator(option, value)\n return lower, value", - "docstring": "Generic validation function." 
- }, - { - "code": "def npar(self):\n self.control_data.npar = self.parameter_data.shape[0]\n return self.control_data.npar", - "docstring": "get number of parameters\n\n Returns\n -------\n npar : int\n the number of parameters" - }, - { - "code": "def facilityNetToMs():\n a = TpPd(pd=0x3)\n b = MessageType(mesType=0x3a)\n c = Facility()\n packet = a / b / c\n return packet", - "docstring": "FACILITY Section 9.3.9.1" - }, - { - "code": "def _wrap_with(color_code):\n def inner(text, bold=False):\n code = color_code\n if bold:\n code = flo(\"1;{code}\")\n return flo('\\033[{code}m{text}\\033[0m')\n return inner", - "docstring": "Color wrapper.\n\n Example:\n >>> blue = _wrap_with('34')\n >>> print(blue('text'))\n \\033[34mtext\\033[0m" - }, - { - "code": "def expression(value):\n if isinstance(value, Expression):\n return Expression(value._type, value._value)\n if hasattr(value, 'spl_json'):\n sj = value.spl_json()\n return Expression(sj['type'], sj['value'])\n return Expression('splexpr', value)", - "docstring": "Create an SPL expression.\n\n Args:\n value: Expression as a string or another `Expression`. If value is an instance of `Expression` then a new instance is returned containing the same type and value.\n\n Returns:\n Expression: SPL expression from `value`." - }, - { - "code": "def utc_datetime_and_leap_second(self):\n year, month, day, hour, minute, second = self._utc_tuple(\n _half_millisecond)\n second, fraction = divmod(second, 1.0)\n second = second.astype(int)\n leap_second = second // 60\n second -= leap_second\n milli = (fraction * 1000).astype(int) * 1000\n if self.shape:\n utcs = [utc] * self.shape[0]\n argsets = zip(year, month, day, hour, minute, second, milli, utcs)\n dt = array([datetime(*args) for args in argsets])\n else:\n dt = datetime(year, month, day, hour, minute, second, milli, utc)\n return dt, leap_second", - "docstring": "Convert to a Python ``datetime`` in UTC, plus a leap second value.\n\n Convert this time to a `datetime`_ object and a leap second::\n\n dt, leap_second = t.utc_datetime_and_leap_second()\n\n If the third-party `pytz`_ package is available, then its\n ``utc`` timezone will be used as the timezone of the return\n value. Otherwise, Skyfield uses its own ``utc`` timezone.\n\n The leap second value is provided because a Python ``datetime``\n can only number seconds ``0`` through ``59``, but leap seconds\n have a designation of at least ``60``. The leap second return\n value will normally be ``0``, but will instead be ``1`` if the\n date and time are a UTC leap second. Add the leap second value\n to the ``second`` field of the ``datetime`` to learn the real\n name of the second.\n\n If this time is an array, then an array of ``datetime`` objects\n and an array of leap second integers is returned, instead of a\n single value each." - }, - { - "code": "def cmu_mocap_49_balance(data_set='cmu_mocap'):\n train_motions = ['18', '19']\n test_motions = ['20']\n data = cmu_mocap('49', train_motions, test_motions, sample_every=4, data_set=data_set)\n data['info'] = \"One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of \" + data['info']\n return data", - "docstring": "Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009." 
- }, - { - "code": "async def keepalive_ping(self) -> None:\n if self.ping_interval is None:\n return\n try:\n while True:\n await asyncio.sleep(self.ping_interval, loop=self.loop)\n ping_waiter = await self.ping()\n if self.ping_timeout is not None:\n try:\n await asyncio.wait_for(\n ping_waiter, self.ping_timeout, loop=self.loop\n )\n except asyncio.TimeoutError:\n logger.debug(\"%s ! timed out waiting for pong\", self.side)\n self.fail_connection(1011)\n break\n except asyncio.CancelledError:\n raise\n except Exception:\n logger.warning(\"Unexpected exception in keepalive ping task\", exc_info=True)", - "docstring": "Send a Ping frame and wait for a Pong frame at regular intervals.\n\n This coroutine exits when the connection terminates and one of the\n following happens:\n - :meth:`ping` raises :exc:`ConnectionClosed`, or\n - :meth:`close_connection` cancels :attr:`keepalive_ping_task`." - }, - { - "code": "def visit_listcomp(self, node, parent):\n newnode = nodes.ListComp(node.lineno, node.col_offset, parent)\n newnode.postinit(\n self.visit(node.elt, newnode),\n [self.visit(child, newnode) for child in node.generators],\n )\n return newnode", - "docstring": "visit a ListComp node by returning a fresh instance of it" - }, - { - "code": "def uninstall(self):\n if self.is_installed():\n installed = self.installed_dir()\n if installed.is_symlink():\n installed.unlink()\n else:\n shutil.rmtree(str(installed))", - "docstring": "Delete code inside NApp directory, if existent." - }, - { - "code": "def run_script_from_macro(self, args):\n self.__macroArgs = args[\"args\"].split(',')\n try:\n self.run_script(args[\"name\"])\n except Exception as e:\n self.set_return_value(\"{ERROR: %s}\" % str(e))", - "docstring": "Used internally by AutoKey for phrase macros" - }, - { - "code": "def colorize(string, stack):\n codes = optimize(stack)\n if len(codes):\n prefix = SEQ % ';'.join(map(str, codes))\n suffix = SEQ % STYLE.reset\n return prefix + string + suffix\n else:\n return string", - "docstring": "Apply optimal ANSI escape sequences to the string." 
- }, - { - "code": "def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):\n raise NotImplementedError(\"SysInterface.dispatch_job_hook\")", - "docstring": "Hook to dispatch a single job" - }, - { - "code": "def _generate_read_callable(name, display_name, arguments, regex, doc, supported):\n def f(self, *args, **kwargs):\n url = self._generate_url(regex, args)\n if 'params' in kwargs:\n url += \"?\" + urllib.urlencode(kwargs['params'])\n return self._get_data(url, accept=(kwargs.get('accept')))\n f.__name__ = str('read_%s' % name)\n f.__doc__ = doc\n f._resource_uri = regex\n f._get_args = arguments\n f._put_or_post_args = None \n f.resource_name = display_name\n f.is_api_call = True\n f.is_supported_api = supported\n return f", - "docstring": "Returns a callable which conjures the URL for the resource and GETs a response" - }, - { - "code": "def get_gradebook_admin_session(self, proxy):\n if not self.supports_gradebook_admin():\n raise errors.Unimplemented()\n return sessions.GradebookAdminSession(proxy=proxy, runtime=self._runtime)", - "docstring": "Gets the OsidSession associated with the gradebook administration service.\n\n arg: proxy (osid.proxy.Proxy): a proxy\n return: (osid.grading.GradebookAdminSession) - a\n ``GradebookAdminSession``\n raise: NullArgument - ``proxy`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: Unimplemented - ``supports_gradebook_admin() is false``\n *compliance: optional -- This method must be implemented if\n ``supports_gradebook_admin()`` is true.*" - }, - { - "code": "def private_config_content(self, private_config):\n try:\n private_config_path = os.path.join(self.working_dir, \"private-config.cfg\")\n if private_config is None:\n private_config = ''\n if len(private_config) == 0 and os.path.exists(private_config_path):\n return\n with open(private_config_path, 'w+', encoding='utf-8') as f:\n if len(private_config) == 0:\n f.write('')\n else:\n private_config = private_config.replace(\"%h\", self._name)\n f.write(private_config)\n except OSError as e:\n raise IOUError(\"Can't write private-config file '{}': {}\".format(private_config_path, e))", - "docstring": "Update the private config\n\n :param private_config: content of the private configuration file" - }, - { - "code": "def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,\n knowledge_base):\n _ = stderr, time_taken, args, knowledge_base\n self.CheckReturn(cmd, return_val)\n result = rdf_protodict.AttributedDict()\n for entry in self._field_parser.ParseEntries(stdout):\n line_str = \" \".join(entry)\n mount_rslt = self.mount_re.match(line_str)\n if mount_rslt:\n device, mount_point, fs_type, option_str = mount_rslt.groups()\n result = rdf_client_fs.Filesystem()\n result.device = device\n result.mount_point = mount_point\n result.type = fs_type\n options = KeyValueParser(term=\",\").ParseToOrderedDict(option_str)\n for k, v in iteritems(options):\n options[k] = v or [True]\n result.options = rdf_protodict.AttributedDict(**options)\n yield result", - "docstring": "Parse the mount command output." - }, - { - "code": "async def refresh(self) -> None:\n LOGGER.debug('NodePool.refresh >>>')\n await pool.refresh_pool_ledger(self.handle)\n LOGGER.debug('NodePool.refresh <<<')", - "docstring": "Refresh local copy of pool ledger and update node pool connections." 
- }, - { - "code": "def unique_array(arr):\n if not len(arr):\n return np.asarray(arr)\n elif pd:\n if isinstance(arr, np.ndarray) and arr.dtype.kind not in 'MO':\n return pd.unique(arr)\n values = []\n for v in arr:\n if (isinstance(v, datetime_types) and\n not isinstance(v, cftime_types)):\n v = pd.Timestamp(v).to_datetime64()\n values.append(v)\n return pd.unique(values)\n else:\n arr = np.asarray(arr)\n _, uniq_inds = np.unique(arr, return_index=True)\n return arr[np.sort(uniq_inds)]", - "docstring": "Returns an array of unique values in the input order.\n\n Args:\n arr (np.ndarray or list): The array to compute unique values on\n\n Returns:\n A new array of unique values" - }, - { - "code": "def get_feed_url_from_video(video_url):\n web_page = Webpage(video_url)\n web_page.get_html_source()\n channel_id = \\\n web_page.soup.find_all(\"div\", {\"class\": \"yt-user-info\"})[0].a[\n \"href\"]\n channel_id = str(channel_id).strip().replace(\"/channel/\",\n \"\")\n return YoutubeChannel.get_feed_url_from_id(channel_id)", - "docstring": "Gets channel id and then creates feed url\n\n :param video_url: Url of video\n :return: feed url" - }, - { - "code": "def _maybe_connect(self, to_pid, callback=None):\n callback = stack_context.wrap(callback or (lambda stream: None))\n def streaming_callback(data):\n log.info('Received %d bytes from %s, discarding.' % (len(data), to_pid))\n log.debug(' data: %r' % (data,))\n def on_connect(exit_cb, stream):\n log.info('Connection to %s established' % to_pid)\n with self._connection_callbacks_lock:\n self._connections[to_pid] = stream\n self.__dispatch_on_connect_callbacks(to_pid, stream)\n self.__loop.add_callback(\n stream.read_until_close,\n exit_cb,\n streaming_callback=streaming_callback)\n create = False\n with self._connection_callbacks_lock:\n stream = self._connections.get(to_pid)\n callbacks = self._connection_callbacks.get(to_pid)\n if not stream:\n self._connection_callbacks[to_pid].append(callback)\n if not callbacks:\n create = True\n if stream:\n self.__loop.add_callback(callback, stream)\n return\n if not create:\n return\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n if not sock:\n raise self.SocketError('Failed opening socket')\n stream = IOStream(sock, io_loop=self.__loop)\n stream.set_nodelay(True)\n stream.set_close_callback(partial(self.__on_exit, to_pid, b'reached end of stream'))\n connect_callback = partial(on_connect, partial(self.__on_exit, to_pid), stream)\n log.info('Establishing connection to %s' % to_pid)\n stream.connect((to_pid.ip, to_pid.port), callback=connect_callback)\n if stream.closed():\n raise self.SocketError('Failed to initiate stream connection')\n log.info('Maybe connected to %s' % to_pid)", - "docstring": "Asynchronously establish a connection to the remote pid." 
- }, - { - "code": "def wrap_synchro(fn):\n @functools.wraps(fn)\n def _wrap_synchro(*args, **kwargs):\n motor_obj = fn(*args, **kwargs)\n if isinstance(motor_obj, motor.MotorCollection):\n client = MongoClient(delegate=motor_obj.database.client)\n database = Database(client, motor_obj.database.name)\n return Collection(database, motor_obj.name, delegate=motor_obj)\n if isinstance(motor_obj, motor.motor_tornado.MotorClientSession):\n return ClientSession(delegate=motor_obj)\n if isinstance(motor_obj, _MotorTransactionContext):\n return _SynchroTransactionContext(motor_obj)\n if isinstance(motor_obj, motor.MotorDatabase):\n client = MongoClient(delegate=motor_obj.client)\n return Database(client, motor_obj.name, delegate=motor_obj)\n if isinstance(motor_obj, motor.motor_tornado.MotorChangeStream):\n return ChangeStream(motor_obj)\n if isinstance(motor_obj, motor.motor_tornado.MotorLatentCommandCursor):\n return CommandCursor(motor_obj)\n if isinstance(motor_obj, motor.motor_tornado.MotorCommandCursor):\n return CommandCursor(motor_obj)\n if isinstance(motor_obj, _MotorRawBatchCommandCursor):\n return CommandCursor(motor_obj)\n if isinstance(motor_obj, motor.motor_tornado.MotorCursor):\n return Cursor(motor_obj)\n if isinstance(motor_obj, _MotorRawBatchCursor):\n return Cursor(motor_obj)\n if isinstance(motor_obj, motor.MotorGridIn):\n return GridIn(None, delegate=motor_obj)\n if isinstance(motor_obj, motor.MotorGridOut):\n return GridOut(None, delegate=motor_obj)\n if isinstance(motor_obj, motor.motor_tornado.MotorGridOutCursor):\n return GridOutCursor(motor_obj)\n else:\n return motor_obj\n return _wrap_synchro", - "docstring": "If decorated Synchro function returns a Motor object, wrap in a Synchro\n object." - }, - { - "code": "def _timed_process(self, *args, **kwargs):\n for processor in self._processors:\n start_time = _time.process_time()\n processor.process(*args, **kwargs)\n process_time = int(round((_time.process_time() - start_time) * 1000, 2))\n self.process_times[processor.__class__.__name__] = process_time", - "docstring": "Track Processor execution time for benchmarking." 
- }, - { - "code": "def execute(self, read_response=True):\n if self.command != pyhsm.defines.YSM_NULL:\n cmd_buf = struct.pack('BB', len(self.payload) + 1, self.command)\n else:\n cmd_buf = chr(self.command)\n cmd_buf += self.payload\n debug_info = None\n unlock = self.stick.acquire()\n try:\n if self.stick.debug:\n debug_info = \"%s (payload %i/0x%x)\" % (pyhsm.defines.cmd2str(self.command), \\\n len(self.payload), len(self.payload))\n self.stick.write(cmd_buf, debug_info)\n if not read_response:\n return None\n return self._read_response()\n finally:\n unlock()", - "docstring": "Write command to HSM and read response.\n\n @param read_response: Whether to expect a response or not.\n @type read_response: bool" - }, - { - "code": "def update_time(self):\n value = self._properties.get(\"updateTime\")\n if value is not None:\n try:\n value = datetime.datetime.strptime(\n value, datetime_helpers._RFC3339_MICROS\n )\n except ValueError:\n DatetimeNS = datetime_helpers.DatetimeWithNanoseconds\n value = DatetimeNS.from_rfc3339(value)\n naive = value.tzinfo is None or value.tzinfo.utcoffset(value) is None\n if naive:\n value = pytz.utc.localize(value)\n return value", - "docstring": "Retrieve the timestamp at which the variable was updated.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables\n\n Returns:\n :class:`~api_core.datetime_helpers.DatetimeWithNanoseconds`,\n :class:`datetime.datetime` or ``NoneType``:\n Datetime object parsed from RFC3339 valid timestamp, or\n ``None`` if the property is not set locally.\n\n Raises:\n ValueError: if value is not a valid RFC3339 timestamp" - }, - { - "code": "def get_bounding_box(points):\n assert len(points) > 0, \"At least one point has to be given.\"\n min_x, max_x = points[0]['x'], points[0]['x']\n min_y, max_y = points[0]['y'], points[0]['y']\n for point in points:\n min_x, max_x = min(min_x, point['x']), max(max_x, point['x'])\n min_y, max_y = min(min_y, point['y']), max(max_y, point['y'])\n p1 = Point(min_x, min_y)\n p2 = Point(max_x, max_y)\n return BoundingBox(p1, p2)", - "docstring": "Get the bounding box of a list of points.\n\n Parameters\n ----------\n points : list of points\n\n Returns\n -------\n BoundingBox" - }, - { - "code": "def is_running(self):\n pp = self.pid\n if pp:\n try:\n proc = psutil.Process(pp)\n if proc.status in (psutil.STATUS_STOPPED,\n psutil.STATUS_DEAD,\n psutil.STATUS_ZOMBIE):\n self.stop()\n return False\n else:\n return True\n except psutil.NoSuchProcess:\n pass\n return False", - "docstring": "True if the subprocess is running.\n\n If it's a zombie then we call\n :func:`desub.Desub.stop` to kill it with fire and return False." 
- }, - { - "code": "def _create_path_if_not_exist(self, path):\n if path and not os.path.exists(path):\n os.makedirs(path)", - "docstring": "Creates a folders path if it doesn't exist" - }, - { - "code": "def _fix_contig_orientation(contigs_fa, ref_fa, outfile, min_id=90, min_length=20, breaklen=200):\n if not os.path.exists(contigs_fa):\n raise Error('Cannot fix orientation of assembly contigs because file not found: ' + contigs_fa)\n tmp_coords = os.path.join(outfile + '.tmp.rename.coords')\n pymummer.nucmer.Runner(\n ref_fa,\n contigs_fa,\n tmp_coords,\n min_id=min_id,\n min_length=min_length,\n breaklen=breaklen,\n maxmatch=True,\n ).run()\n to_revcomp = set()\n not_revcomp = set()\n file_reader = pymummer.coords_file.reader(tmp_coords)\n for hit in file_reader:\n if hit.on_same_strand():\n not_revcomp.add(hit.qry_name)\n else:\n to_revcomp.add(hit.qry_name)\n os.unlink(tmp_coords)\n in_both = to_revcomp.intersection(not_revcomp)\n f = pyfastaq.utils.open_file_write(outfile)\n seq_reader = pyfastaq.sequences.file_reader(contigs_fa)\n for seq in seq_reader:\n if seq.id in to_revcomp and seq.id not in in_both:\n seq.revcomp()\n print(seq, file=f)\n pyfastaq.utils.close(f)\n return in_both", - "docstring": "Changes orientation of each contig to match the reference, when possible.\n Returns a set of names of contigs that had hits in both orientations to the reference" - }, - { - "code": "def remove_root_catalog(self, catalog_id):\n if self._catalog_session is not None:\n return self._catalog_session.remove_root_catalog(catalog_id=catalog_id)\n return self._hierarchy_session.remove_root(id_=catalog_id)", - "docstring": "Removes a root catalog.\n\n arg: catalog_id (osid.id.Id): the ``Id`` of a catalog\n raise: NotFound - ``catalog_id`` is not a root\n raise: NullArgument - ``catalog_id`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure occurred\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def get_sectie_by_id_and_afdeling(self, id, afdeling):\n try:\n aid = afdeling.id\n except AttributeError:\n aid = afdeling\n afdeling = self.get_kadastrale_afdeling_by_id(aid)\n afdeling.clear_gateway()\n def creator():\n url = self.base_url + '/municipality/%s/department/%s/section/%s' % (afdeling.gemeente.id, afdeling.id, id)\n h = self.base_headers\n p = {\n 'geometry': 'full',\n 'srs': '31370'\n }\n res = capakey_rest_gateway_request(url, h, p).json()\n return Sectie(\n res['sectionCode'],\n afdeling,\n self._parse_centroid(res['geometry']['center']),\n self._parse_bounding_box(res['geometry']['boundingBox']),\n res['geometry']['shape'],\n )\n if self.caches['long'].is_configured:\n key = 'get_sectie_by_id_and_afdeling_rest\n sectie = self.caches['long'].get_or_create(key, creator)\n else:\n sectie = creator()\n sectie.set_gateway(self)\n return sectie", - "docstring": "Get a `sectie`.\n\n :param id: An id of a sectie. eg. \"A\"\n :param afdeling: The :class:`Afdeling` for in which the `sectie` can \\\n be found. Can also be the id of and `afdeling`.\n :rtype: A :class:`Sectie`." 
- }, - { - "code": "def getTradeHistory(pair, connection=None, info=None, count=None):\n if info is not None:\n info.validate_pair(pair)\n if connection is None:\n connection = common.BTCEConnection()\n response = connection.makeJSONRequest(\"/api/3/trades/%s\" % pair)\n if type(response) is not dict:\n raise TypeError(\"The response is not a dict.\")\n history = response.get(pair)\n if type(history) is not list:\n raise TypeError(\"The response is a %r, not a list.\" % type(history))\n result = []\n if count is not None:\n history = history[:count]\n for h in history:\n h[\"pair\"] = pair\n t = Trade(**h)\n result.append(t)\n return result", - "docstring": "Retrieve the trade history for the given pair. Returns a list of\n Trade instances. If count is not None, it should be an integer, and\n specifies the number of items from the trade history that will be\n processed and returned." - }, - { - "code": "def get_phi_subvariables(self, var):\n if not self.is_phi_variable(var):\n return set()\n return self._phi_variables[var]", - "docstring": "Get sub-variables that phi variable `var` represents.\n\n :param SimVariable var: The variable instance.\n :return: A set of sub-variables, or an empty set if `var` is not a phi variable.\n :rtype: set" - }, - { - "code": "def clipRegionToScreen(self):\n if not self.isRegionValid():\n return None\n screens = PlatformManager.getScreenDetails()\n total_x, total_y, total_w, total_h = Screen(-1).getBounds()\n containing_screen = None\n for screen in screens:\n s_x, s_y, s_w, s_h = screen[\"rect\"]\n if self.x >= s_x and self.x+self.w <= s_x+s_w and self.y >= s_y and self.y+self.h <= s_y+s_h:\n return self\n elif self.x+self.w <= s_x or s_x+s_w <= self.x or self.y+self.h <= s_y or s_y+s_h <= self.y:\n continue\n elif self.x == total_x and self.y == total_y and self.w == total_w and self.h == total_h:\n return self\n else:\n x = max(self.x, s_x)\n y = max(self.y, s_y)\n w = min(self.w, s_w)\n h = min(self.h, s_h)\n return Region(x, y, w, h)\n return None", - "docstring": "Returns the part of the region that is visible on a screen\n\n If the region equals to all visible screens, returns Screen(-1).\n If the region is visible on multiple screens, returns the screen with the smallest ID.\n Returns None if the region is outside the screen." - }, - { - "code": "def render(self):\n breadcrumb_template = template.loader.get_template(self.template)\n extra_context = {\"breadcrumb\": self}\n return breadcrumb_template.render(extra_context, self.request)", - "docstring": "Renders the table using the template from the table options." - }, - { - "code": "def register_introspection_functions(self):\n self.funcs.update({'system.listMethods' : self.system_listMethods,\n 'system.methodSignature' : self.system_methodSignature,\n 'system.methodHelp' : self.system_methodHelp})", - "docstring": "Registers the XML-RPC introspection methods in the system\n namespace.\n\n see http://xmlrpc.usefulinc.com/doc/reserved.html" - }, - { - "code": "def get_namespace_by_keyword_version(self, keyword: str, version: str) -> Optional[Namespace]:\n filt = and_(Namespace.keyword == keyword, Namespace.version == version)\n return self.session.query(Namespace).filter(filt).one_or_none()", - "docstring": "Get a namespace with a given keyword and version." 
- }, - { - "code": "def load_datetime(value, dt_format):\n if dt_format.endswith('%z'):\n dt_format = dt_format[:-2]\n offset = value[-5:]\n value = value[:-5]\n if offset != offset.replace(':', ''):\n offset = '+' + offset.replace(':', '')\n value = value[:-1]\n return OffsetTime(offset).localize(datetime.strptime(value, dt_format))\n return datetime.strptime(value, dt_format)", - "docstring": "Create timezone-aware datetime object" - }, - { - "code": "async def dump_string(writer, val):\n await dump_varint(writer, len(val))\n await writer.awrite(val)", - "docstring": "Binary string dump\n\n :param writer:\n :param val:\n :return:" - }, - { - "code": "def is_supported(cls, file=None, request=None, response=None,\n url_info=None):\n tests = (\n (response, cls.is_response),\n (file, cls.is_file),\n (request, cls.is_request),\n (url_info, cls.is_url)\n )\n for instance, method in tests:\n if instance:\n try:\n result = method(instance)\n except NotImplementedError:\n pass\n else:\n if result:\n return True\n elif result is VeryFalse:\n return VeryFalse", - "docstring": "Given the hints, return whether the document is supported.\n\n Args:\n file: A file object containing the document.\n request (:class:`.http.request.Request`): An HTTP request.\n response (:class:`.http.request.Response`): An HTTP response.\n url_info (:class:`.url.URLInfo`): A URLInfo.\n\n Returns:\n bool: If True, the reader should be able to read it." - }, - { - "code": "def log(self, sequence, infoarray) -> None:\n if isinstance(sequence, sequencetools.ModelSequence):\n descr = sequence.descr_model\n else:\n descr = 'node'\n if self._isolate:\n descr = '%s_%s' % (descr, sequence.descr_sequence)\n if ((infoarray is not None) and\n (infoarray.info['type'] != 'unmodified')):\n descr = '%s_%s' % (descr, infoarray.info['type'])\n dirpath = sequence.dirpath_ext\n try:\n files = self.folders[dirpath]\n except KeyError:\n files: Dict[str, 'NetCDFFile'] = collections.OrderedDict()\n self.folders[dirpath] = files\n try:\n file_ = files[descr]\n except KeyError:\n file_ = NetCDFFile(\n name=descr,\n flatten=self._flatten,\n isolate=self._isolate,\n timeaxis=self._timeaxis,\n dirpath=dirpath)\n files[descr] = file_\n file_.log(sequence, infoarray)", - "docstring": "Prepare a |NetCDFFile| object suitable for the given |IOSequence|\n object, when necessary, and pass the given arguments to its\n |NetCDFFile.log| method." - }, - { - "code": "def get_field_for_object(field_type, field_id, form):\r\n field_name = field_type + '_' + str(field_id)\r\n return form.__getitem__(field_name)", - "docstring": "This tag allows one to get a specific series or event form field\r\n in registration views." 
- }, - { - "code": "def optimize_with_repeates(self,fast=None,verbose=None,n_times=10,lambd=None,lambd_g=None,lambd_n=None):\n verbose = dlimix.getVerbose(verbose)\n if not self.init: self._initGP(fast)\n opt_list = []\n fixed0 = sp.zeros_like(self.gp.getParams()['dataTerm'])\n for i in range(n_times):\n scales1 = self._getScalesRand()\n fixed1 = 1e-1*sp.randn(fixed0.shape[0],fixed0.shape[1])\n conv = self.trainGP(fast=fast,scales0=scales1,fixed0=fixed1,lambd=lambd,lambd_g=lambd_g,lambd_n=lambd_n)\n if conv:\n temp=1\n for j in range(len(opt_list)):\n if sp.allclose(abs(self.getScales()),abs(opt_list[j]['scales'])):\n temp=0\n opt_list[j]['counter']+=1\n break\n if temp==1:\n opt = {}\n opt['counter'] = 1\n opt['LML'] = self.getLML()\n opt['scales'] = self.getScales()\n opt_list.append(opt)\n LML = sp.array([opt_list[i]['LML'] for i in range(len(opt_list))])\n index = LML.argsort()[::-1]\n out = []\n if verbose:\n print(\"\\nLocal mimima\\n\")\n print(\"n_times\\t\\tLML\")\n print(\"------------------------------------\")\n for i in range(len(opt_list)):\n out.append(opt_list[index[i]])\n if verbose:\n print((\"%d\\t\\t%f\" % (opt_list[index[i]]['counter'], opt_list[index[i]]['LML'])))\n print(\"\")\n return out", - "docstring": "Train the model repeadly up to a number specified by the users with random restarts and\n return a list of all relative minima that have been found. This list is sorted according to\n least likelihood. Each list term is a dictionary with keys \"counter\", \"LML\", and \"scales\".\n\n After running this function, the vc object will be set at the last iteration. Thus, if you\n wish to get the vc object of one of the repeats, then set the scales. For example:\n\n vc.setScales(scales=optimize_with_repeates_output[0][\"scales\"])\n\n Args:\n fast: Boolean. if set to True initalize kronSumGP\n verbose: Boolean. If set to True, verbose output is produced. (default True)\n n_times: number of re-starts of the optimization. (default 10)" - }, - { - "code": "def get_density(self, compound='', element=''):\n _stack = self.stack\n if compound == '':\n _list_compounds = _stack.keys()\n list_all_dict = {}\n for _compound in _list_compounds:\n _list_element = _stack[_compound]['elements']\n list_all_dict[_compound] = {}\n for _element in _list_element:\n list_all_dict[_compound][_element] = self.get_density(\n compound=_compound,\n element=_element)\n return list_all_dict\n list_compounds = _stack.keys()\n if compound not in list_compounds:\n list_compounds_joined = ', '.join(list_compounds)\n raise ValueError(\"Compound '{}' could not be find in {}\".format(compile, list_compounds_joined))\n if element == '':\n element = compound\n list_element = _stack[compound].keys()\n if element not in list_element:\n list_element_joined = ', '.join(list_element)\n raise ValueError(\"Element '{}' should be any of those elements: {}\".format(element, list_element_joined))\n return _stack[compound][element]['density']['value']", - "docstring": "returns the list of isotopes for the element of the compound defined with their density\n\n Parameters:\n ===========\n compound: string (default is empty). 
If empty, all the stoichiometric will be displayed\n element: string (default is same as compound).\n\n Raises:\n =======\n ValueError if element is not defined in the stack" - }, - { - "code": "def save(self, force_insert=False, force_update=False):\n if force_insert and force_update:\n raise ValueError(\"Cannot force both insert and updating in resource saving.\")\n data = {}\n for name, field in self._meta.fields.items():\n if field.serialize:\n data[name] = field.dehydrate(getattr(self, name, None))\n insert = True if force_insert or self.resource_uri is None else False\n if insert:\n resp = self._meta.api.http_resource(\"POST\", self._meta.resource_name, data=self._meta.api.resource_serialize(data))\n else:\n resp = self._meta.api.http_resource(\"PUT\", self.resource_uri, data=self._meta.api.resource_serialize(data))\n if \"Location\" in resp.headers:\n resp = self._meta.api.http_resource(\"GET\", resp.headers[\"Location\"])\n elif resp.status_code == 204:\n resp = self._meta.api.http_resource(\"GET\", self.resource_uri)\n else:\n return\n data = self._meta.api.resource_deserialize(resp.text)\n self.__init__(**data)", - "docstring": "Saves the current instance. Override this in a subclass if you want to\n control the saving process.\n\n The 'force_insert' and 'force_update' parameters can be used to insist\n that the \"save\" must be a POST or PUT respectively. Normally, they\n should not be set." - }, - { - "code": "def getDataAtRva(self, rva, size):\n return self.getDataAtOffset(self.getOffsetFromRva(rva), size)", - "docstring": "Gets binary data at a given RVA.\n \n @type rva: int\n @param rva: The RVA to get the data from.\n \n @type size: int\n @param size: The size of the data to be obtained. \n \n @rtype: str\n @return: The data obtained at the given RVA." - }, - { - "code": "def exclude(self, **attrs):\n for k, v in attrs.items():\n exclude = getattr(self, '_exclude_' + k, None)\n if exclude:\n exclude(v)\n else:\n self._exclude_misc(k, v)", - "docstring": "Remove items from distribution that are named in keyword arguments\n\n For example, 'dist.exclude(py_modules=[\"x\"])' would remove 'x' from\n the distribution's 'py_modules' attribute. Excluding packages uses\n the 'exclude_package()' method, so all of the package's contained\n packages, modules, and extensions are also excluded.\n\n Currently, this method only supports exclusion from attributes that are\n lists or tuples. If you need to add support for excluding from other\n attributes in this or a subclass, you can add an '_exclude_X' method,\n where 'X' is the name of the attribute. The method will be called with\n the value passed to 'exclude()'. So, 'dist.exclude(foo={\"bar\":\"baz\"})'\n will try to call 'dist._exclude_foo({\"bar\":\"baz\"})', which can then\n handle whatever special exclusion logic is needed." 
- }, - { - "code": "def set_priorities(SO_methods, ask):\n SO_methods = [meth.strip() for meth in SO_methods]\n SO_defaults = ['SO-SUN', 'SO-GPS-DIFF', 'SO-SUN-SIGHT', 'SO-SIGHT', 'SO-SIGHT-BS',\n 'SO-CMD-NORTH', 'SO-MAG', 'SO-SM', 'SO-REC', 'SO-V', 'SO-CORE', 'SO-NO']\n SO_priorities, prior_list = [], []\n if len(SO_methods) >= 1:\n for l in range(len(SO_defaults)):\n if SO_defaults[l] in SO_methods:\n SO_priorities.append(SO_defaults[l])\n pri, change = 0, \"1\"\n if ask == 1:\n print(\n)\n for m in range(len(SO_defaults)):\n if SO_defaults[m] in SO_methods:\n SO_priorities[SO_methods.index(SO_defaults[m])] = pri\n pri += 1\n while change == \"1\":\n prior_list = SO_priorities\n for m in range(len(SO_methods)):\n print(SO_methods[m], SO_priorities[m])\n change = input(\"Change these? 1/[0] \")\n if change != \"1\":\n break\n SO_priorities = []\n for l in range(len(SO_methods)):\n print(SO_methods[l])\n print(\" Priority? \", prior_list)\n pri = int(input())\n SO_priorities.append(pri)\n del prior_list[prior_list.index(pri)]\n return SO_priorities", - "docstring": "figure out which sample_azimuth to use, if multiple orientation methods" - }, - { - "code": "def update_lambda_configuration( self,\n lambda_arn,\n function_name,\n handler,\n description='Zappa Deployment',\n timeout=30,\n memory_size=512,\n publish=True,\n vpc_config=None,\n runtime='python2.7',\n aws_environment_variables=None,\n aws_kms_key_arn=None\n ):\n print(\"Updating Lambda function configuration..\")\n if not vpc_config:\n vpc_config = {}\n if not self.credentials_arn:\n self.get_credentials_arn()\n if not aws_kms_key_arn:\n aws_kms_key_arn = ''\n if not aws_environment_variables:\n aws_environment_variables = {}\n lambda_aws_config = self.lambda_client.get_function_configuration(FunctionName=function_name)\n if \"Environment\" in lambda_aws_config:\n lambda_aws_environment_variables = lambda_aws_config[\"Environment\"].get(\"Variables\", {})\n for key, value in lambda_aws_environment_variables.items():\n if key not in aws_environment_variables:\n aws_environment_variables[key] = value\n response = self.lambda_client.update_function_configuration(\n FunctionName=function_name,\n Runtime=runtime,\n Role=self.credentials_arn,\n Handler=handler,\n Description=description,\n Timeout=timeout,\n MemorySize=memory_size,\n VpcConfig=vpc_config,\n Environment={'Variables': aws_environment_variables},\n KMSKeyArn=aws_kms_key_arn,\n TracingConfig={\n 'Mode': 'Active' if self.xray_tracing else 'PassThrough'\n }\n )\n resource_arn = response['FunctionArn']\n if self.tags:\n self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)\n return resource_arn", - "docstring": "Given an existing function ARN, update the configuration variables." - }, - { - "code": "def deserialize(self, value, **kwargs):\n kwargs.update({'trusted': kwargs.get('trusted', False)})\n if self.deserializer is not None:\n return self.deserializer(value, **kwargs)\n if value is None:\n return None\n return self.wrapper(value).astype(self.dtype[0])", - "docstring": "De-serialize the property value from JSON\n\n If no deserializer has been registered, this converts the value\n to the wrapper class with given dtype." 
- }, - { - "code": "def Execute(self, http, sleep_between_polls=5, max_retries=5,\n max_batch_size=None, batch_request_callback=None):\n requests = [request for request in self.api_requests\n if not request.terminal_state]\n batch_size = max_batch_size or len(requests)\n for attempt in range(max_retries):\n if attempt:\n time.sleep(sleep_between_polls)\n for i in range(0, len(requests), batch_size):\n batch_http_request = BatchHttpRequest(\n batch_url=self.batch_url,\n callback=batch_request_callback,\n response_encoding=self.response_encoding\n )\n for request in itertools.islice(requests,\n i, i + batch_size):\n batch_http_request.Add(\n request.http_request, request.HandleResponse)\n batch_http_request.Execute(http)\n if hasattr(http.request, 'credentials'):\n if any(request.authorization_failed\n for request in itertools.islice(requests,\n i, i + batch_size)):\n http.request.credentials.refresh(http)\n requests = [request for request in self.api_requests if not\n request.terminal_state]\n if not requests:\n break\n return self.api_requests", - "docstring": "Execute all of the requests in the batch.\n\n Args:\n http: httplib2.Http object for use in the request.\n sleep_between_polls: Integer number of seconds to sleep between\n polls.\n max_retries: Max retries. Any requests that have not succeeded by\n this number of retries simply report the last response or\n exception, whatever it happened to be.\n max_batch_size: int, if specified requests will be split in batches\n of given size.\n batch_request_callback: function of (http_response, exception) passed\n to BatchHttpRequest which will be run on any given results.\n\n Returns:\n List of ApiCalls." - }, - { - "code": "def document_geom(geom):\n docstring = dedent(geom.__doc__)\n signature = make_signature(geom.__name__,\n geom.DEFAULT_PARAMS,\n common_geom_params,\n common_geom_param_values)\n usage = GEOM_SIGNATURE_TPL.format(signature=signature)\n contents = OrderedDict(('**{}**'.format(ae), '')\n for ae in sorted(geom.REQUIRED_AES))\n if geom.DEFAULT_AES:\n d = geom.DEFAULT_AES.copy()\n d['group'] = ''\n contents.update(sorted(d.items()))\n table = dict_to_table(('Aesthetic', 'Default value'), contents)\n aesthetics_table = AESTHETICS_TABLE_TPL.format(table=table)\n tpl = dedent(geom._aesthetics_doc.lstrip('\\n'))\n aesthetics_doc = tpl.format(aesthetics_table=aesthetics_table)\n aesthetics_doc = indent(aesthetics_doc, ' '*4)\n d = geom.DEFAULT_PARAMS\n common_parameters = GEOM_PARAMS_TPL.format(\n default_stat=d['stat'],\n default_position=d['position'],\n default_na_rm=d['na_rm'],\n default_inherit_aes=d.get('inherit_aes', True),\n _aesthetics_doc=aesthetics_doc,\n **common_params_doc)\n docstring = docstring.replace('{usage}', usage)\n docstring = docstring.replace('{common_parameters}',\n common_parameters)\n geom.__doc__ = docstring\n return geom", - "docstring": "Create a structured documentation for the geom\n\n It replaces `{usage}`, `{common_parameters}` and\n `{aesthetics}` with generated documentation." - }, - { - "code": "def attrs(self, attribute_name):\n desc = u'attrs({!r})'.format(attribute_name)\n return self.map(lambda el: el.get_attribute(attribute_name), desc).results", - "docstring": "Retrieve HTML attribute values from the elements matched by the query.\n\n Example usage:\n\n .. code:: python\n\n # Assume that the query matches html elements:\n #
<div class=\"foo\"> and <div class=\"bar\">
\n >> q.attrs('class')\n ['foo', 'bar']\n\n Args:\n attribute_name (str): The name of the attribute values to retrieve.\n\n Returns:\n A list of attribute values for `attribute_name`." - }, - { - "code": "def num_frames(self, num_samples):\n return math.ceil(float(max(num_samples - self.frame_size, 0)) / float(self.hop_size)) + 1", - "docstring": "Return the number of frames that will be used for a signal with the length of ``num_samples``." - }, - { - "code": "def find_generations(container, with_data=False):\n generations = []\n generations.append(set())\n all_children = set()\n if with_data:\n stochastics_to_iterate = container.stochastics | container.observed_stochastics\n else:\n stochastics_to_iterate = container.stochastics\n for s in stochastics_to_iterate:\n all_children.update(s.extended_children & stochastics_to_iterate)\n generations[0] = stochastics_to_iterate - all_children\n children_remaining = True\n gen_num = 0\n while children_remaining:\n gen_num += 1\n generations.append(set())\n for s in generations[gen_num - 1]:\n generations[gen_num].update(\n s.extended_children & stochastics_to_iterate)\n thisgen_children = set()\n for s in generations[gen_num]:\n thisgen_children.update(\n s.extended_children & stochastics_to_iterate)\n generations[gen_num] -= thisgen_children\n if len(thisgen_children) == 0:\n children_remaining = False\n return generations", - "docstring": "A generation is the set of stochastic variables that only has parents in\n previous generations." - }, - { - "code": "def calculate_sampling_decision(trace_header, recorder, sampling_req):\n if trace_header.sampled is not None and trace_header.sampled != '?':\n return trace_header.sampled\n elif not recorder.sampling:\n return 1\n else:\n decision = recorder.sampler.should_trace(sampling_req)\n return decision if decision else 0", - "docstring": "Return 1 or the matched rule name if should sample and 0 if should not.\n The sampling decision coming from ``trace_header`` always has\n the highest precedence. If the ``trace_header`` doesn't contain\n sampling decision then it checks if sampling is enabled or not\n in the recorder. If not enbaled it returns 1. Otherwise it uses user\n defined sampling rules to decide." - }, - { - "code": "def render(args):\n srcpath = (\n os.path.join(os.getcwd(), 'templates') if args['--srcpath'] is None\n else args['--srcpath'] if os.path.isabs(args['--srcpath'])\n else os.path.join(os.getcwd(), args['--srcpath'])\n )\n if not os.path.isdir(srcpath):\n print(\"The templates directory '%s' is invalid.\"\n % srcpath)\n sys.exit(1)\n if args['--outpath'] is not None:\n outpath = args['--outpath']\n else:\n outpath = os.getcwd()\n if not os.path.isdir(outpath):\n print(\"The output directory '%s' is invalid.\"\n % outpath)\n sys.exit(1)\n staticdirs = args['--static']\n staticpaths = None\n if staticdirs:\n staticpaths = staticdirs.split(\",\")\n for path in staticpaths:\n path = os.path.join(srcpath, path)\n if not os.path.isdir(path):\n print(\"The static files directory '%s' is invalid.\" % path)\n sys.exit(1)\n site = staticjinja.make_site(\n searchpath=srcpath,\n outpath=outpath,\n staticpaths=staticpaths\n )\n use_reloader = args['watch']\n site.render(use_reloader=use_reloader)", - "docstring": "Render a site.\n\n :param args:\n A map from command-line options to their values. 
For example:\n\n {\n '--help': False,\n '--outpath': None,\n '--srcpath': None,\n '--static': None,\n '--version': False,\n 'build': True,\n 'watch': False\n }" - }, - { - "code": "def set_menu(self, menu):\n self.menu = menu\n wx_menu = menu.wx_menu()\n self.frame.SetMenuBar(wx_menu)\n self.frame.Bind(wx.EVT_MENU, self.on_menu)", - "docstring": "add a menu from the parent" - }, - { - "code": "def execute_with_client(quiet=False,\n bootstrap_server=False,\n create_client=True):\n def wrapper(f):\n def wrapper2(self, *args, **kwargs):\n client = self.current_client(\n quiet=quiet,\n bootstrap_server=bootstrap_server,\n create_client=create_client)\n if client and client.running:\n return f(self, client, *args, **kwargs)\n return wrapper2\n return wrapper", - "docstring": "Decorator that gets a client and performs an operation on it." - }, - { - "code": "def command_health(self):\n if len(self.args) == 1 and self.args[0] == \"health\":\n PackageHealth(mode=\"\").test()\n elif (len(self.args) == 2 and self.args[0] == \"health\" and\n self.args[1] == \"--silent\"):\n PackageHealth(mode=self.args[1]).test()\n else:\n usage(\"\")", - "docstring": "Check package health" - }, - { - "code": "def _shuffle_tfrecord(path, random_gen):\n record_iter = tf.compat.v1.io.tf_record_iterator(path)\n all_records = [\n r for r in utils.tqdm(\n record_iter, desc=\"Reading...\", unit=\" examples\", leave=False)\n ]\n random_gen.shuffle(all_records)\n with tf.io.TFRecordWriter(path) as writer:\n for record in utils.tqdm(\n all_records, desc=\"Writing...\", unit=\" examples\", leave=False):\n writer.write(record)", - "docstring": "Shuffle a single record file in memory." - }, - { - "code": "def load_profiles(self, overwrite=False):\n for profile in self.minimum_needs.get_profiles(overwrite):\n self.profile_combo.addItem(profile)\n minimum_needs = self.minimum_needs.get_full_needs()\n self.profile_combo.setCurrentIndex(\n self.profile_combo.findText(minimum_needs['profile']))", - "docstring": "Load the profiles into the dropdown list.\n\n :param overwrite: If we overwrite existing profiles from the plugin.\n :type overwrite: bool" - }, - { - "code": "def create_data_item_from_data(self, data: numpy.ndarray, title: str=None) -> DataItem:\n return DataItem(self.__document_controller.add_data(data, title))", - "docstring": "Create a data item in the library from data.\n\n .. versionadded:: 1.0\n .. 
deprecated:: 1.1\n Use :py:meth:`~nion.swift.Facade.Library.create_data_item_from_data` instead.\n\n Scriptable: No" - }, - { - "code": "def main(argv=None):\n args = parse_arguments(sys.argv if argv is None else argv)\n temp_dir = os.path.join(args.output, 'tmp')\n if args.cloud:\n pipeline_name = 'DataflowRunner'\n else:\n pipeline_name = 'DirectRunner'\n os.environ['TF_CPP_MIN_LOG_LEVEL']='3'\n options = {\n 'job_name': args.job_name,\n 'temp_location': temp_dir,\n 'project': args.project_id,\n 'setup_file':\n os.path.abspath(os.path.join(\n os.path.dirname(__file__),\n 'setup.py')),\n }\n if args.num_workers:\n options['num_workers'] = args.num_workers\n if args.worker_machine_type:\n options['worker_machine_type'] = args.worker_machine_type\n pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)\n p = beam.Pipeline(pipeline_name, options=pipeline_options)\n preprocess(pipeline=p, args=args)\n pipeline_result = p.run()\n if not args.async:\n pipeline_result.wait_until_finish()\n if args.async and args.cloud:\n print('View job at https://console.developers.google.com/dataflow/job/%s?project=%s' %\n (pipeline_result.job_id(), args.project_id))", - "docstring": "Run Preprocessing as a Dataflow." - }, - { - "code": "def _expect_response(cls, response, code):\n if response.code != code:\n raise errors.ClientError(\n 'Expected {!r} response but got {!r}'.format(\n code, response.code))\n return response", - "docstring": "Ensure we got the expected response code." - }, - { - "code": "def builds(self, request, pk=None):\n builds = self.get_object().builds.prefetch_related('test_runs').order_by('-datetime')\n page = self.paginate_queryset(builds)\n serializer = BuildSerializer(page, many=True, context={'request': request})\n return self.get_paginated_response(serializer.data)", - "docstring": "List of builds for the current project." - }, - { - "code": "def stream(input, encoding=None, errors='strict'):\n\tinput = (i for i in input if i)\n\tif encoding:\n\t\tinput = iterencode(input, encoding, errors=errors)\n\treturn input", - "docstring": "Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.\n\t\n\tUsed internally by ``cinje.flatten``, this allows for easy use of a template generator as a WSGI body." 
- }, - { - "code": "def agents():\n print 'The following LiveSync agents are active:'\n agent_list = LiveSyncAgent.find().order_by(LiveSyncAgent.backend_name, db.func.lower(LiveSyncAgent.name)).all()\n table_data = [['ID', 'Name', 'Backend', 'Initial Export', 'Queue']]\n for agent in agent_list:\n initial = (cformat('%{green!}done%{reset}') if agent.initial_data_exported else\n cformat('%{yellow!}pending%{reset}'))\n if agent.backend is None:\n backend_title = cformat('%{red!}invalid backend ({})%{reset}').format(agent.backend_name)\n else:\n backend_title = agent.backend.title\n table_data.append([unicode(agent.id), agent.name, backend_title, initial,\n unicode(agent.queue.filter_by(processed=False).count())])\n table = AsciiTable(table_data)\n table.justify_columns[4] = 'right'\n print table.table\n if not all(a.initial_data_exported for a in agent_list):\n print\n print \"You need to perform the initial data export for some agents.\"\n print cformat(\"To do so, run \"\n \"%{yellow!}indico livesync initial_export %{reset}%{yellow}%{reset} for those agents.\")", - "docstring": "Lists the currently active agents" - }, - { - "code": "def symbol_list(what_list):\n if what_list is \"list1\":\n symbol=['ro','bo','ko','go','mo'\\\n ,'r-','b-','k-','g-','m-','r--','b--','k--'\\\n ,'g--','r1']\n elif what_list is \"list2\":\n symbol=['r-','b--','g-.','k:','md','.','o','v','^','<','>','1','2',\\\n '3','4','s','p','*','h','H','+']\n elif what_list is \"lines1\":\n symbol=['b--','k--','r--','c--','m--','g--','b-','k-','r-','c-','m-','g-','b.','b-.','k-.','r-.','c-.','m-.','g-.','b:','k:','r:','c:','m:','g:']\n elif what_list is \"lines2\":\n symbol=['g:','r-.','k-','b--','k-.','b+','r:','b-','c--','m--','g--','r-','c-','m-','g-','k-.','c-.','m-.','g-.','k:','r:','c:','m:','b-.','b:']\n return symbol", - "docstring": "provide default symbol lists\n\n Parameters\n ----------\n what_list : string\n String name of symbol lists provided; \"list1\", \"list2\",\n \"lines1\" or \"lines2\"." - }, - { - "code": "def size(self, size):\n clone = self._clone()\n clone._size = size\n return clone", - "docstring": "Set the query size of this QuerySet should execute its query against." - }, - { - "code": "def redraw_now(self, whence=0):\n try:\n time_start = time.time()\n self.redraw_data(whence=whence)\n self.update_image()\n time_done = time.time()\n time_delta = time_start - self.time_last_redraw\n time_elapsed = time_done - time_start\n self.time_last_redraw = time_done\n self.logger.debug(\n \"widget '%s' redraw (whence=%d) delta=%.4f elapsed=%.4f sec\" % (\n self.name, whence, time_delta, time_elapsed))\n except Exception as e:\n self.logger.error(\"Error redrawing image: %s\" % (str(e)))\n try:\n (type, value, tb) = sys.exc_info()\n tb_str = \"\".join(traceback.format_tb(tb))\n self.logger.error(\"Traceback:\\n%s\" % (tb_str))\n except Exception:\n tb_str = \"Traceback information unavailable.\"\n self.logger.error(tb_str)", - "docstring": "Redraw the displayed image.\n\n Parameters\n ----------\n whence\n See :meth:`get_rgb_object`." - }, - { - "code": "def convex_conj(self):\n convex_conjs = [func.convex_conj for func in self.functionals]\n return SeparableSum(*convex_conjs)", - "docstring": "The convex conjugate functional.\n\n Convex conjugate distributes over separable sums, so the result is\n simply the separable sum of the convex conjugates." 
- }, - { - "code": "def delete_series(self, series):\n url = \"db/{0}/series/{1}\".format(\n self._database,\n series\n )\n self.request(\n url=url,\n method='DELETE',\n expected_response_code=204\n )\n return True", - "docstring": "Drop a series on the InfluxDB server.\n\n :param series: the name of the series to delete\n :type series: string\n :rtype: boolean" - }, - { - "code": "def linkify_s_by_sd(self, services):\n for servicedep in self:\n setattr(servicedep, \"service_description_string\", \"undefined\")\n setattr(servicedep, \"dependent_service_description_string\", \"undefined\")\n if getattr(servicedep, 'service_description', None) is None or\\\n getattr(servicedep, 'dependent_service_description', None) is None:\n continue\n services.add_act_dependency(servicedep.dependent_service_description,\n servicedep.service_description,\n servicedep.notification_failure_criteria,\n getattr(servicedep, 'dependency_period', ''),\n servicedep.inherits_parent)\n services.add_chk_dependency(servicedep.dependent_service_description,\n servicedep.service_description,\n servicedep.execution_failure_criteria,\n getattr(servicedep, 'dependency_period', ''),\n servicedep.inherits_parent)\n setattr(servicedep, \"service_description_string\",\n services[servicedep.service_description].get_name())\n setattr(servicedep, \"dependent_service_description_string\",\n services[servicedep.dependent_service_description].get_name())", - "docstring": "Add dependency in service objects\n\n :return: None" - }, - { - "code": "def add_album_art(file_name, album_art):\n img = requests.get(album_art, stream=True)\n img = img.raw\n audio = EasyMP3(file_name, ID3=ID3)\n try:\n audio.add_tags()\n except _util.error:\n pass\n audio.tags.add(\n APIC(\n encoding=3,\n mime='image/png',\n type=3,\n desc='Cover',\n data=img.read()\n )\n )\n audio.save()\n return album_art", - "docstring": "Add album_art in .mp3's tags" - }, - { - "code": "def startfile(fpath, detatch=True, quote=False, verbose=False, quiet=True):\n print('[cplat] startfile(%r)' % fpath)\n fpath = normpath(fpath)\n if not exists(fpath):\n raise Exception('Cannot start nonexistant file: %r' % fpath)\n if not WIN32:\n fpath = pipes.quote(fpath)\n if LINUX:\n outtup = cmd(('xdg-open', fpath), detatch=detatch, verbose=verbose, quiet=quiet)\n elif DARWIN:\n outtup = cmd(('open', fpath), detatch=detatch, verbose=verbose, quiet=quiet)\n elif WIN32:\n os.startfile(fpath)\n else:\n raise RuntimeError('Unknown Platform')\n if outtup is not None:\n out, err, ret = outtup\n if not ret:\n raise Exception(out + ' -- ' + err)\n pass", - "docstring": "Uses default program defined by the system to open a file.\n\n References:\n http://stackoverflow.com/questions/2692873/quote-posix-shell-special-characters-in-python-output" - }, - { - "code": "def visit_Try(self, node):\n currs = (node,)\n raises = ()\n for handler in node.handlers:\n self.result.add_node(handler)\n for n in node.body:\n self.result.add_node(n)\n for curr in currs:\n self.result.add_edge(curr, n)\n currs, nraises = self.visit(n)\n for nraise in nraises:\n if isinstance(nraise, ast.Raise):\n for handler in node.handlers:\n self.result.add_edge(nraise, handler)\n else:\n raises += (nraise,)\n for handler in node.handlers:\n ncurrs, nraises = self.visit(handler)\n currs += ncurrs\n raises += nraises\n return currs, raises", - "docstring": "OUT = body's U handler's\n RAISES = handler's\n this equation is not has good has it could be...\n but we need type information to be more accurate" - }, - { - "code": "def 
get_variables_in_module(module,\n collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n return module.get_variables(collection=collection)", - "docstring": "Returns tuple of `tf.Variable`s declared inside an `snt.Module`.\n\n Note that this operates by searching the variable scope a module contains,\n and so does not know about any modules which were constructed elsewhere but\n used inside this module.\n\n Args:\n module: `snt.Module` instance to query the scope of.\n collection: Collection to restrict query to. By default this is\n `tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable\n variables such as moving averages.\n\n Returns:\n A tuple of `tf.Variable` objects.\n\n Raises:\n NotConnectedError: If the module is not connected to the Graph." - }, - { - "code": "def do_shell(self, args: argparse.Namespace) -> None:\n import subprocess\n tokens = [args.command] + args.command_args\n for index, _ in enumerate(tokens):\n if tokens[index]:\n first_char = tokens[index][0]\n if first_char in constants.QUOTES:\n tokens[index] = utils.strip_quotes(tokens[index])\n tokens[index] = os.path.expanduser(tokens[index])\n if first_char in constants.QUOTES:\n tokens[index] = first_char + tokens[index] + first_char\n expanded_command = ' '.join(tokens)\n with self.sigint_protection:\n proc = subprocess.Popen(expanded_command,\n stdout=subprocess.PIPE if isinstance(self.stdout, utils.StdSim) else self.stdout,\n stderr=subprocess.PIPE if isinstance(sys.stderr, utils.StdSim) else sys.stderr,\n shell=True)\n proc_reader = utils.ProcReader(proc, self.stdout, sys.stderr)\n proc_reader.wait()", - "docstring": "Execute a command as if at the OS prompt" - }, - { - "code": "def j_delete(self, *args):\n uid = args[0]\n current_infor = MPost.get_by_uid(uid)\n tslug = MCategory.get_by_uid(current_infor.extinfo['def_cat_uid'])\n is_deleted = MPost.delete(uid)\n MCategory.update_count(current_infor.extinfo['def_cat_uid'])\n if is_deleted:\n output = {\n 'del_info': 1,\n 'cat_slug': tslug.slug,\n 'cat_id': tslug.uid,\n 'kind': current_infor.kind\n }\n else:\n output = {\n 'del_info': 0,\n }\n return json.dump(output, self)", - "docstring": "Delete the post, but return the JSON." - }, - { - "code": "def read_pdb(pdbfname, as_string=False):\n pybel.ob.obErrorLog.StopLogging()\n if os.name != 'nt':\n maxsize = resource.getrlimit(resource.RLIMIT_STACK)[-1]\n resource.setrlimit(resource.RLIMIT_STACK, (min(2 ** 28, maxsize), maxsize))\n sys.setrecursionlimit(10 ** 5)\n return readmol(pdbfname, as_string=as_string)", - "docstring": "Reads a given PDB file and returns a Pybel Molecule." - }, - { - "code": "def save(self, filename, compressed=True):\n if not self.has_data:\n return False\n _, file_ext = os.path.splitext(filename)\n if compressed:\n if file_ext != COMPRESSED_TENSOR_EXT:\n raise ValueError('Can only save compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT))\n np.savez_compressed(filename,\n self.data[:self.cur_index,...])\n else:\n if file_ext != TENSOR_EXT:\n raise ValueError('Can only save tensor with .npy extension')\n np.save(filename, self.data[:self.cur_index,...])\n return True", - "docstring": "Save a tensor to disk." 
- }, - { - "code": "def validate(args):\n p = OptionParser(validate.__doc__)\n opts, args = p.parse_args(args)\n try:\n agpfile, componentfasta, targetfasta = args\n except Exception as e:\n sys.exit(p.print_help())\n agp = AGP(agpfile)\n build = Fasta(targetfasta)\n bacs = Fasta(componentfasta, index=False)\n for aline in agp:\n try:\n build_seq = build.sequence(dict(chr=aline.object,\n start=aline.object_beg, stop=aline.object_end))\n if aline.is_gap:\n assert build_seq.upper() == aline.gap_length * 'N', \\\n \"gap mismatch: %s\" % aline\n else:\n bac_seq = bacs.sequence(dict(chr=aline.component_id,\n start=aline.component_beg, stop=aline.component_end,\n strand=aline.orientation))\n assert build_seq.upper() == bac_seq.upper(), \\\n \"sequence mismatch: %s\" % aline\n logging.debug(\"%s:%d-%d verified\" % (aline.object,\n aline.object_beg, aline.object_end))\n except Exception as e:\n logging.error(e)", - "docstring": "%prog validate agpfile componentfasta targetfasta\n\n validate consistency between agpfile and targetfasta" - }, - { - "code": "def delete_threat_list(self, threat_list):\n log.info('Deleting cached threat list \"{}\"'.format(repr(threat_list)))\n q =\n params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]\n with self.get_cursor() as dbc:\n dbc.execute(q, params)", - "docstring": "Delete threat list entry." - }, - { - "code": "def update_fw_params(self, rtr_id=-1, fw_type=-1):\n if rtr_id != -1:\n self.router_id = rtr_id\n if fw_type != -1:\n self.fw_type = fw_type", - "docstring": "Updates the FW parameters." - }, - { - "code": "def copy(self):\n content = [(k, v) for k, v in self.items()]\n intidx = [(k, v) for k, v in content if isinstance(k, int)]\n args = [v for k, v in sorted(intidx)]\n kwargs = {k: v\n for k, v in content\n if not isinstance(k, int) and not self.is_special(k)}\n return self.__class__(*args, **kwargs)", - "docstring": "Return a copy of this `Fact`." - }, - { - "code": "def createEditor(self, delegate, parent, option):\n return FontCtiEditor(self, delegate, parent=parent)", - "docstring": "Creates a FontCtiEditor.\n For the parameters see the AbstractCti documentation." 
- }, - { - "code": "def predict_epitopes_from_args(args):\n mhc_model = mhc_binding_predictor_from_args(args)\n variants = variant_collection_from_args(args)\n gene_expression_dict = rna_gene_expression_dict_from_args(args)\n transcript_expression_dict = rna_transcript_expression_dict_from_args(args)\n predictor = TopiaryPredictor(\n mhc_model=mhc_model,\n padding_around_mutation=args.padding_around_mutation,\n ic50_cutoff=args.ic50_cutoff,\n percentile_cutoff=args.percentile_cutoff,\n min_transcript_expression=args.rna_min_transcript_expression,\n min_gene_expression=args.rna_min_gene_expression,\n only_novel_epitopes=args.only_novel_epitopes,\n raise_on_error=not args.skip_variant_errors)\n return predictor.predict_from_variants(\n variants=variants,\n transcript_expression_dict=transcript_expression_dict,\n gene_expression_dict=gene_expression_dict)", - "docstring": "Returns an epitope collection from the given commandline arguments.\n\n Parameters\n ----------\n args : argparse.Namespace\n Parsed commandline arguments for Topiary" - }, - { - "code": "def kfold(self, k=5, stratify=False, shuffle=True, seed=33):\n if stratify:\n kf = StratifiedKFold(n_splits=k, random_state=seed, shuffle=shuffle)\n else:\n kf = KFold(n_splits=k, random_state=seed, shuffle=shuffle)\n for train_index, test_index in kf.split(self.X_train, self.y_train):\n X_train, y_train = idx(self.X_train, train_index), self.y_train[train_index]\n X_test, y_test = idx(self.X_train, test_index), self.y_train[test_index]\n yield X_train, y_train, X_test, y_test, train_index, test_index", - "docstring": "K-Folds cross validation iterator.\n\n Parameters\n ----------\n k : int, default 5\n stratify : bool, default False\n shuffle : bool, default True\n seed : int, default 33\n\n Yields\n -------\n X_train, y_train, X_test, y_test, train_index, test_index" - }, - { - "code": "def load_class(module_name, class_name):\n try:\n plugmod = import_module(module_name)\n except Exception as exc:\n warn(\"Importing built-in plugin %s.%s raised an exception: %r\" %\n (module_name, class_name, repr(exc)), ImportWarning)\n return None\n else:\n return getattr(plugmod, class_name)", - "docstring": "Return class object specified by module name and class name.\n\n Return None if module failed to be imported.\n\n :param module_name: string module name\n :param class_name: string class name" - }, - { - "code": "def _write_export(export, file_obj=None):\n if file_obj is None:\n return export\n if hasattr(file_obj, 'write'):\n out_file = file_obj\n else:\n out_file = open(file_obj, 'wb')\n try:\n out_file.write(export)\n except TypeError:\n out_file.write(export.encode('utf-8'))\n out_file.close()\n return export", - "docstring": "Write a string to a file.\n If file_obj isn't specified, return the string\n\n Parameters\n ---------\n export: a string of the export data\n file_obj: a file-like object or a filename" - }, - { - "code": "def _handle_response(self, resp):\n http_code = resp.status_code\n if http_code not in self.http_success_status_codes:\n raise RequestError(\n response=resp,\n message='[HTTP {}] {}'.format(http_code, resp.reason)\n )\n try:\n body = resp.json()\n except ValueError:\n raise RequestError(\n response=resp,\n message='[HTTP {}] response body: {}'.format(\n http_code,\n resp.text\n )\n )\n else:\n if 'error' in body:\n error_code = body['error'].get('code', '?')\n raise RequestError(\n response=resp,\n message='[HTTP {}][ERR {}] {}'.format(\n resp.status_code,\n error_code,\n body['error'].get('message', 'no error 
message')\n ),\n error_code=error_code\n )\n return body", - "docstring": "Handle the response from QuadrigaCX.\n\n :param resp: Response from QuadrigaCX.\n :type resp: requests.models.Response\n :return: Response body.\n :rtype: dict\n :raise quadriga.exceptions.RequestError: If HTTP OK was not returned." - }, - { - "code": "def reject(self, condition=None, description=None):\n if self._can_settle_message():\n self._response = errors.MessageRejected(\n condition=condition,\n description=description,\n encoding=self._encoding)\n self._settler(self._response)\n self.state = constants.MessageState.ReceivedSettled\n return True\n return False", - "docstring": "Send a response disposition to the service to indicate that\n a received message has been rejected. If the client is running in PeekLock\n mode, the service will wait on this disposition. Otherwise it will\n be ignored. A rejected message will increment the messages delivery count.\n Returns `True` is message was rejected, or `False` if the message\n was already settled.\n\n :param condition: The AMQP rejection code. By default this is `amqp:internal-error`.\n :type condition: bytes or str\n :param description: A description/reason to accompany the rejection.\n :type description: bytes or str\n :rtype: bool\n :raises: TypeError if the message is being sent rather than received." - }, - { - "code": "def get_rbounds(step):\n if step.geom is not None:\n rcmb = step.geom.rcmb\n else:\n rcmb = step.sdat.par['geometry']['r_cmb']\n if step.sdat.par['geometry']['shape'].lower() == 'cartesian':\n rcmb = 0\n rcmb = max(rcmb, 0)\n return rcmb, rcmb + 1", - "docstring": "Radial or vertical position of boundaries.\n\n Args:\n step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\n instance.\n Returns:\n tuple of floats: radial or vertical positions of boundaries of the\n domain." - }, - { - "code": "def _drop_gracefully(self):\n mr_id = self.request.get(\"mapreduce_id\")\n logging.error(\"Failed to kick off job %s\", mr_id)\n state = model.MapreduceState.get_by_job_id(mr_id)\n if not self._check_mr_state(state, mr_id):\n return\n config = util.create_datastore_write_config(state.mapreduce_spec)\n model.MapreduceControl.abort(mr_id, config=config)\n state.active = False\n state.result_status = model.MapreduceState.RESULT_FAILED\n ControllerCallbackHandler._finalize_job(state.mapreduce_spec, state)", - "docstring": "See parent." 
- }, - { - "code": "def active_trail_nodes(self, start, observed=None):\n if observed and self.parent_node in observed:\n return set(start)\n else:\n return set(self.nodes()) - set(observed if observed else [])", - "docstring": "Returns all the nodes reachable from start via an active trail.\n\n Parameters\n ----------\n start: Graph node\n\n observed : List of nodes (optional)\n If given the active trail would be computed assuming these nodes to be observed.\n\n Examples\n --------\n >>> from pgmpy.models import NaiveBayes\n >>> model = NaiveBayes()\n >>> model.add_nodes_from(['a', 'b', 'c', 'd'])\n >>> model.add_edges_from([('a', 'b'), ('a', 'c'), ('a', 'd')])\n >>> model.active_trail_nodes('a')\n {'a', 'b', 'c', 'd'}\n >>> model.active_trail_nodes('a', ['b', 'c'])\n {'a', 'd'}\n >>> model.active_trail_nodes('b', ['a'])\n {'b'}" - }, - { - "code": "def join_cols(cols):\n return \", \".join([i for i in cols]) if isinstance(cols, (list, tuple, set)) else cols", - "docstring": "Join list of columns into a string for a SQL query" - }, - { - "code": "def _options_dir(name):\n _check_portname(name)\n _root = '/var/db/ports'\n new_dir = os.path.join(_root, name.replace('/', '_'))\n old_dir = os.path.join(_root, name.split('/')[-1])\n if os.path.isdir(old_dir):\n return old_dir\n return new_dir", - "docstring": "Retrieve the path to the dir containing OPTIONS file for a given port" - }, - { - "code": "def is_deb_package_installed(pkg):\n with settings(hide('warnings', 'running', 'stdout', 'stderr'),\n warn_only=True, capture=True):\n result = sudo('dpkg-query -l \"%s\" | grep -q ^.i' % pkg)\n return not bool(result.return_code)", - "docstring": "checks if a particular deb package is installed" - }, - { - "code": "def _get_repo_url():\n default_repo = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'\n repo_url = os.environ.get('MXNET_GLUON_REPO', default_repo)\n if repo_url[-1] != '/':\n repo_url = repo_url+'/'\n return repo_url", - "docstring": "Return the base URL for Gluon dataset and model repository." - }, - { - "code": "def _create_messages(self, metrics):\n messages = []\n for m in metrics:\n messages.append(str(m))\n logger.debug('Messages: %s', messages)\n return messages", - "docstring": "Create a list of zabbix messages from a list of ZabbixMetrics.\n\n :type metrics_array: list\n :param metrics_array: List of :class:`zabbix.sender.ZabbixMetric`.\n\n :rtype: list\n :return: List of zabbix messages." 
- }, - { - "code": "def parse_profile_from_hcard(hcard: str, handle: str):\n from federation.entities.diaspora.entities import DiasporaProfile\n doc = html.fromstring(hcard)\n profile = DiasporaProfile(\n name=_get_element_text_or_none(doc, \".fn\"),\n image_urls={\n \"small\": _get_element_attr_or_none(doc, \".entity_photo_small .photo\", \"src\"),\n \"medium\": _get_element_attr_or_none(doc, \".entity_photo_medium .photo\", \"src\"),\n \"large\": _get_element_attr_or_none(doc, \".entity_photo .photo\", \"src\"),\n },\n public=True if _get_element_text_or_none(doc, \".searchable\") == \"true\" else False,\n id=handle,\n handle=handle,\n guid=_get_element_text_or_none(doc, \".uid\"),\n public_key=_get_element_text_or_none(doc, \".key\"),\n )\n return profile", - "docstring": "Parse all the fields we can from a hCard document to get a Profile.\n\n :arg hcard: HTML hcard document (str)\n :arg handle: User handle in username@domain.tld format\n :returns: ``federation.entities.diaspora.entities.DiasporaProfile`` instance" - }, - { - "code": "def repr_feature(feature, max_keys=100, indent=8, lexigraphic=False):\n if isinstance(feature, (str, bytes)):\n try:\n ustr = feature.decode('utf8')\n return ustr\n except:\n return repr(feature)\n if isinstance(feature, StringCounter):\n return repr_stringcounter(feature, max_keys, indent, lexigraphic)\n elif isinstance(feature, unicode):\n return feature\n else:\n return repr(feature)\n assert False, 'internal logic failure, no branch taken'", - "docstring": "generate a pretty-printed string for a feature\n\n Currently implemented:\n * StringCounter\n\n @max_keys: truncate long counters\n\n @indent: indent multi-line displays by this many spaces\n\n @lexigraphic: instead of sorting counters by count (default), sort\n keys lexigraphically" - }, - { - "code": "def _restore_port_binding(self,\n switch_ip, pvlan_ids,\n port, native_vlan):\n intf_type, nexus_port = nexus_help.split_interface_name(port)\n if native_vlan != 0:\n self.driver.send_enable_vlan_on_trunk_int(\n switch_ip, native_vlan,\n intf_type, nexus_port, True)\n if len(pvlan_ids) == 1:\n return\n concat_vlans = ''\n compressed_vlans = self._get_compressed_vlan_list(pvlan_ids)\n for pvlan in compressed_vlans:\n if concat_vlans == '':\n concat_vlans = \"%s\" % pvlan\n else:\n concat_vlans += \",%s\" % pvlan\n if len(concat_vlans) >= const.CREATE_PORT_VLAN_LENGTH:\n self.driver.send_enable_vlan_on_trunk_int(\n switch_ip, concat_vlans,\n intf_type, nexus_port, False)\n concat_vlans = ''\n if len(concat_vlans):\n self.driver.send_enable_vlan_on_trunk_int(\n switch_ip, concat_vlans,\n intf_type, nexus_port, False)", - "docstring": "Restores a set of vlans for a given port." - }, - { - "code": "def truncate(self, frames=None):\n if frames is None:\n frames = self.tell()\n err = _snd.sf_command(self._file, _snd.SFC_FILE_TRUNCATE,\n _ffi.new(\"sf_count_t*\", frames),\n _ffi.sizeof(\"sf_count_t\"))\n if err:\n raise RuntimeError(\"Error truncating the file\")\n self._info.frames = frames", - "docstring": "Truncate the file to a given number of frames.\n\n After this command, the read/write position will be at the new\n end of the file.\n\n Parameters\n ----------\n frames : int, optional\n Only the data before `frames` is kept, the rest is deleted.\n If not specified, the current read/write position is used." 
- }, - { - "code": "def make(base_classes=(), have_mt=False):\n good_bc = ModelFactory.__fix_bases(base_classes, have_mt)\n print \"Base classes are:\", good_bc\n key = \"\".join(map(str, good_bc))\n if key in ModelFactory.__memoized:\n return ModelFactory.__memoized[key]\n cls = new.classobj('', good_bc, {'__module__': '__main__', '__doc__': None})\n ModelFactory.__memoized[key] = cls\n return cls", - "docstring": "Use this static method to build a model class that\n possibly derives from other classes. If have_mt is True,\n then returned class will take into account multi-threading\n issues when dealing with observable properties." - }, - { - "code": "def register_templatetags():\n from turboengine.conf import settings\n from google.appengine.ext.webapp import template\n for python_file in settings.TEMPLATE_PATH:\n template.register_template_library(python_file)", - "docstring": "Register templatetags defined in settings as basic templatetags" - }, - { - "code": "def reads(text, fmt, as_version=4, **kwargs):\n fmt = copy(fmt)\n fmt = long_form_one_format(fmt)\n ext = fmt['extension']\n if ext == '.ipynb':\n return nbformat.reads(text, as_version, **kwargs)\n format_name = read_format_from_metadata(text, ext) or fmt.get('format_name')\n if format_name:\n format_options = {}\n else:\n format_name, format_options = guess_format(text, ext)\n if format_name:\n fmt['format_name'] = format_name\n fmt.update(format_options)\n reader = TextNotebookConverter(fmt)\n notebook = reader.reads(text, **kwargs)\n rearrange_jupytext_metadata(notebook.metadata)\n if format_name and insert_or_test_version_number():\n notebook.metadata.setdefault('jupytext', {}).setdefault('text_representation', {}).update(\n {'extension': ext, 'format_name': format_name})\n return notebook", - "docstring": "Read a notebook from a string" - }, - { - "code": "def asRFC2822(self, tzinfo=None, includeDayOfWeek=True):\n dtime = self.asDatetime(tzinfo)\n if tzinfo is None:\n rfcoffset = '-0000'\n else:\n rfcoffset = '%s%02i%02i' % _timedeltaToSignHrMin(dtime.utcoffset())\n rfcstring = ''\n if includeDayOfWeek:\n rfcstring += self.rfc2822Weekdays[dtime.weekday()] + ', '\n rfcstring += '%i %s %4i %02i:%02i:%02i %s' % (\n dtime.day,\n self.rfc2822Months[dtime.month - 1],\n dtime.year,\n dtime.hour,\n dtime.minute,\n dtime.second,\n rfcoffset)\n return rfcstring", - "docstring": "Return this Time formatted as specified in RFC 2822.\n\n RFC 2822 specifies the format of email messages.\n\n RFC 2822 says times in email addresses should reflect the local\n timezone. If tzinfo is a datetime.tzinfo instance, the returned\n formatted string will reflect that timezone. Otherwise, the timezone\n will be '-0000', which RFC 2822 defines as UTC, but with an unknown\n local timezone.\n\n RFC 2822 states that the weekday is optional. The parameter\n includeDayOfWeek indicates whether or not to include it." 
- }, - { - "code": "def check_used(self, pkg):\n used = []\n dep_path = self.meta.log_path + \"dep/\"\n logs = find_package(\"\", dep_path)\n for log in logs:\n deps = Utils().read_file(dep_path + log)\n for dep in deps.splitlines():\n if pkg == dep:\n used.append(log)\n return used", - "docstring": "Check if dependencies used" - }, - { - "code": "def _execute_show(self, show_command):\n rpc_command = '{show_command}'.format(\n show_command=escape_xml(show_command)\n )\n response = self._execute_rpc(rpc_command)\n raw_response = response.xpath('.//CLI/Exec')[0].text\n return raw_response.strip() if raw_response else ''", - "docstring": "Executes an operational show-type command." - }, - { - "code": "def set_action_name(self, name):\n if self._open and name is not None:\n self._open[-1].name = name\n self.notify()", - "docstring": "Set the name of the top group, if present." - }, - { - "code": "def receive(self, what=None):\n if self.failed:\n raise ValueError(\"{} cannot receive as it has failed.\"\n .format(self))\n received_transmissions = []\n if what is None:\n pending_transmissions = self.transmissions(direction=\"incoming\",\n status=\"pending\")\n for transmission in pending_transmissions:\n transmission.status = \"received\"\n transmission.receive_time = timenow()\n received_transmissions.append(transmission)\n elif isinstance(what, Transmission):\n if what in self.transmissions(direction=\"incoming\",\n status=\"pending\"):\n transmission.status = \"received\"\n what.receive_time = timenow()\n received_transmissions.append(what)\n else:\n raise(ValueError(\"{} cannot receive {} as it is not \"\n \"in its pending_transmissions\"\n .format(self, what)))\n else:\n raise ValueError(\"Nodes cannot receive {}\".format(what))\n self.update([t.info for t in received_transmissions])", - "docstring": "Receive some transmissions.\n\n Received transmissions are marked as received, then their infos are\n passed to update().\n\n \"what\" can be:\n\n 1. None (the default) in which case all pending transmissions are\n received.\n 2. a specific transmission.\n\n Will raise an error if the node is told to receive a transmission it has\n not been sent." 
- }, - { - "code": "def load_energy():\n tbl_name = 'energy_usage'\n data = get_example_data('energy.json.gz')\n pdf = pd.read_json(data)\n pdf.to_sql(\n tbl_name,\n db.engine,\n if_exists='replace',\n chunksize=500,\n dtype={\n 'source': String(255),\n 'target': String(255),\n 'value': Float(),\n },\n index=False)\n print('Creating table [wb_health_population] reference')\n tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()\n if not tbl:\n tbl = TBL(table_name=tbl_name)\n tbl.description = 'Energy consumption'\n tbl.database = utils.get_or_create_main_db()\n if not any(col.metric_name == 'sum__value' for col in tbl.metrics):\n tbl.metrics.append(SqlMetric(\n metric_name='sum__value',\n expression='SUM(value)',\n ))\n db.session.merge(tbl)\n db.session.commit()\n tbl.fetch_metadata()\n slc = Slice(\n slice_name='Energy Sankey',\n viz_type='sankey',\n datasource_type='table',\n datasource_id=tbl.id,\n params=textwrap.dedent(\n),\n )\n misc_dash_slices.add(slc.slice_name)\n merge_slice(slc)\n slc = Slice(\n slice_name='Energy Force Layout',\n viz_type='directed_force',\n datasource_type='table',\n datasource_id=tbl.id,\n params=textwrap.dedent(\n),\n )\n misc_dash_slices.add(slc.slice_name)\n merge_slice(slc)\n slc = Slice(\n slice_name='Heatmap',\n viz_type='heatmap',\n datasource_type='table',\n datasource_id=tbl.id,\n params=textwrap.dedent(\n),\n )\n misc_dash_slices.add(slc.slice_name)\n merge_slice(slc)", - "docstring": "Loads an energy related dataset to use with sankey and graphs" - }, - { - "code": "def success(self, cmd, desc=''):\n return self._label_desc(cmd, desc, self.success_color)", - "docstring": "Style for a success message." - }, - { - "code": "def filter_by_rand(self, p:float, seed:int=None):\n \"Keep random sample of `items` with probability `p` and an optional `seed`.\"\n if seed is not None: np.random.seed(seed)\n return self.filter_by_func(lambda o: rand_bool(p))", - "docstring": "Keep random sample of `items` with probability `p` and an optional `seed`." - }, - { - "code": "def raise_and_log_error(self, error, message):\n self.log('raising %s, traceback %s\\n' %\n (error, traceback.format_exc()))\n raise error(message)", - "docstring": "Raise error, including message and original traceback.\n\n error: the error to raise\n message: the user-facing error message" - }, - { - "code": "def validate_password(self, password):\n hash = sha256()\n hash.update((password + self.password[:64]).encode('utf-8'))\n return self.password[64:] == hash.hexdigest()", - "docstring": "Check the password against existing credentials.\n\n :param password: the password that was provided by the user to\n try and authenticate. This is the clear text version that we will\n need to match against the hashed one in the database.\n :type password: unicode object.\n :return: Whether the password is valid.\n :rtype: bool" - }, - { - "code": "def _load_rules(forcefield):\n rules = dict()\n for rule_name, smarts in forcefield.atomTypeDefinitions.items():\n overrides = forcefield.atomTypeOverrides.get(rule_name)\n if overrides is not None:\n overrides = set(overrides)\n else:\n overrides = set()\n rules[rule_name] = SMARTSGraph(smarts_string=smarts,\n parser=forcefield.parser,\n name=rule_name,\n overrides=overrides)\n return rules", - "docstring": "Load atomtyping rules from a forcefield into SMARTSGraphs." 
- }, - { - "code": "def field_name_exist(self, field_name):\n fields = self.class_item.get_fields()\n for f in fields:\n if f.name == field_name:\n return True\n return False", - "docstring": "Check if there is already a field_name field in the current class\n It is useful before allowing to rename a field to check name does\n not already exist." - }, - { - "code": "def parFactory(fields, strict=0):\n if len(fields) < 3 or None in fields[0:3]:\n raise SyntaxError(\"At least 3 fields must be given\")\n type = fields[1]\n if type in _string_types:\n return IrafParS(fields,strict)\n elif type == 'R':\n return StrictParR(fields,1)\n elif type in _real_types:\n return IrafParR(fields,strict)\n elif type == \"I\":\n return StrictParI(fields,1)\n elif type == \"i\":\n return IrafParI(fields,strict)\n elif type == \"b\":\n return IrafParB(fields,strict)\n elif type == \"ar\":\n return IrafParAR(fields,strict)\n elif type == \"ai\":\n return IrafParAI(fields,strict)\n elif type == \"as\":\n return IrafParAS(fields,strict)\n elif type == \"ab\":\n return IrafParAB(fields,strict)\n elif type[:1] == \"a\":\n raise SyntaxError(\"Cannot handle arrays of type %s\" % type)\n else:\n raise SyntaxError(\"Cannot handle parameter type %s\" % type)", - "docstring": "parameter factory function\n\n fields is a list of the comma-separated fields (as in the .par file).\n Each entry is a string or None (indicating that field was omitted.)\n\n Set the strict parameter to a non-zero value to do stricter parsing\n (to find errors in the input)" - }, - { - "code": "def verify_token(self, token, requested_access):\n client = API(options.url_auth,\n auth_username=options.service_id,\n auth_password=options.client_secret,\n ssl_options=ssl_server_options())\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json'}\n body = urllib.urlencode({'token': token, 'requested_access': requested_access})\n client.auth.verify.prepare_request(headers=headers, request_timeout=180)\n try:\n result = yield client.auth.verify.post(body=body)\n except tornado.httpclient.HTTPError as ex:\n logging.exception(ex.message)\n raise HTTPError(500, 'Internal Server Error')\n raise Return(result['has_access'])", - "docstring": "Check the token bearer is permitted to access the resource\n\n :param token: Access token\n :param requested_access: the access level the client has requested\n :returns: boolean" - }, - { - "code": "def get_probs(self, sampler=None, rerun=None, store=True):\n if rerun is None:\n rerun = sampler is not None\n if self._probs is not None and not rerun:\n return self._probs\n if sampler is None:\n sampler = self.vqe.sampler\n probs = sampler(self.circuit, range(self.circuit.n_qubits))\n if store:\n self._probs = probs\n return probs", - "docstring": "Get probabilities." - }, - { - "code": "def separate_reach_logs(log_str):\n log_lines = log_str.splitlines()\n reach_logs = []\n reach_lines = []\n adding_reach_lines = False\n for l in log_lines[:]:\n if not adding_reach_lines and 'Beginning reach' in l:\n adding_reach_lines = True\n elif adding_reach_lines and 'Reach finished' in l:\n adding_reach_lines = False\n reach_logs.append(('SUCCEEDED', '\\n'.join(reach_lines)))\n reach_lines = []\n elif adding_reach_lines:\n reach_lines.append(l.split('readers - ')[1])\n log_lines.remove(l)\n if adding_reach_lines:\n reach_logs.append(('FAILURE', '\\n'.join(reach_lines)))\n return '\\n'.join(log_lines), reach_logs", - "docstring": "Get the list of reach logs from the overall logs." 
- }, - { - "code": "def remove_pad(x, pad_remover, mode):\n x = expert_utils.flatten_all_but_last(x)\n if mode != ModeKeys.PREDICT:\n x = pad_remover.remove(x)\n x = tf.expand_dims(x, axis=0)\n return x", - "docstring": "Remove padding by concatenating all dimension into one.\n\n Args:\n x (tf.Tensor): input of shape [batch_size, length, depth]\n pad_remover (obj): a PadRemover object\n mode (ModeKeys): infer, train or eval. If inference, the padding remover is\n not applied\n\n Returns:\n tf.Tensor of shape [1,length_nonpad,depth] where\n length_nonpad <= batch_size*length" - }, - { - "code": "def _apply_incoming_manipulators(self, son, collection):\n for manipulator in self.__incoming_manipulators:\n son = manipulator.transform_incoming(son, collection)\n return son", - "docstring": "Apply incoming manipulators to `son`." - }, - { - "code": "def module(self):\n if self._module is None:\n root = self\n while self._module is None and root is not None:\n if isinstance(root, Module):\n self._module = root\n else:\n root = root.parent\n return self._module", - "docstring": "Returns the module that this code element belongs to." - }, - { - "code": "def fwhm(x, y, k=10):\n class MultiplePeaks(Exception):\n pass\n class NoPeaksFound(Exception):\n pass\n half_max = np.amax(y) / 2.0\n s = splrep(x, y - half_max)\n roots = sproot(s)\n if len(roots) > 2:\n raise MultiplePeaks(\"The dataset appears to have multiple peaks, and \"\n \"thus the FWHM can't be determined.\")\n elif len(roots) < 2:\n raise NoPeaksFound(\"No proper peaks were found in the data set; likely \"\n \"the dataset is flat (e.g. all zeros).\")\n else:\n return roots[0], roots[1]", - "docstring": "Determine full-with-half-maximum of a peaked set of points, x and y.\n\n Assumes that there is only one peak present in the datasset. The function\n uses a spline interpolation of order k." - }, - { - "code": "def format_close(code: int, reason: str) -> str:\n if 3000 <= code < 4000:\n explanation = \"registered\"\n elif 4000 <= code < 5000:\n explanation = \"private use\"\n else:\n explanation = CLOSE_CODES.get(code, \"unknown\")\n result = f\"code = {code} ({explanation}), \"\n if reason:\n result += f\"reason = {reason}\"\n else:\n result += \"no reason\"\n return result", - "docstring": "Display a human-readable version of the close code and reason." - }, - { - "code": "def _zom_arg(lexer):\n tok = next(lexer)\n if isinstance(tok, COMMA):\n return (_expr(lexer), ) + _zom_arg(lexer)\n else:\n lexer.unpop_token(tok)\n return tuple()", - "docstring": "Return zero or more arguments." 
- }, - { - "code": "def gatherkeys(args):\n oldmask = os.umask(0o77)\n try:\n try:\n tmpd = tempfile.mkdtemp()\n LOG.info(\"Storing keys in temp directory %s\", tmpd)\n sucess = False\n for host in args.mon:\n sucess = gatherkeys_with_mon(args, host, tmpd)\n if sucess:\n break\n if not sucess:\n LOG.error(\"Failed to connect to host:%s\" ,', '.join(args.mon))\n raise RuntimeError('Failed to connect any mon')\n had_error = False\n date_string = time.strftime(\"%Y%m%d%H%M%S\")\n for keytype in [\"admin\", \"mds\", \"mgr\", \"mon\", \"osd\", \"rgw\"]:\n filename = keytype_path_to(args, keytype)\n tmp_path = os.path.join(tmpd, filename)\n if not os.path.exists(tmp_path):\n LOG.error(\"No key retrived for '%s'\" , keytype)\n had_error = True\n continue\n if not os.path.exists(filename):\n LOG.info(\"Storing %s\" % (filename))\n shutil.move(tmp_path, filename)\n continue\n if _keyring_equivalent(tmp_path, filename):\n LOG.info(\"keyring '%s' already exists\" , filename)\n continue\n backup_keyring = \"%s-%s\" % (filename, date_string)\n LOG.info(\"Replacing '%s' and backing up old key as '%s'\", filename, backup_keyring)\n shutil.copy(filename, backup_keyring)\n shutil.move(tmp_path, filename)\n if had_error:\n raise RuntimeError('Failed to get all key types')\n finally:\n LOG.info(\"Destroy temp directory %s\" %(tmpd))\n shutil.rmtree(tmpd)\n finally:\n os.umask(oldmask)", - "docstring": "Gather keys from any mon and store in current working directory.\n\n Backs up keys from previous installs and stores new keys." - }, - { - "code": "def most_similar(self, word, number=5):\n if self.word_vectors is None:\n raise Exception('Model must be fit before querying')\n if self.dictionary is None:\n raise Exception('No word dictionary supplied')\n try:\n word_idx = self.dictionary[word]\n except KeyError:\n raise Exception('Word not in dictionary')\n return self._similarity_query(self.word_vectors[word_idx], number)[1:]", - "docstring": "Run a similarity query, retrieving number\n most similar words." - }, - { - "code": "def gen_multivalued_slot(self, target_name_base: str, target_type: IRIREF) -> IRIREF:\n list_shape_id = IRIREF(target_name_base + \"__List\")\n if list_shape_id not in self.list_shapes:\n list_shape = Shape(id=list_shape_id, closed=True)\n list_shape.expression = EachOf()\n expressions = [TripleConstraint(predicate=RDF.first, valueExpr=target_type, min=0, max=1)]\n targets = ShapeOr()\n targets.shapeExprs = [(NodeConstraint(values=[RDF.nil])), list_shape_id]\n expressions.append(TripleConstraint(predicate=RDF.rest, valueExpr=targets))\n list_shape.expression.expressions = expressions\n self.shapes.append(list_shape)\n self.list_shapes.append(list_shape_id)\n return list_shape_id", - "docstring": "Generate a shape that represents an RDF list of target_type\n\n @param target_name_base:\n @param target_type:\n @return:" - }, - { - "code": "def add_custom_metadata(self, key, value, meta_type=None):\n self.metadata.append({'key': key, 'value': value, 'type': meta_type})", - "docstring": "Add custom metadata to the Video. meta_type is required for XML API." 
- }, - { - "code": "def request(\n self, method, path, data=None, files=None, json=None, params=None\n ):\n params = deepcopy(params) or {}\n params[\"raw_json\"] = 1\n if isinstance(data, dict):\n data = deepcopy(data)\n data[\"api_type\"] = \"json\"\n data = sorted(data.items())\n url = urljoin(self._requestor.oauth_url, path)\n return self._request_with_retries(\n data=data,\n files=files,\n json=json,\n method=method,\n params=params,\n url=url,\n )", - "docstring": "Return the json content from the resource at ``path``.\n\n :param method: The request verb. E.g., get, post, put.\n :param path: The path of the request. This path will be combined with\n the ``oauth_url`` of the Requestor.\n :param data: Dictionary, bytes, or file-like object to send in the body\n of the request.\n :param files: Dictionary, mapping ``filename`` to file-like object.\n :param json: Object to be serialized to JSON in the body of the\n request.\n :param params: The query parameters to send with the request.\n\n Automatically refreshes the access token if it becomes invalid and a\n refresh token is available. Raises InvalidInvocation in such a case if\n a refresh token is not available." - }, - { - "code": "def send_request(self, request, response=None):\n local_response = None\n if response is None:\n if request.request_type == \"json\":\n local_response = ResponseJson()\n elif request.request_type == \"xml\":\n local_response = ResponseXml()\n else:\n raise UnknownRequestType()\n try:\n server_request = ur.urlopen(\n self.url,\n request.get_request().encode(\"utf-8\"),\n self.timeout\n )\n server_response = server_request.read()\n if isinstance(server_response, bytes):\n server_response = server_response.decode(\"utf-8\")\n if response is None:\n local_response.set_response(\n server_response\n )\n else:\n response.set_response(server_response)\n except ue.HTTPError as e:\n if e.code == 500:\n server_response = e.fp.read()\n if isinstance(server_response, bytes):\n server_response = server_response.decode(\"utf-8\")\n if response is None:\n local_response.set_response(server_response)\n else:\n response.set_response(server_response)\n else:\n raise e\n if response is None:\n return local_response", - "docstring": "Send the request.\n\n Sends the request and retrieves the results, formats them and returns\n them in a dict or a list (when it's a batchresponse). If something\n goes wrong, raises a SoapFailure or a HTTPError on system-side\n failures. 
Note: AuthRequest raises an HTTPError on failed\n authentications!\n\n :param request: The request to send\n :type request: pythonzimbra.request.Request\n :param response: A prebuilt response object\n :type response: pythonzimbra.response.Response\n :raises: pythonzimbra.exceptions.communication.SoapFailure or\n urllib2.HTTPError" - }, - { - "code": "def show_instance(name, session=None, call=None):\n if call == 'function':\n raise SaltCloudException(\n 'The show_instnce function must be called with -a or --action.'\n )\n log.debug('show_instance-> name: %s session: %s', name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record['is_a_template'] and not record['is_control_domain']:\n try:\n base_template_name = record['other_config']['base_template_name']\n except Exception:\n base_template_name = None\n log.debug(\n 'VM %s, doesnt have base_template_name attribute',\n record['name_label']\n )\n ret = {'id': record['uuid'],\n 'image': base_template_name,\n 'name': record['name_label'],\n 'size': record['memory_dynamic_max'],\n 'state': record['power_state'],\n 'private_ips': get_vm_ip(name, session),\n 'public_ips': None}\n __utils__['cloud.cache_node'](\n ret,\n __active_provider_name__,\n __opts__\n )\n return ret", - "docstring": "Show information about a specific VM or template\n\n .. code-block:: bash\n\n salt-cloud -a show_instance xenvm01\n\n .. note:: memory is memory_dynamic_max" - }, - { - "code": "def _change_property_with_validity_check(self, property_name, value):\n assert isinstance(property_name, string_types)\n old_value = getattr(self, property_name)\n setattr(self, property_name, value)\n valid, message = self._check_validity()\n if not valid:\n setattr(self, property_name, old_value)\n class_name = self.__class__.__name__\n raise ValueError(\"The {2}'s '{0}' could not be changed: {1}\".format(property_name[1:], message, class_name))", - "docstring": "Helper method to change a property and reset it if the validity check fails\n\n :param str property_name: The name of the property to be changed, e.g. '_data_flow_id'\n :param value: The new desired value for this property\n :raises exceptions.ValueError: if a property could not be changed" - }, - { - "code": "def selectlt(table, field, value, complement=False):\n value = Comparable(value)\n return selectop(table, field, value, operator.lt, complement=complement)", - "docstring": "Select rows where the given field is less than the given value." - }, - { - "code": "def ParseRow(header,\n row):\n precondition.AssertDictType(row, Text, Text)\n result = rdf_osquery.OsqueryRow()\n for column in header.columns:\n result.values.append(row[column.name])\n return result", - "docstring": "Parses a single row of osquery output.\n\n Args:\n header: A parsed header describing the row format.\n row: A row in a \"parsed JSON\" representation.\n\n Returns:\n A parsed `rdf_osquery.OsqueryRow` instance." 
- }, - { - "code": "def _number_finder(s, regex, numconv):\n s = regex.split(s)\n if len(s) == 1:\n return tuple(s)\n s = remove_empty(s)\n for i in range(len(s)):\n try:\n s[i] = numconv(s[i])\n except ValueError:\n pass\n if not isinstance(s[0], six.string_types):\n return [''] + s\n else:\n return s", - "docstring": "Helper to split numbers" - }, - { - "code": "def blockwise_inner_join(data, left, foreign_key, right,\n force_repeat=None,\n foreign_key_name=None):\n if isinstance(foreign_key, string_types):\n foreign_key = data[foreign_key]\n return _blockwise_inner_join(data, left, foreign_key, right,\n force_repeat, foreign_key_name)", - "docstring": "Perform a blockwise inner join.\n\n Perform a blockwise inner join from names specified in ``left`` to\n ``right`` via ``foreign_key``: left->foreign_key->right.\n\n Parameters\n ----------\n data : array\n A structured NumPy array.\n left : array\n Array of left side column names.\n foreign_key : array or string\n NumPy array or string ``foreign_key`` column name. This column can be\n either an integer or an array of ints. If ``foreign_key`` is an array\n of int column, left column will be treated according to left column\n type:\n\n * Scalar columns or columns in ``force_repeat`` will be repeated\n * Array columns not in ``force_repeat`` will be assumed to the\n same length as ``foreign_key`` and will be stretched by index\n right : array\n Array of right side column names. These are array columns that each\n index ``foreign_key`` points to. These columns are assumed to have the\n same length.\n force_repeat : array, optional (default=None)\n Array of left column names that will be forced to stretch even if it's\n an array (useful when you want to emulate a multiple join).\n foreign_key_name : str, optional (default=None)\n The name of foreign key column in the output array.\n\n Examples\n --------\n >>> import numpy as np\n >>> from root_numpy import blockwise_inner_join\n >>> test_data = np.array([\n (1.0, np.array([11, 12, 13]), np.array([1, 0, 1]), 0, np.array([1, 2, 3])),\n (2.0, np.array([21, 22, 23]), np.array([-1, 2, -1]), 1, np.array([31, 32, 33]))],\n dtype=[('sl', np.float), ('al', 'O'), ('fk', 'O'), ('s_fk', np.int), ('ar', 'O')])\n\n >>> blockwise_inner_join(test_data, ['sl', 'al'], test_data['fk'], ['ar'])\n array([(1.0, 11, 2, 1), (1.0, 12, 1, 0), (1.0, 13, 2, 1), (2.0, 22, 33, 2)],\n dtype=[('sl', '>> blockwise_inner_join(test_data, ['sl', 'al'], test_data['fk'], ['ar'], force_repeat=['al'])\n array([(1.0, [11, 12, 13], 2, 1), (1.0, [11, 12, 13], 1, 0),\n (1.0, [11, 12, 13], 2, 1), (2.0, [21, 22, 23], 33, 2)],\n dtype=[('sl', ' datetime.timedelta(seconds=seconds):\n raise InvalidArguments(until, since)\n kwargs['since'] = since.isoformat()\n kwargs['until'] = until.isoformat()\n return getattr(Entity, 'find').__func__(cls, *args, **kwargs)", - "docstring": "Find notifications.\n\n Optional kwargs are:\n since:\n datetime instance\n until:\n datetime instance\n\n If not specified, until will default to now(), and since will default\n to 30 days prior to until.\n\n As per PD spec, date range must not exceed 1 month." 
- }, - { - "code": "def getbugs(self, idlist,\n include_fields=None, exclude_fields=None, extra_fields=None,\n permissive=True):\n data = self._getbugs(idlist, include_fields=include_fields,\n exclude_fields=exclude_fields, extra_fields=extra_fields,\n permissive=permissive)\n return [(b and Bug(self, dict=b,\n autorefresh=self.bug_autorefresh)) or None\n for b in data]", - "docstring": "Return a list of Bug objects with the full complement of bug data\n already loaded. If there's a problem getting the data for a given id,\n the corresponding item in the returned list will be None." - }, - { - "code": "def get_iss_info(self):\n self.write_data([self.ISS_CMD, self.ISS_VERSION])\n response = self.read_data(3)\n if len(response) == 3:\n response = self.decode(response)\n self.module = response[0]\n self.firmware = response[1]\n self._mode = response[2]\n else:\n raise USBISSError(\"Could not get version details\")", - "docstring": "Get information about the USB-ISS\n\n Querying will return three bytes;\n - the module ID (7),\n - firmware version (currently 2),\n - the current operating mode." - }, - { - "code": "def get_desktop_env():\n desktop = os.environ.get(\"XDG_CURRENT_DESKTOP\")\n if desktop:\n return desktop\n desktop = os.environ.get(\"DESKTOP_SESSION\")\n if desktop:\n return desktop\n desktop = os.environ.get(\"GNOME_DESKTOP_SESSION_ID\")\n if desktop:\n return \"GNOME\"\n desktop = os.environ.get(\"MATE_DESKTOP_SESSION_ID\")\n if desktop:\n return \"MATE\"\n desktop = os.environ.get(\"SWAYSOCK\")\n if desktop:\n return \"SWAY\"\n desktop = os.environ.get(\"DESKTOP_STARTUP_ID\")\n if desktop and \"awesome\" in desktop:\n return \"AWESOME\"\n return None", - "docstring": "Identify the current running desktop environment." - }, - { - "code": "def disable_tracing_hostname(url, blacklist_hostnames=None):\n if blacklist_hostnames is None:\n _tracer = execution_context.get_opencensus_tracer()\n try:\n blacklist_hostnames = [\n '{}:{}'.format(\n _tracer.exporter.host_name,\n _tracer.exporter.port\n )\n ]\n except(AttributeError):\n blacklist_hostnames = []\n return url in blacklist_hostnames", - "docstring": "Disable tracing for the provided blacklist URLs, by default not tracing\n the exporter url.\n\n If the url path starts with the blacklisted path, return True.\n\n :type blacklist_hostnames: list\n :param blacklist_hostnames: URL that not tracing.\n\n :rtype: bool\n :returns: True if not tracing, False if tracing." 
- }, - { - "code": "def line_plot(self, x='year', y='value', **kwargs):\n df = self.as_pandas(with_metadata=kwargs)\n variables = df['variable'].unique()\n if x in variables or y in variables:\n keep_vars = set([x, y]) & set(variables)\n df = df[df['variable'].isin(keep_vars)]\n idx = list(set(df.columns) - set(['value']))\n df = (df\n .reset_index()\n .set_index(idx)\n .value\n .unstack(level='variable')\n .rename_axis(None, axis=1)\n .reset_index()\n .set_index(META_IDX)\n )\n if x != 'year' and y != 'year':\n df = df.drop('year', axis=1)\n ax, handles, labels = plotting.line_plot(\n df.dropna(), x=x, y=y, **kwargs)\n return ax", - "docstring": "Plot timeseries lines of existing data\n\n see pyam.plotting.line_plot() for all available options" - }, - { - "code": "def expose(rule, **options):\n def decorator(f):\n if not hasattr(f, \"urls\"):\n f.urls = []\n if isinstance(rule, (list, tuple)):\n f.urls.extend(rule)\n else:\n f.urls.append((rule, options))\n return f\n return decorator", - "docstring": "Decorator to add an url rule to a function" - }, - { - "code": "def send_text(self, text, **options):\n return self.bot.send_message(self.id, text, **options)", - "docstring": "Send a text message to the chat.\n\n :param str text: Text of the message to send\n :param options: Additional sendMessage options (see\n https://core.telegram.org/bots/api#sendmessage" - }, - { - "code": "def cancel(self, *args, **kwargs):\n super().cancel()\n for future in self.traverse():\n if not future.cancelled():\n future.cancel()", - "docstring": "Manually cancel all tasks assigned to this event loop." - }, - { - "code": "def drop(self, labels=None, axis=0, index=None, columns=None,\n level=None, inplace=False, errors='raise'):\n return super().drop(labels=labels, axis=axis, index=index,\n columns=columns, level=level, inplace=inplace,\n errors=errors)", - "docstring": "Drop specified labels from rows or columns.\n\n Remove rows or columns by specifying label names and corresponding\n axis, or by specifying directly index or column names. When using a\n multi-index, labels on different levels can be removed by specifying\n the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index or column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Whether to drop labels from the index (0 or 'index') or\n columns (1 or 'columns').\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is equivalent to ``index=labels``).\n\n .. versionadded:: 0.21.0\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n\n .. 
versionadded:: 0.21.0\n level : int or level name, optional\n For MultiIndex, level from which the labels will be removed.\n inplace : bool, default False\n If True, do operation inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are\n dropped.\n\n Returns\n -------\n DataFrame\n DataFrame without the removed index or column labels.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis.\n\n See Also\n --------\n DataFrame.loc : Label-location based indexer for selection by label.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n DataFrame.drop_duplicates : Return DataFrame with duplicate rows\n removed, optionally only considering certain columns.\n Series.drop : Return Series with specified index labels removed.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),\n ... columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n Drop columns and/or rows of MultiIndex DataFrame\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],\n ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],\n ... [250, 150], [1.5, 0.8], [320, 250],\n ... [1, 0.8], [0.3, 0.2]])\n >>> df\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n length 1.5 1.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n length 1.5 0.8\n falcon speed 320.0 250.0\n weight 1.0 0.8\n length 0.3 0.2\n\n >>> df.drop(index='cow', columns='small')\n big\n lama speed 45.0\n weight 200.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n\n >>> df.drop(index='length', level=1)\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n falcon speed 320.0 250.0\n weight 1.0 0.8" - }, - { - "code": "def update_forum_votes(sender, **kwargs):\r\n vote = kwargs['instance']\r\n if vote.content_type.app_label != \"fretboard\":\r\n return\r\n if vote.content_type.model == \"topic\":\r\n t = get_model('fretboard', 'Topic').objects.get(id=vote.object.id)\r\n t.votes = t.score()\r\n t.save(update_fields=['votes'])\r\n elif vote.content_type.model == \"post\":\r\n p = get_model('fretboard', 'Post').objects.get(id=vote.object.id)\r\n p.votes = p.score()\r\n p.save(update_fields=['votes'])", - "docstring": "When a Vote is added, re-saves the topic or post to update vote count.\r\n Since Votes can be assigned\r\n to any content type, first makes sure we are dealing with a forum post or topic.\r\n\r\n Deprecated 1-6-14 by storing score as cached property" - }, - { - "code": "def add_role(ctx, role):\n if role is None:\n log('Specify the role with --role')\n return\n if ctx.obj['username'] is None:\n log('Specify the username with --username')\n return\n change_user = ctx.obj['db'].objectmodels['user'].find_one({\n 'name': ctx.obj['username']\n })\n if role not in change_user.roles:\n change_user.roles.append(role)\n change_user.save()\n log('Done')\n else:\n log('User already has that role!', lvl=warn)", - "docstring": "Grant a role to an existing user" - }, - { - "code": "def 
randomize_es(es_queryset):\n return es_queryset.query(\n query.FunctionScore(\n functions=[function.RandomScore()]\n )\n ).sort(\"-_score\")", - "docstring": "Randomize an elasticsearch queryset." - }, - { - "code": "def recover_async(self, requeue=False):\n args = Writer()\n args.write_bit(requeue)\n self.send_frame(MethodFrame(self.channel_id, 60, 100, args))", - "docstring": "Redeliver all unacknowledged messages on this channel.\n\n This method is deprecated in favour of the synchronous\n recover/recover-ok" - }, - { - "code": "def get_assessment_part_form(self, *args, **kwargs):\n if isinstance(args[-1], list) or 'assessment_part_record_types' in kwargs:\n return self.get_assessment_part_form_for_create(*args, **kwargs)\n else:\n return self.get_assessment_part_form_for_update(*args, **kwargs)", - "docstring": "Pass through to provider AssessmentPartAdminSession.get_assessment_part_form_for_update" - }, - { - "code": "def quotify(x):\n def _quotify(key, value, fmt, meta):\n if key == 'Quoted':\n ret = []\n quote = '\"' if value[0]['t'] == 'DoubleQuote' else \"'\"\n if value[1][0]['t'] == 'Str':\n value[1][0]['c'] = quote + value[1][0]['c']\n else:\n ret.append(Str(quote))\n if value[1][-1]['t'] == 'Str':\n value[1][-1]['c'] = value[1][-1]['c'] + quote\n ret += value[1]\n else:\n ret += value[1] + [Str(quote)]\n return ret\n return None\n return walk(walk(x, _quotify, '', {}), join_strings, '', {})", - "docstring": "Replaces Quoted elements in element list 'x' with quoted strings.\n\n Pandoc uses the Quoted element in its json when --smart is enabled.\n Output to TeX/pdf automatically triggers --smart.\n\n stringify() ignores Quoted elements. Use quotify() first to replace\n Quoted elements in 'x' with quoted strings. 'x' should be a deep copy so\n that the underlying document is left untouched.\n\n Returns x." - }, - { - "code": "def line(self, x1, y1, x2, y2, color=\"black\", width=1):\n return self.tk.create_line(\n x1, y1, x2, y2, \n width = width,\n fill = \"\" if color is None else utils.convert_color(color)\n )", - "docstring": "Draws a line between 2 points\n\n :param int x1:\n The x position of the starting point.\n\n :param int y1:\n The y position of the starting point.\n\n :param int x2:\n The x position of the end point.\n\n :param int y2:\n The y position of the end point.\n\n :param str color:\n The color of the line. Defaults to `\"black\"`.\n\n :param int width:\n The width of the line. Defaults to `1`.\n\n :return:\n The id of the line." - }, - { - "code": "def combine(self, other):\n for term in other.metadata.keys():\n if term not in self.metadata:\n self.metadata[term] = {}\n fields = other.metadata[term].keys()\n for field in fields:\n if field not in self.metadata[term]:\n self.metadata[term][field] = {}\n keys = other.metadata[term][field].keys()\n for key in keys:\n if key not in self.metadata[term][field]:\n self.metadata[term][field][key] = other.metadata[term][field][\n key\n ]\n else:\n self.metadata[term][field][key].extend(\n other.metadata[term][field][key]\n )", - "docstring": "An instance of lunr.MatchData will be created for every term that\n matches a document.\n\n However only one instance is required in a lunr.Index~Result. This\n method combines metadata from another instance of MatchData with this\n object's metadata." 
- }, - { - "code": "def configure(self, args):\n for plug in self._plugins:\n plug_name = self.plugin_name(plug)\n plug.enabled = getattr(args, \"plugin_%s\" % plug_name, False)\n if plug.enabled and getattr(plug, \"configure\", None):\n if callable(getattr(plug, \"configure\", None)):\n plug.configure(args)\n LOG.debug(\"Available plugins: %s\", self._plugins)\n self.plugins = [plugin for plugin in self._plugins if getattr(plugin, \"enabled\", False)]\n LOG.debug(\"Enabled plugins: %s\", self.plugins)", - "docstring": "Configure the set of plugins with the given args.\n\n After configuration, disabled plugins are removed from the plugins list." - }, - { - "code": "def downsample_with_averaging(array, factor):\n factor = tuple(factor)\n output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor))\n temp = np.zeros(output_shape, dtype=np.float32)\n counts = np.zeros(output_shape, np.int)\n for offset in np.ndindex(factor):\n part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]\n indexing_expr = tuple(np.s_[:s] for s in part.shape)\n temp[indexing_expr] += part\n counts[indexing_expr] += 1\n return np.cast[array.dtype](temp / counts)", - "docstring": "Downsample x by factor using averaging.\n\n @return: The downsampled array, of the same type as x." - }, - { - "code": "def process_metadata(meta):\n mvgds = []\n metadata = meta[0]\n for mvgd in meta:\n if isinstance(mvgd['mv_grid_districts'], list):\n mvgds.extend(mvgd['mv_grid_districts'])\n else:\n mvgds.append(mvgd['mv_grid_districts'])\n metadata['mv_grid_districts'] = mvgds\n return metadata", - "docstring": "Merge metadata of run on multiple grid districts\n\n Parameters\n ----------\n meta: list of dict\n Metadata of run of each MV grid district\n\n Returns\n -------\n dict\n Single metadata dict including merge metadata" - }, - { - "code": "def create_child_folder(self, folder_name):\n if not folder_name:\n return None\n if self.root:\n url = self.build_url(self._endpoints.get('root_folders'))\n else:\n url = self.build_url(\n self._endpoints.get('child_folders').format(id=self.folder_id))\n response = self.con.post(url,\n data={self._cc('displayName'): folder_name})\n if not response:\n return None\n folder = response.json()\n return self.__class__(parent=self, **{self._cloud_data_key: folder})", - "docstring": "Creates a new child folder\n\n :param str folder_name: name of the new folder to create\n :return: newly created folder\n :rtype: ContactFolder or None" - }, - { - "code": "def _parse_signal_lines(signal_lines):\n n_sig = len(signal_lines)\n signal_fields = {}\n for field in SIGNAL_SPECS.index:\n signal_fields[field] = n_sig * [None]\n for ch in range(n_sig):\n (signal_fields['file_name'][ch], signal_fields['fmt'][ch],\n signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch],\n signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch],\n signal_fields['baseline'][ch], signal_fields['units'][ch],\n signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch],\n signal_fields['init_value'][ch], signal_fields['checksum'][ch],\n signal_fields['block_size'][ch],\n signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0]\n for field in SIGNAL_SPECS.index:\n if signal_fields[field][ch] == '':\n signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default']\n if field == 'baseline' and signal_fields['adc_zero'][ch] != '':\n signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch])\n else:\n if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types:\n 
signal_fields[field][ch] = int(signal_fields[field][ch])\n elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types:\n signal_fields[field][ch] = float(signal_fields[field][ch])\n if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0:\n signal_fields['adc_gain'][ch] = 200.\n return signal_fields", - "docstring": "Extract fields from a list of signal line strings into a dictionary." - }, - { - "code": "def t(inspect_packages=False, depth=0, **kwargs):\n try:\n frames = inspect.stack()\n kwargs[\"frames\"] = frames\n kwargs[\"inspect_packages\"] = inspect_packages\n kwargs[\"depth\"] = depth\n with Reflect.context(**kwargs) as r:\n instance = T_CLASS(r, stream, **kwargs)\n instance()\n finally:\n del frames", - "docstring": "print a backtrace\n\n since -- 7-6-12\n\n inpsect_packages -- boolean -- by default, this only prints code of packages that are not \n in the pythonN directories, that cuts out a lot of the noise, set this to True if you\n want a full stacktrace\n depth -- integer -- how deep you want the stack trace to print (ie, if you only care about\n the last three calls, pass in depth=3 so you only get the last 3 rows of the stack)" - }, - { - "code": "def dump(self):\n if self.running_config.output_file:\n _, extension = op.splitext(self.running_config.output_file)\n extension = extension.replace(\".\", \"\")\n if extension not in self.ALLOWED_DUMP_FORMATS:\n raise PCException(\n f\"Extension of dump file is not available. \"\n f\"Allowed extensions are: \"\n f\"{', '.join(self.ALLOWED_DUMP_FORMATS)}\")\n with open(self.running_config.output_file, \"w\") as f:\n if extension == \"csv\":\n csv_writer = csv.writer(f)\n csv_writer.writerow((\"\n \"CPE\",\n \"CVE\",\n \"Score\",\n \"Summary\"))\n csv_writer.writerows(self._to_csv())\n elif extension == \"json\":\n json.dump(self.results,\n f,\n indent=4,\n sort_keys=True)\n elif extension == \"raw\":\n f.write(self._to_table())", - "docstring": "Dump to file" - }, - { - "code": "def fromgroups(args):\n from jcvi.formats.bed import Bed\n p = OptionParser(fromgroups.__doc__)\n opts, args = p.parse_args(args)\n if len(args) < 2:\n sys.exit(not p.print_help())\n groupsfile = args[0]\n bedfiles = args[1:]\n beds = [Bed(x) for x in bedfiles]\n fp = open(groupsfile)\n groups = [row.strip().split(\",\") for row in fp]\n for b1, b2 in product(beds, repeat=2):\n extract_pairs(b1, b2, groups)", - "docstring": "%prog fromgroups groupsfile a.bed b.bed ...\n\n Flatten the gene familes into pairs, the groupsfile is a file with each line\n containing the members, separated by comma. The commands also require\n several bed files in order to sort the pairs into different piles (e.g.\n pairs of species in comparison." - }, - { - "code": "def _add_loss_summaries(total_loss):\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n for l in losses + [total_loss]:\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n return loss_averages_op", - "docstring": "Add summaries for losses in CIFAR-10 model.\n\n Generates moving average for all losses and associated summaries for\n visualizing the performance of the network.\n\n Args:\n total_loss: Total loss from loss().\n Returns:\n loss_averages_op: op for generating moving averages of losses." 
- }, - { - "code": "def set_dimensional_calibrations(self, dimensional_calibrations: typing.List[CalibrationModule.Calibration]) -> None:\n self.__data_item.set_dimensional_calibrations(dimensional_calibrations)", - "docstring": "Set the dimensional calibrations.\n\n :param dimensional_calibrations: A list of calibrations, must match the dimensions of the data.\n\n .. versionadded:: 1.0\n\n Scriptable: Yes" - }, - { - "code": "def update_uri(cls, uri):\n logger.debug('Request: Update URI to {0}'.format(uri))\n local_store.instance.base_url = uri", - "docstring": "Set the URI of the StackInABox framework.\n\n :param uri: the base URI used to match the service." - }, - { - "code": "def time_track(self, absolute_time=False, accuracy='ns'):\n try:\n increment = self.property('wf_increment')\n offset = self.property('wf_start_offset')\n except KeyError:\n raise KeyError(\"Object does not have time properties available.\")\n periods = len(self._data)\n relative_time = np.linspace(\n offset,\n offset + (periods - 1) * increment,\n periods)\n if not absolute_time:\n return relative_time\n try:\n start_time = self.property('wf_start_time')\n except KeyError:\n raise KeyError(\n \"Object does not have start time property available.\")\n try:\n unit_correction = {\n 's': 1e0,\n 'ms': 1e3,\n 'us': 1e6,\n 'ns': 1e9,\n }[accuracy]\n except KeyError:\n raise KeyError(\"Invalid accuracy: {0}\".format(accuracy))\n time_type = \"timedelta64[{0}]\".format(accuracy)\n return (np.datetime64(start_time) +\n (relative_time * unit_correction).astype(time_type))", - "docstring": "Return an array of time or the independent variable for this channel\n\n This depends on the object having the wf_increment\n and wf_start_offset properties defined.\n Note that wf_start_offset is usually zero for time-series data.\n If you have time-series data channels with different start times,\n you should use the absolute time or calculate the time offsets using\n the wf_start_time property.\n\n For larger timespans, the accuracy setting should be set lower.\n The default setting is 'ns', which has a timespan of\n [1678 AD, 2262 AD]. For the exact ranges, refer to\n http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html\n section \"Datetime Units\".\n\n :param absolute_time: Whether the returned time values are absolute\n times rather than relative to the start time. If true, the\n wf_start_time property must be set.\n :param accuracy: The accuracy of the returned datetime64 array.\n :rtype: NumPy array.\n :raises: KeyError if required properties aren't found" - }, - { - "code": "def check_missing(self, args):\n return [opt.name for opt in self\n if (opt.name not in args) and (opt.default is None)]", - "docstring": "Returns the names of all options that are required but were not specified.\n\n All options that don't have a default value are required in order to run the\n workflow.\n\n Args:\n args (dict): A dictionary of the provided arguments that is checked for\n missing options.\n\n Returns:\n list: A list with the names of the options that are missing from the\n provided arguments." 
- }, - { - "code": "def findLorenzDistanceAtTargetKY(Economy,param_name,param_count,center_range,spread,dist_type):\n intermediateObjective = lambda center : getKYratioDifference(Economy = Economy,\n param_name = param_name,\n param_count = param_count,\n center = center,\n spread = spread,\n dist_type = dist_type)\n optimal_center = brentq(intermediateObjective,center_range[0],center_range[1],xtol=10**(-6))\n Economy.center_save = optimal_center\n Economy(LorenzBool = True)\n Economy.distributeParams(param_name,param_count,optimal_center,spread,dist_type)\n Economy.solveAgents()\n Economy.makeHistory()\n dist = Economy.calcLorenzDistance()\n Economy(LorenzBool = False)\n print ('findLorenzDistanceAtTargetKY tried spread = ' + str(spread) + ' and got ' + str(dist))\n return dist", - "docstring": "Finds the sum of squared distances between simulated and target Lorenz points in an economy when\n a given parameter has heterogeneity according to some distribution. The class of distribution\n and a measure of spread are given as inputs, but the measure of centrality such that the capital\n to income ratio matches the target ratio must be found.\n\n Parameters\n ----------\n Economy : cstwMPCmarket\n An object representing the entire economy, containing the various AgentTypes as an attribute.\n param_name : string\n The name of the parameter of interest that varies across the population.\n param_count : int\n The number of different values the parameter of interest will take on.\n center_range : [float,float]\n Bounding values for a measure of centrality for the distribution of the parameter of interest.\n spread : float\n A measure of spread or diffusion for the distribution of the parameter of interest.\n dist_type : string\n The type of distribution to be used. Can be \"lognormal\" or \"uniform\" (can expand).\n\n Returns\n -------\n dist : float\n Sum of squared distances between simulated and target Lorenz points for this economy (sqrt)." 
- }, - { - "code": "def get_grade_systems_by_gradebooks(self, gradebook_ids):\n grade_system_list = []\n for gradebook_id in gradebook_ids:\n grade_system_list += list(\n self.get_grade_systems_by_gradebook(gradebook_id))\n return objects.GradeSystemList(grade_system_list)", - "docstring": "Gets the list of grade systems corresponding to a list of ``Gradebooks``.\n\n arg: gradebook_ids (osid.id.IdList): list of gradebook\n ``Ids``\n return: (osid.grading.GradeSystemList) - list of grade systems\n raise: NullArgument - ``gradebook_ids`` is ``null``\n raise: OperationFailed - unable to complete request\n raise: PermissionDenied - authorization failure\n *compliance: mandatory -- This method must be implemented.*" - }, - { - "code": "def wrap_error(\n self, data, renderer_context, keys_are_fields, issue_is_title):\n response = renderer_context.get(\"response\", None)\n status_code = str(response and response.status_code)\n errors = []\n for field, issues in data.items():\n if isinstance(issues, six.string_types):\n issues = [issues]\n for issue in issues:\n error = self.dict_class()\n error[\"status\"] = status_code\n if issue_is_title:\n error[\"title\"] = issue\n else:\n error[\"detail\"] = issue\n if keys_are_fields:\n if field in ('non_field_errors', NON_FIELD_ERRORS):\n error[\"path\"] = '/-'\n else:\n error[\"path\"] = '/' + field\n errors.append(error)\n wrapper = self.dict_class()\n wrapper[\"errors\"] = errors\n return wrapper", - "docstring": "Convert error native data to the JSON API Error format\n\n JSON API has a different format for errors, but Django REST Framework\n doesn't have a separate rendering path for errors. This results in\n some guesswork to determine if data is an error, what kind, and how\n to handle it.\n\n As of August 2014, there is not a consensus about the error format in\n JSON API. The format documentation defines an \"errors\" collection, and\n some possible fields for that collection, but without examples for\n common cases. If and when consensus is reached, this format will\n probably change." 
- }, - { - "code": "def _find_statement_by_line(node, line):\n if isinstance(node, (nodes.ClassDef, nodes.FunctionDef)):\n node_line = node.fromlineno\n else:\n node_line = node.lineno\n if node_line == line:\n return node\n for child in node.get_children():\n result = _find_statement_by_line(child, line)\n if result:\n return result\n return None", - "docstring": "Extracts the statement on a specific line from an AST.\n\n If the line number of node matches line, it will be returned;\n otherwise its children are iterated and the function is called\n recursively.\n\n :param node: An astroid node.\n :type node: astroid.bases.NodeNG\n :param line: The line number of the statement to extract.\n :type line: int\n :returns: The statement on the line, or None if no statement for the line\n can be found.\n :rtype: astroid.bases.NodeNG or None" - }, - { - "code": "def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name,\n container_name=None):\n if container_name is None:\n container_name = container_group_name\n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourcegroups/', resource_group,\n '/providers/Microsoft.ContainerInstance/ContainerGroups/',\n container_group_name,\n '/containers/', container_name, '/logs?api-version=', CONTAINER_API])\n return do_get(endpoint, access_token)", - "docstring": "Get the container logs for containers in a container group.\n\n Args:\n access_token (str): A valid Azure authentication token.\n subscription_id (str): Azure subscription id.\n resource_group (str): Azure resource group name.\n container_group_name (str): Name of container instance group.\n container_name (str): Optional name of a container in the group.\n\n Returns:\n HTTP response. Container logs." - }, - { - "code": "def integer_squareroot(value: int) -> int:\n if not isinstance(value, int) or isinstance(value, bool):\n raise ValueError(\n \"Value must be an integer: Got: {0}\".format(\n type(value),\n )\n )\n if value < 0:\n raise ValueError(\n \"Value cannot be negative: Got: {0}\".format(\n value,\n )\n )\n with decimal.localcontext() as ctx:\n ctx.prec = 128\n return int(decimal.Decimal(value).sqrt())", - "docstring": "Return the integer square root of ``value``.\n\n Uses Python's decimal module to compute the square root of ``value`` with\n a precision of 128-bits. The value 128 is chosen since the largest square\n root of a 256-bit integer is a 128-bit integer." 
- }, - { - "code": "def _link_policy(self, role):\n policy_arn = self.get_policy_arn()\n if role is not None and policy_arn not in role.ManagedPolicyArns:\n role.ManagedPolicyArns.append(policy_arn)", - "docstring": "If this source triggers a Lambda function whose execution role is auto-generated by SAM, add the\n appropriate managed policy to this Role.\n\n :param model.iam.IAMROle role: the execution role generated for the function" - }, - { - "code": "def visit_pass(self, node, parent):\n return nodes.Pass(node.lineno, node.col_offset, parent)", - "docstring": "visit a Pass node by returning a fresh instance of it" - }, - { - "code": "def covered_interval(bin):\n if bin < 0 or bin > MAX_BIN:\n raise OutOfRangeError(\n 'Invalid bin number %d (maximum bin number is %d)'\n % (bin, MAX_BIN))\n shift = SHIFT_FIRST\n for offset in BIN_OFFSETS:\n if offset <= bin:\n return bin - offset << shift, bin + 1 - offset << shift\n shift += SHIFT_NEXT", - "docstring": "Given a bin number `bin`, return the interval covered by this bin.\n\n :arg int bin: Bin number.\n\n :return: Tuple of `start, stop` being the zero-based, open-ended interval\n covered by `bin`.\n :rtype: tuple(int)\n\n :raise OutOfRangeError: If bin number `bin` exceeds the maximum bin\n number." - }, - { - "code": "def get_shape(self, ds_id, ds_info):\n var_path = ds_info['file_key']\n shape = self[var_path + '/shape']\n if ((ds_info.get('standard_name') == \"longitude\" or ds_info.get('standard_name') == \"latitude\") and\n ds_id.resolution == 10000):\n return shape[0], int(shape[1] / 2)\n return shape", - "docstring": "Get output shape of specified dataset." - }, - { - "code": "def resource_as_xml(self, resource):\n e = self.resource_etree_element(resource)\n if (sys.version_info >= (3, 0)):\n return(tostring(e, encoding='unicode', method='xml'))\n elif (sys.version_info >= (2, 7)):\n s = tostring(e, encoding='UTF-8', method='xml')\n else:\n s = tostring(e, encoding='UTF-8')\n return(s.replace(\"\\n\", ''))", - "docstring": "Return string for the resource as part of an XML sitemap.\n\n Returns a string with the XML snippet representing the resource,\n without any XML declaration." - }, - { - "code": "def profile_update_args_v2(self, profile):\n ij = self.load_install_json(profile.get('install_json', 'install.json'))\n if (\n profile.get('args', {}).get('app') is None\n and profile.get('args', {}).get('default') is None\n ):\n _args = profile.pop('args')\n profile['args'] = {}\n profile['args']['app'] = {}\n profile['args']['default'] = {}\n for arg in self.profile_settings_args_install_json(ij, None):\n try:\n profile['args']['app'][arg] = _args.pop(arg)\n except KeyError:\n if self.args.verbose:\n print(\n '{}{}Input \"{}\" not found in profile \"{}\".'.format(\n c.Style.BRIGHT, c.Fore.YELLOW, arg, profile.get('profile_name')\n )\n )\n profile['args']['default'] = _args\n print(\n '{}{}Updating args section to v2 schema for profile {}.'.format(\n c.Style.BRIGHT, c.Fore.YELLOW, profile.get('profile_name')\n )\n )", - "docstring": "Update v1 profile args to v2 schema for args.\n\n .. code-block:: javascript\n\n \"args\": {\n \"app\": {\n \"input_strings\": \"capitalize\",\n \"tc_action\": \"Capitalize\"\n }\n },\n \"default\": {\n \"api_access_id\": \"$env.API_ACCESS_ID\",\n \"api_default_org\": \"$env.API_DEFAULT_ORG\",\n },\n\n Args:\n profile (dict): The dictionary containting the profile settings." 
- }, - { - "code": "def starts_with(self, other: 'Key') -> bool:\n if (self.key_type, self.identity, self.group) != (other.key_type, other.identity,\n other.group):\n return False\n if self.key_type == KeyType.TIMESTAMP:\n return True\n if self.key_type == KeyType.DIMENSION:\n if len(self.dimensions) < len(other.dimensions):\n return False\n return self.dimensions[0:len(other.dimensions)] == other.dimensions", - "docstring": "Checks if this key starts with the other key provided. Returns False if key_type, identity\n or group are different.\n For `KeyType.TIMESTAMP` returns True.\n For `KeyType.DIMENSION` does prefix match between the two dimensions property." - }, - { - "code": "def file_save(self, name, filename=None, folder=\"\", keep_ext=True) -> bool:\r\n if name in self.files:\r\n file_object = self.files[name]\r\n clean_filename = secure_filename(file_object.filename)\r\n if filename is not None and keep_ext:\r\n clean_filename = filename + \".%s\" % \\\r\n (clean_filename.rsplit('.', 1)[1].lower())\r\n elif filename is not None and not keep_ext:\r\n clean_filename = filename\r\n file_object.save(os.path.join(\r\n current_app.config['UPLOADS']['FOLDER'],\r\n folder, clean_filename))\r\n return None", - "docstring": "Easy save of a file" - }, - { - "code": "def process_checkpoint(self, msg: Checkpoint, sender: str) -> bool:\n self.logger.info('{} processing checkpoint {} from {}'.format(self, msg, sender))\n result, reason = self.validator.validate_checkpoint_msg(msg)\n if result == DISCARD:\n self.discard(msg, \"{} discard message {} from {} \"\n \"with the reason: {}\".format(self, msg, sender, reason),\n self.logger.trace)\n elif result == PROCESS:\n self._do_process_checkpoint(msg, sender)\n else:\n self.logger.debug(\"{} stashing checkpoint message {} with \"\n \"the reason: {}\".format(self, msg, reason))\n self.stasher.stash((msg, sender), result)\n return False\n return True", - "docstring": "Process checkpoint messages\n\n :return: whether processed (True) or stashed (False)" - }, - { - "code": "def convert(cgi_input, twobit_ref, twobit_name, var_only=False):\n if isinstance(cgi_input, str) or isinstance(cgi_input, unicode):\n cgi_input = auto_zip_open(cgi_input, 'rb')\n reference = twobitreader.TwoBitFile(twobit_ref)\n header = make_header(twobit_name).split('\\n')\n for line in header:\n yield line\n while True:\n line = cgi_input.readline()\n if not line:\n break\n line = line.decode('utf-8')\n if re.search(r'^\\W*$', line) or line.startswith('\n continue\n if line.startswith('>'):\n header_data = line.lstrip('>').rstrip('\\n').split('\\t')\n header = {header_data[i]: i for i in range(len(header_data))}\n continue\n data = line.rstrip('\\n').split(\"\\t\")\n out = process_next_position(\n data=data, cgi_input=cgi_input, header=header, reference=reference,\n var_only=var_only)\n if out:\n for line in out:\n yield line", - "docstring": "Generator that converts CGI var data to VCF-formated strings" - }, - { - "code": "def send(self, op, cmd, integration_id, *args):\n out_cmd = \",\".join(\n (cmd, str(integration_id)) + tuple((str(x) for x in args)))\n self._conn.send(op + out_cmd)", - "docstring": "Formats and sends the requested command to the Lutron controller." - }, - { - "code": "def parent_of(self, name):\n if not self._in_tag(name):\n return\n node = self.cur_node\n while node.tag != name:\n node = node.getparent()\n self.cur_node = node.getparent()", - "docstring": "go to parent of node with name, and set as cur_node. 
Useful\n for creating new paragraphs" - }, - { - "code": "def get_object(self, name, *argv, **kwargs):\n regexp = name\n options = self.opts(regexp)\n options.update(kwargs)\n args = options.pop('view_args', argv)\n csrf_enable = self.get_backend_data(regexp).get('CSRF_ENABLE', True)\n if regexp in self.settings_urls:\n regexp = r'^{}'.format(self.get_django_settings(regexp)[1:])\n view = self[name].as_view()\n if not csrf_enable:\n view = csrf_exempt(view)\n return url(regexp, view, *args, **options)", - "docstring": "Get url object tuple for url\n\n :param name: url regexp from\n :type name: str\n :param argv: overrided args\n :param kwargs: overrided kwargs\n :return: url object\n :rtype: django.conf.urls.url" - }, - { - "code": "def GetValuesForAttribute(self, attribute, only_one=False):\n if not only_one and self.age_policy == NEWEST_TIME:\n raise ValueError(\"Attempting to read all attribute versions for an \"\n \"object opened for NEWEST_TIME. This is probably \"\n \"not what you want.\")\n if attribute is None:\n return []\n elif isinstance(attribute, string_types):\n attribute = Attribute.GetAttributeByName(attribute)\n return attribute.GetValues(self)", - "docstring": "Returns a list of values from this attribute." - }, - { - "code": "def generic_distribution(target, seeds, func):\n r\n seeds = target[seeds]\n value = func.ppf(seeds)\n return value", - "docstring": "r\"\"\"\n Accepts an 'rv_frozen' object from the Scipy.stats submodule and returns\n values from the distribution for the given seeds\n\n This uses the ``ppf`` method of the stats object\n\n Parameters\n ----------\n target : OpenPNM Object\n The object which this model is associated with. This controls the\n length of the calculated array, and also provides access to other\n necessary properties.\n\n seeds : string, optional\n The dictionary key on the Geometry object containing random seed values\n (between 0 and 1) to use in the statistical distribution.\n\n func : object\n An 'rv_frozen' object from the Scipy.stats library with all of the\n parameters pre-specified.\n\n Examples\n --------\n The following code illustrates the process of obtaining a 'frozen' Scipy\n stats object and adding it as a model:\n\n >>> import scipy\n >>> import openpnm as op\n >>> pn = op.network.Cubic(shape=[3, 3, 3])\n >>> geo = op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts)\n >>> geo.add_model(propname='pore.seed',\n ... model=op.models.geometry.pore_seed.random)\n\n Now retrieve the stats distribution and add to ``geo`` as a model:\n\n >>> stats_obj = scipy.stats.weibull_min(c=2, scale=.0001, loc=0)\n >>> geo.add_model(propname='pore.size',\n ... model=op.models.geometry.pore_size.generic_distribution,\n ... seeds='pore.seed',\n ... func=stats_obj)\n\n\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.hist(stats_obj.ppf(q=scipy.rand(1000)), bins=50)" - }, - { - "code": "def requeue(self):\n job_requeue_interval = float(\n self.config.get('sharq', 'job_requeue_interval'))\n while True:\n self.sq.requeue()\n gevent.sleep(job_requeue_interval / 1000.00)", - "docstring": "Loop endlessly and requeue expired jobs." - }, - { - "code": "def stopThread(self):\n if self._thread is not None:\n self.performSelector_onThread_withObject_waitUntilDone_('stopPowerNotificationsThread', self._thread, None, objc.YES)\n self._thread = None", - "docstring": "Stops spawned NSThread." 
- }, - { - "code": "def sub(self, repl, string, *args, **kwargs):\n return self._pattern.sub(self._auto_compile(repl), string, *args, **kwargs)", - "docstring": "Apply `sub`." - }, - { - "code": "def _get_auth(self, force_console=False):\n if not self.target:\n raise ValueError(\"Unspecified target ({!r})\".format(self.target))\n elif not force_console and self.URL_RE.match(self.target):\n auth_url = urlparse(self.target)\n source = 'url'\n if auth_url.username:\n self.user = auth_url.username\n if auth_url.password:\n self.password = auth_url.password\n if not self.auth_valid():\n source = self._get_auth_from_keyring()\n if not self.auth_valid():\n source = self._get_auth_from_netrc(auth_url.hostname)\n if not self.auth_valid():\n source = self._get_auth_from_console(self.target)\n else:\n source = self._get_auth_from_console(self.target)\n if self.auth_valid():\n self.source = source", - "docstring": "Try to get login auth from known sources." - }, - { - "code": "def feature_burstness(corpus, featureset_name, feature, k=5, normalize=True,\n s=1.1, gamma=1., **slice_kwargs):\n if featureset_name not in corpus.features:\n corpus.index_feature(featureset_name)\n if 'date' not in corpus.indices:\n corpus.index('date')\n dates = [min(corpus.indices['date'].keys()) - 1]\n X_ = [1.]\n years, values = corpus.feature_distribution(featureset_name, feature)\n for year, N in izip(years, values):\n if N == 0:\n continue\n if N > 1:\n if year == dates[-1] + 1:\n for n in xrange(int(N)):\n X_.append(1./N)\n dates.append(year)\n else:\n X_.append(float(year - dates[-1]))\n dates.append(year)\n for n in xrange(int(N) - 1):\n X_.append(1./(N - 1))\n dates.append(year)\n else:\n X_.append(float(year - dates[-1]))\n dates.append(year)\n st = _forward(map(lambda x: x*100, X_), s=s, gamma=gamma, k=k)\n A = defaultdict(list)\n for i in xrange(len(X_)):\n A[dates[i]].append(st[i])\n if normalize:\n A = {key: mean(values)/k for key, values in A.items()}\n else:\n A = {key: mean(values) for key, values in A.items()}\n D = sorted(A.keys())\n return D[1:], [A[d] for d in D[1:]]", - "docstring": "Estimate burstness profile for a feature over the ``'date'`` axis.\n\n Parameters\n ----------\n corpus : :class:`.Corpus`\n feature : str\n Name of featureset in ``corpus``. E.g. ``'citations'``.\n findex : int\n Index of ``feature`` in ``corpus``.\n k : int\n (default: 5) Number of burst states.\n normalize : bool\n (default: True) If True, burstness is expressed relative to the hightest\n possible state (``k-1``). Otherwise, states themselves are returned.\n kwargs : kwargs\n Parameters for burstness automaton HMM." - }, - { - "code": "def __get_img(self):\n with self.fs.open(self.__img_path, 'rb') as fd:\n img = PIL.Image.open(fd)\n img.load()\n return img", - "docstring": "Returns an image object corresponding to the page" - }, - { - "code": "def from_pickle(path: Union[str, BinaryIO], check_version: bool = True) -> BELGraph:\n graph = nx.read_gpickle(path)\n raise_for_not_bel(graph)\n if check_version:\n raise_for_old_graph(graph)\n return graph", - "docstring": "Read a graph from a pickle file.\n\n :param path: File or filename to read. 
Filenames ending in .gz or .bz2 will be uncompressed.\n :param bool check_version: Checks if the graph was produced by this version of PyBEL" - }, - { - "code": "def _get_dependencies_from_kwargs(self, args):\n if not isinstance(args, dict):\n raise TypeError('\"kwargs\" must be a dictionary')\n dependency_names = set()\n for arg in args.values():\n new_names = self._check_arg(arg)\n dependency_names.update(new_names)\n return dependency_names", - "docstring": "Parse keyed arguments" - }, - { - "code": "def delete_collection_percolator(target):\n for name in current_search.mappings.keys():\n if target.name and target.dbquery:\n current_search.client.delete(\n index=name,\n doc_type='.percolator',\n id='collection-{}'.format(target.name),\n ignore=[404]\n )", - "docstring": "Delete percolator associated with the new collection.\n\n :param target: Collection where the percolator was attached." - }, - { - "code": "def define_xml_str(xml, **kwargs):\n conn = __get_conn(**kwargs)\n ret = conn.defineXML(xml) is not None\n conn.close()\n return ret", - "docstring": "Define a persistent domain based on the XML passed to the function\n\n :param xml: libvirt XML definition of the domain\n :param connection: libvirt connection URI, overriding defaults\n\n .. versionadded:: 2019.2.0\n :param username: username to connect with, overriding defaults\n\n .. versionadded:: 2019.2.0\n :param password: password to connect with, overriding defaults\n\n .. versionadded:: 2019.2.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' virt.define_xml_str " - }, - { - "code": "def mark_flags_as_mutual_exclusive(flag_names, required=False,\n flag_values=FLAGS):\n def validate_mutual_exclusion(flags_dict):\n flag_count = sum(1 for val in flags_dict.values() if val is not None)\n if flag_count == 1 or (not required and flag_count == 0):\n return True\n message = ('%s one of (%s) must be specified.' %\n ('Exactly' if required else 'At most', ', '.join(flag_names)))\n raise ValidationError(message)\n register_multi_flags_validator(\n flag_names, validate_mutual_exclusion, flag_values=flag_values)", - "docstring": "Ensures that only one flag among flag_names is set.\n\n Args:\n flag_names: [str], a list of the flag names to be checked.\n required: Boolean, if set, exactly one of the flags must be set.\n Otherwise, it is also valid for none of the flags to be set.\n flag_values: An optional FlagValues instance to validate against." - }, - { - "code": "def from_dict(cls, d):\n read = lambda cls: (lambda pair: (pair[0], cls.from_dict(pair[1])))\n return cls(\n variables=map(read(Variable), d.get('variables', {}).items()),\n properties=map(read(Property), d.get('properties', {}).items()),\n roles=map(read(Role), d.get('roles', {}).items()),\n predicates=map(read(Predicate), d.get('predicates', {}).items())\n )", - "docstring": "Instantiate a SemI from a dictionary representation." - }, - { - "code": "def spawn_reader_writer(get_data_fn, put_data_fn):\n def _reader_thread():\n while True:\n out = get_data_fn()\n put_data_fn(out)\n if not out:\n break\n t = threading.Thread(target=_reader_thread)\n t.daemon = True\n t.start()\n return t", - "docstring": "Spawn a thread that reads from a data source and writes to a sink.\n\n The thread will terminate if it receives a Falsey value from the source.\n\n Args:\n get_data_fn: Data-reading function. 
Called repeatedly until it returns\n False-y to indicate that the thread should terminate.\n put_data_fn: Data-writing function.\n Returns: threading.Thread" - }, - { - "code": "def get_subnetwork(statements, nodes, relevance_network=None,\n relevance_node_lim=10):\n if relevance_network is not None:\n relevant_nodes = _find_relevant_nodes(nodes, relevance_network,\n relevance_node_lim)\n all_nodes = nodes + relevant_nodes\n else:\n all_nodes = nodes\n filtered_statements = _filter_statements(statements, all_nodes)\n pa = PysbAssembler()\n pa.add_statements(filtered_statements)\n model = pa.make_model()\n return model", - "docstring": "Return a PySB model based on a subset of given INDRA Statements.\n\n Statements are first filtered for nodes in the given list and other nodes\n are optionally added based on relevance in a given network. The filtered\n statements are then assembled into an executable model using INDRA's\n PySB Assembler.\n\n Parameters\n ----------\n statements : list[indra.statements.Statement]\n A list of INDRA Statements to extract a subnetwork from.\n nodes : list[str]\n The names of the nodes to extract the subnetwork for.\n relevance_network : Optional[str]\n The UUID of the NDEx network in which nodes relevant to the given\n nodes are found.\n relevance_node_lim : Optional[int]\n The maximal number of additional nodes to add to the subnetwork\n based on relevance.\n\n Returns\n -------\n model : pysb.Model\n A PySB model object assembled using INDRA's PySB Assembler from\n the INDRA Statements corresponding to the subnetwork." - }, - { - "code": "def is_user_reseller_admin(self, req, account, user):\n req.credentials_valid = True\n user_json = self.get_user_detail(req, account, user)\n if user_json is None:\n req.credentials_valid = False\n return False\n user_detail = json.loads(user_json)\n return '.reseller_admin' in (g['name'] for g in user_detail['groups'])", - "docstring": "Returns True if the user is a .reseller_admin.\n\n :param account: account user is part of\n :param user: the user\n :returns: True if user .reseller_admin, False\n if user is not a reseller_admin and None if the user\n doesn't exist." 
- }, - { - "code": "def create_view(self, name, expr, database=None):\n ast = self._build_ast(expr, MapDDialect.make_context())\n select = ast.queries[0]\n statement = ddl.CreateView(name, select, database=database)\n self._execute(statement)", - "docstring": "Create an MapD view from a table expression\n\n Parameters\n ----------\n name : string\n expr : ibis TableExpr\n database : string, default None" - }, - { - "code": "def block_jacobi(A, x, b, Dinv=None, blocksize=1, iterations=1, omega=1.0):\n A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])\n A = A.tobsr(blocksize=(blocksize, blocksize))\n if Dinv is None:\n Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)\n elif Dinv.shape[0] != int(A.shape[0]/blocksize):\n raise ValueError('Dinv and A have incompatible dimensions')\n elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):\n raise ValueError('Dinv and blocksize are incompatible')\n sweep = slice(None)\n (row_start, row_stop, row_step) = sweep.indices(int(A.shape[0]/blocksize))\n if (row_stop - row_start) * row_step <= 0:\n return\n temp = np.empty_like(x)\n [omega] = type_prep(A.dtype, [omega])\n for iter in range(iterations):\n amg_core.block_jacobi(A.indptr, A.indices, np.ravel(A.data),\n x, b, np.ravel(Dinv), temp,\n row_start, row_stop, row_step,\n omega, blocksize)", - "docstring": "Perform block Jacobi iteration on the linear system Ax=b.\n\n Parameters\n ----------\n A : csr_matrix or bsr_matrix\n Sparse NxN matrix\n x : ndarray\n Approximate solution (length N)\n b : ndarray\n Right-hand side (length N)\n Dinv : array\n Array holding block diagonal inverses of A\n size (N/blocksize, blocksize, blocksize)\n blocksize : int\n Desired dimension of blocks\n iterations : int\n Number of iterations to perform\n omega : scalar\n Damping parameter\n\n Returns\n -------\n Nothing, x will be modified in place.\n\n Examples\n --------\n >>> # Use block Jacobi as a Stand-Alone Solver\n >>> from pyamg.relaxation.relaxation import block_jacobi\n >>> from pyamg.gallery import poisson\n >>> from pyamg.util.linalg import norm\n >>> import numpy as np\n >>> A = poisson((10,10), format='csr')\n >>> x0 = np.zeros((A.shape[0],1))\n >>> b = np.ones((A.shape[0],1))\n >>> block_jacobi(A, x0, b, blocksize=4, iterations=10, omega=1.0)\n >>> print norm(b-A*x0)\n 4.66474230129\n >>> #\n >>> # Use block Jacobi as the Multigrid Smoother\n >>> from pyamg import smoothed_aggregation_solver\n >>> opts = {'omega': 4.0/3.0, 'iterations' : 2, 'blocksize' : 4}\n >>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),\n ... coarse_solver='pinv2', max_coarse=50,\n ... presmoother=('block_jacobi', opts),\n ... postsmoother=('block_jacobi', opts))\n >>> x0=np.zeros((A.shape[0],1))\n >>> residuals=[]\n >>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)" - }, - { - "code": "def lineMatchingPattern(pattern, lines):\n for line in lines:\n m = pattern.match(line)\n if m:\n return m\n else:\n return None", - "docstring": "Searches through the specified list of strings and returns the regular expression \n match for the first line that matches the specified pre-compiled regex pattern, or None if no match was found\n\n Note: if you are using a regex pattern string (i.e. 
not already compiled), use lineMatching() instead\n\n :type pattern: Compiled regular expression pattern to use\n :type lines: List of lines to search\n\n :return: the regular expression match for the first line that matches the specified regex, or None if no match was found\n :rtype: re.Match" - }, - { - "code": "def peer_retrieve(key, relation_name='cluster'):\n cluster_rels = relation_ids(relation_name)\n if len(cluster_rels) > 0:\n cluster_rid = cluster_rels[0]\n return relation_get(attribute=key, rid=cluster_rid,\n unit=local_unit())\n else:\n raise ValueError('Unable to detect'\n 'peer relation {}'.format(relation_name))", - "docstring": "Retrieve a named key from peer relation `relation_name`." - }, - { - "code": "def create_checkpoint_decoder(args: argparse.Namespace,\n exit_stack: ExitStack,\n train_context: List[mx.Context]) -> Optional[checkpoint_decoder.CheckpointDecoder]:\n sample_size = args.decode_and_evaluate\n if args.optimized_metric == C.BLEU and sample_size == 0:\n logger.info(\"You chose BLEU as the optimized metric, will turn on BLEU monitoring during training. \"\n \"To control how many validation sentences are used for calculating bleu use \"\n \"the --decode-and-evaluate argument.\")\n sample_size = -1\n if sample_size == 0:\n return None\n if args.use_cpu or args.decode_and_evaluate_use_cpu:\n context = mx.cpu()\n elif args.decode_and_evaluate_device_id is not None:\n context = utils.determine_context(device_ids=args.decode_and_evaluate_device_id,\n use_cpu=False,\n disable_device_locking=args.disable_device_locking,\n lock_dir=args.lock_dir,\n exit_stack=exit_stack)[0]\n else:\n context = train_context[-1]\n return checkpoint_decoder.CheckpointDecoderImageModel(context=context,\n inputs=[args.validation_source] + args.validation_source_factors,\n references=args.validation_target,\n model=args.output,\n sample_size=sample_size,\n source_image_size=args.source_image_size,\n image_root=args.validation_source_root,\n max_output_length=args.max_output_length,\n use_feature_loader=args.image_preextracted_features)", - "docstring": "Returns a checkpoint decoder or None.\n\n :param args: Arguments as returned by argparse.\n :param exit_stack: An ExitStack from contextlib.\n :param train_context: Context for training.\n :return: A CheckpointDecoder if --decode-and-evaluate != 0, else None." - }, - { - "code": "def hidden(self):\n members = [self.member_info(item[\"_id\"]) for item in self.members()]\n result = []\n for member in members:\n if member['rsInfo'].get('hidden'):\n server_id = member['server_id']\n result.append({\n '_id': member['_id'],\n 'host': self._servers.hostname(server_id),\n 'server_id': server_id})\n return result", - "docstring": "return list of hidden members" - }, - { - "code": "def zoom(self, factor):\n camera = self.ren.GetActiveCamera()\n camera.Zoom(factor)\n self.ren_win.Render()", - "docstring": "Zoom the camera view by a factor." 
- }, - { - "code": "def _scale_back_response(bqm, response, scalar, ignored_interactions,\n ignored_variables, ignore_offset):\n if len(ignored_interactions) + len(\n ignored_variables) + ignore_offset == 0:\n response.record.energy = np.divide(response.record.energy, scalar)\n else:\n response.record.energy = bqm.energies((response.record.sample,\n response.variables))\n return response", - "docstring": "Helper function to scale back the response of sample method" - }, - { - "code": "def build_model_name(cls, name='modelName', output_name='output'):\n obj = cls(name)\n obj.exporter = 'generate_model_name'\n obj.output_name = output_name\n return obj", - "docstring": "Build an output model name parameter.\n\n :param name: model name\n :type name: str\n :param output_name: bind output port name\n :type output_name: str\n :return: output description\n :rtype: ParamDef" - }, - { - "code": "def render_from_path(path, context=None, globals=None):\n abs_source = os.path.abspath(os.path.expanduser(path))\n yaml_resolver = resolver.TYamlResolver.new_from_path(abs_source)\n return yaml_resolver.resolve(Context(context), globals)._data", - "docstring": "Renders a templated yaml document from file path.\n\n :param path: A path to the yaml file to process.\n :param context: A context to overlay on the yaml file. This will override any yaml values.\n :param globals: A dictionary of globally-accessible objects within the rendered template.\n :return: A dict with the final overlayed configuration." - }, - { - "code": "def get_accounts(self, username=None):\n url = \"{0}/{1}/accounts\".format(self.domain, self.API_VERSION)\n params = {\"username\": username}\n try:\n return self._Client__call(uri=url, params=params, method=\"get\")\n except RequestException:\n return False\n except AssertionError:\n return False", - "docstring": "Get a list of accounts owned by the user.\n\n Parameters\n ----------\n username : string\n The name of the user. Note: This is only required on the\n sandbox, on production systems your access token will\n identify you.\n\n See more:\n http://developer.oanda.com/rest-sandbox/accounts/#-a-name-getaccountsforuser-a-get-accounts-for-a-user" - }, - { - "code": "def set_seq1(self, a):\n if a is self.a:\n return\n self.a = a\n if not isinstance(self.a, list):\n self.a = list(self.a)\n [hash(x) for x in self.a]", - "docstring": "Same as SequenceMatcher.set_seq1, but check for non-list inputs\n implementation." - }, - { - "code": "def body(self):\n content = []\n length = 0\n for chunk in self:\n content.append(chunk)\n length += len(chunk)\n if self.length_limit and length > self.length_limit:\n self.close()\n raise ContentLimitExceeded(\"Content length is more than %d \"\n \"bytes\" % self.length_limit)\n return b(\"\").join(content)", - "docstring": "Response body.\n\n :raises: :class:`ContentLimitExceeded`, :class:`ContentDecodingError`" - }, - { - "code": "def serialize_numeric(self, tag):\n str_func = int.__str__ if isinstance(tag, int) else float.__str__\n return str_func(tag) + tag.suffix", - "docstring": "Return the literal representation of a numeric tag." - }, - { - "code": "def context_range(self, context):\n if not context.startswith(self.prefix):\n context = self.prefix + '.' 
+ context\n lo = hi = None\n for idx, line_context in enumerate(self.lines, 1):\n if line_context.startswith(context):\n lo = lo or idx\n hi = idx\n if lo is None:\n raise ValueError(\"Context %s does not exist in file %s\" %\n (context, self.filename))\n return lo, hi + 1", - "docstring": "Return the 1-offset, right-open range of lines spanned by\n a particular context name.\n\n Parameters\n ----------\n context : str\n\n Raises\n ------\n ValueError, if context is not present in the file." - }, - { - "code": "async def sources(client: Client, pubkey: str) -> dict:\n return await client.get(MODULE + '/sources/%s' % pubkey, schema=SOURCES_SCHEMA)", - "docstring": "GET transaction sources\n\n :param client: Client to connect to the api\n :param pubkey: Public key\n :return:" - }, - { - "code": "def _clean_fields(allowed_fields: dict, fields: FieldsParam) -> Iterable[str]:\n if fields == ALL:\n fields = allowed_fields.keys()\n else:\n fields = tuple(fields)\n unknown_fields = set(fields) - allowed_fields.keys()\n if unknown_fields:\n raise ValueError('Unknown fields: {}'.format(unknown_fields))\n return fields", - "docstring": "Clean lookup fields and check for errors." - }, - { - "code": "def add_external_reference(self,term_id, external_ref):\n if term_id in self.idx:\n term_obj = Cterm(self.idx[term_id],self.type)\n term_obj.add_external_reference(external_ref)\n else:\n print('{term_id} not in self.idx'.format(**locals()))", - "docstring": "Adds an external reference for the given term\n @type term_id: string\n @param term_id: the term identifier\n @type external_ref: L{CexternalReference}\n @param external_ref: the external reference object" - }, - { - "code": "def rn(shape, dtype=None, impl='numpy', **kwargs):\n rn_cls = tensor_space_impl(impl)\n if dtype is None:\n dtype = rn_cls.default_dtype(RealNumbers())\n rn = rn_cls(shape=shape, dtype=dtype, **kwargs)\n if not rn.is_real:\n raise ValueError('data type {!r} not a real floating-point type.'\n ''.format(dtype))\n return rn", - "docstring": "Return a space of real tensors.\n\n Parameters\n ----------\n shape : positive int or sequence of positive ints\n Number of entries per axis for elements in this space. A\n single integer results in a space with 1 axis.\n dtype : optional\n Data type of each element. Can be provided in any way the\n `numpy.dtype` function understands, e.g. as built-in type or\n as a string. Only real floating-point data types are allowed.\n For ``None``, the `TensorSpace.default_dtype` of the\n created space is used in the form\n ``default_dtype(RealNumbers())``.\n impl : str, optional\n Impmlementation back-end for the space. See\n `odl.space.entry_points.tensor_space_impl_names` for available\n options.\n kwargs :\n Extra keyword arguments passed to the space constructor.\n\n Returns\n -------\n real_space : `TensorSpace`\n\n Examples\n --------\n Space of real 3-tuples with ``float32`` entries:\n\n >>> odl.rn(3, dtype='float32')\n rn(3, dtype='float32')\n\n Real 2x3 tensors with ``float32`` entries:\n\n >>> odl.rn((2, 3), dtype='float32')\n rn((2, 3), dtype='float32')\n\n The default data type depends on the implementation. For\n ``impl='numpy'``, it is ``'float64'``:\n\n >>> ts = odl.rn((2, 3))\n >>> ts\n rn((2, 3))\n >>> ts.dtype\n dtype('float64')\n\n See Also\n --------\n tensor_space : Space of tensors with arbitrary scalar data type.\n cn : Complex tensor space." 
- }, - { - "code": "def modify_conf(cfgfile, service_name, outfn):\n if not cfgfile or not outfn:\n print('ERROR: There is no config file.')\n sys.exit(0)\n options = service_options[service_name]\n with open(cfgfile, 'r') as cf:\n lines = cf.readlines()\n for opt in options:\n op = opt.get('option')\n res = [line for line in lines if line.startswith(op)]\n if len(res) > 1:\n print('ERROR: There are more than one %s option.' % res)\n sys.exit(0)\n if res:\n (op, sep, val) = (res[0].strip('\\n').replace(' ', '').\n partition('='))\n new_val = None\n if opt.get('is_list'):\n if not any(opt.get('value') == value for value in\n val.split(',')):\n new_val = ','.join((val, opt.get('value')))\n else:\n if val != opt.get('value'):\n new_val = opt.get('value')\n if new_val:\n opt_idx = lines.index(res[0])\n lines.pop(opt_idx)\n lines.insert(opt_idx, '='.join((opt.get('option'),\n new_val + '\\n')))\n else:\n try:\n sec_idx = lines.index('[' + opt.get('section') + ']\\n')\n lines.insert(sec_idx + 1, '='.join(\n (opt.get('option'), opt.get('value') + '\\n')))\n except ValueError:\n print('Invalid %s section name.' % opt.get('section'))\n sys.exit(0)\n with open(outfn, 'w') as fwp:\n all_lines = ''\n for line in lines:\n all_lines += line\n fwp.write(all_lines)", - "docstring": "Modify config file neutron and keystone to include enabler options." - }, - { - "code": "def get_mmcif(code, outfile=None):\n pdbe_url = \"http://www.ebi.ac.uk/pdbe/entry-files/download/{0}.cif\".format(code)\n r = requests.get(pdbe_url)\n if r.status_code == 200:\n mmcif_string = r.text\n else:\n print(\"Could not download mmcif file for {0}\".format(code))\n mmcif_string = None\n if outfile and mmcif_string:\n with open(outfile, 'w') as foo:\n foo.write(mmcif_string)\n return mmcif_string", - "docstring": "Get mmcif file associated with code from PDBE.\n\n Parameters\n ----------\n code : str\n PDB code.\n outfile : str\n Filepath. Writes returned value to this file.\n\n Returns\n -------\n mmcif_file : str\n Filepath to the mmcif file." - }, - { - "code": "def get_view_url(self):\n if not self.view_url_name:\n raise ImproperlyConfigured(\"Missing `view_url_name` attribute on {0}\".format(self.__class__.__name__))\n return reverse(self.view_url_name, args=self.args, kwargs=self.kwargs)", - "docstring": "This method is used by the ``get_translated_url`` template tag.\n\n By default, it uses the :attr:`view_url_name` to generate an URL.\n When the URL ``args`` and ``kwargs`` are translatable,\n override this function instead to generate the proper URL." - }, - { - "code": "def request_client_list(self, req, msg):\n clients = self._client_conns\n num_clients = len(clients)\n for conn in clients:\n addr = conn.address\n req.inform(addr)\n return req.make_reply('ok', str(num_clients))", - "docstring": "Request the list of connected clients.\n\n The list of clients is sent as a sequence of #client-list informs.\n\n Informs\n -------\n addr : str\n The address of the client as host:port with host in dotted quad\n notation. 
If the address of the client could not be determined\n (because, for example, the client disconnected suddenly) then\n a unique string representing the client is sent instead.\n\n Returns\n -------\n success : {'ok', 'fail'}\n Whether sending the client list succeeded.\n informs : int\n Number of #client-list inform messages sent.\n\n Examples\n --------\n ::\n\n ?client-list\n #client-list 127.0.0.1:53600\n !client-list ok 1" - }, - { - "code": "def sset_loop(args):\n fiss_func = __cmd_to_func(args.action)\n if not fiss_func:\n eprint(\"invalid FISS cmd '\" + args.action + \"'\")\n return 1\n r = fapi.get_entities(args.project, args.workspace, \"sample_set\")\n fapi._check_response_code(r, 200)\n sample_sets = [entity['name'] for entity in r.json()]\n args.entity_type = \"sample_set\"\n for sset in sample_sets:\n print('\\n\n args.action))\n args.entity = sset\n try:\n result = fiss_func(args)\n except Exception as e:\n status = __pretty_print_fc_exception(e)\n if not args.keep_going:\n return status\n printToCLI(result)\n return 0", - "docstring": "Loop over all sample sets in a workspace, performing a func" - }, - { - "code": "def visit_delete(self, node):\n return \"del %s\" % \", \".join(child.accept(self) for child in node.targets)", - "docstring": "return an astroid.Delete node as string" - }, - { - "code": "def parse_stream(self, stream: BytesIO, context=None):\n if context is None:\n context = Context()\n if not isinstance(context, Context):\n context = Context(context)\n try:\n return self._parse_stream(stream, context)\n except Error:\n raise\n except Exception as exc:\n raise ParsingError(str(exc))", - "docstring": "Parse some python object from the stream.\n\n :param stream: Stream from which the data is read and parsed.\n :param context: Optional context dictionary." 
- }, - { - "code": "def _sibpath(path, sibling):\n return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)", - "docstring": "Return the path to a sibling of a file in the filesystem.\n\n This is useful in conjunction with the special C{__file__} attribute\n that Python provides for modules, so modules can load associated\n resource files.\n\n (Stolen from twisted.python.util)" - }, - { - "code": "def deploy_lambda(awsclient, function_name, role, handler_filename,\n handler_function,\n folders, description, timeout, memory, subnet_ids=None,\n security_groups=None, artifact_bucket=None,\n zipfile=None,\n fail_deployment_on_unsuccessful_ping=False,\n runtime='python2.7', settings=None, environment=None,\n retention_in_days=None\n ):\n if lambda_exists(awsclient, function_name):\n function_version = _update_lambda(awsclient, function_name,\n handler_filename,\n handler_function, folders, role,\n description, timeout, memory,\n subnet_ids, security_groups,\n artifact_bucket=artifact_bucket,\n zipfile=zipfile,\n environment=environment\n )\n else:\n if not zipfile:\n return 1\n log.info('buffer size: %0.2f MB' % float(len(zipfile) / 1000000.0))\n function_version = _create_lambda(awsclient, function_name, role,\n handler_filename, handler_function,\n folders, description, timeout,\n memory, subnet_ids, security_groups,\n artifact_bucket, zipfile,\n runtime=runtime,\n environment=environment)\n if retention_in_days:\n log_group_name = '/aws/lambda/%s' % function_name\n put_retention_policy(awsclient, log_group_name, retention_in_days)\n pong = ping(awsclient, function_name, version=function_version)\n if 'alive' in str(pong):\n log.info(colored.green('Great you\\'re already accepting a ping ' +\n 'in your Lambda function'))\n elif fail_deployment_on_unsuccessful_ping and not 'alive' in pong:\n log.info(colored.red('Pinging your lambda function failed'))\n return 1\n else:\n log.info(colored.red('Please consider adding a reaction to a ' +\n 'ping event to your lambda function'))\n _deploy_alias(awsclient, function_name, function_version)\n return 0", - "docstring": "Create or update a lambda function.\n\n :param awsclient:\n :param function_name:\n :param role:\n :param handler_filename:\n :param handler_function:\n :param folders:\n :param description:\n :param timeout:\n :param memory:\n :param subnet_ids:\n :param security_groups:\n :param artifact_bucket:\n :param zipfile:\n :param environment: environment variables\n :param retention_in_days: retention time of the cloudwatch logs\n :return: exit_code" - }, - { - "code": "def main():\n configParser = FileParser()\n logging.config.dictConfig(\n configParser.load_from_file(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'settings', 'logging.yml'))\n )\n ApiDoc().main()", - "docstring": "Main function to run command" - }, - { - "code": "def next_row(self):\n if self.mode == self.SYMBOL_MODE:\n self.select_row(+1)\n return\n next_row = self.current_row() + 1\n if next_row < self.count():\n if '
' in self.list.item(next_row).text():\n self.select_row(+2)\n else:\n self.select_row(+1)", - "docstring": "Select next row in list widget." - }, - { - "code": "def save(self, basepath):\n out.info(u\"Get image URL %s\" % self.url, level=1)\n self.connect()\n filename = \"%s%s\" % (self.filename, self.ext)\n comicDir = os.path.join(basepath, self.dirname)\n if not os.path.isdir(comicDir):\n os.makedirs(comicDir)\n fn = os.path.join(comicDir, filename)\n if os.path.isfile(fn) and os.path.getsize(fn) >= self.contentLength:\n out.info(u'Skipping existing file \"%s\".' % fn)\n return fn, False\n content = self.urlobj.content\n if not content:\n out.warn(u\"Empty content from %s, try again...\" % self.url)\n self.connect()\n content = self.urlobj.content\n out.debug(u'Writing comic to file %s...' % fn)\n writeFile(fn, content)\n if self.text:\n fntext = os.path.join(comicDir, \"%s.txt\" % self.filename)\n out.debug(u'Writing comic text to file %s...' % fntext)\n writeFile(fntext, self.text, encoding='utf-8')\n getHandler().comicDownloaded(self, fn, text=self.text)\n return fn, True", - "docstring": "Save comic URL to filename on disk." - }, - { - "code": "def _drop(self, tree):\n tablename = tree.table\n kwargs = {}\n try:\n ret = self.connection.delete_table(tablename, **kwargs)\n except DynamoDBError as e:\n if e.kwargs[\"Code\"] == \"ResourceNotFoundException\" and tree.exists:\n return False\n raise\n return True", - "docstring": "Run a DROP statement" - }, - { - "code": "def find_nb_genes(data):\n data_means = data.mean(1)\n data_vars = data.var(1)\n nb_indices = data_means < 0.9*data_vars\n return nb_indices", - "docstring": "Finds the indices of all genes in the dataset that have\n a mean < 0.9 variance. Returns an array of booleans." - }, - { - "code": "def _interp_gap(data, peak_loc, interp_len):\n start_loc = peak_loc - int(0.5 * interp_len)\n end_loc = peak_loc + int(0.5 * interp_len)\n if start_loc < 0:\n start_loc = 0\n if end_loc > len(data) - 1:\n end_loc = len(data) - 1\n fill = np.linspace(data[start_loc], data[end_loc], end_loc - start_loc)\n data[start_loc:end_loc] = fill\n return data", - "docstring": "Internal function for filling gap with linear interpolation\n\n :type data: numpy.ndarray\n :param data: data to remove peak in\n :type peak_loc: int\n :param peak_loc: peak location position\n :type interp_len: int\n :param interp_len: window to interpolate\n\n :returns: Trace works in-place\n :rtype: :class:`obspy.core.trace.Trace`" - }, - { - "code": "def translate_comment(self, comment, link_resolver):\n out = u''\n self.translate_tags(comment, link_resolver)\n ast = self.comment_to_ast(comment, link_resolver)\n out += self.ast_to_html(ast, link_resolver)\n return out", - "docstring": "Given a gtk-doc comment string, returns the comment translated\n to the desired format." 
- }, - { - "code": "def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,\n fork_context=None, write_pid=True):\n def double_fork():\n logger.debug('forking %s', self)\n pid = os.fork()\n if pid == 0:\n os.setsid()\n second_pid = os.fork()\n if second_pid == 0:\n return False, True\n else:\n if write_pid: self.write_pid(second_pid)\n return False, False\n else:\n os.waitpid(pid, 0)\n return True, False\n fork_func = functools.partial(fork_context, double_fork) if fork_context else double_fork\n self.purge_metadata()\n self.pre_fork(**pre_fork_opts or {})\n is_parent, is_child = fork_func()\n try:\n if not is_parent and not is_child:\n os._exit(0)\n elif is_parent:\n assert not is_child\n self.post_fork_parent(**post_fork_parent_opts or {})\n else:\n assert not is_parent\n os.chdir(self._buildroot)\n self.post_fork_child(**post_fork_child_opts or {})\n except Exception:\n logger.critical(traceback.format_exc())\n os._exit(0)", - "docstring": "Perform a double-fork, execute callbacks and write the child pid file.\n\n The double-fork here is necessary to truly daemonize the subprocess such that it can never\n take control of a tty. The initial fork and setsid() creates a new, isolated process group\n and also makes the first child a session leader (which can still acquire a tty). By forking a\n second time, we ensure that the second child can never acquire a controlling terminal because\n it's no longer a session leader - but it now has its own separate process group.\n\n Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset\n the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn\n below) due to the fact that the daemons that pants would run are typically personal user\n daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to\n differ in their permissions without good reason - in this case, we want to inherit the umask.\n\n :param fork_context: A function which accepts and calls a function that will call fork. This\n is not a contextmanager/generator because that would make interacting with native code more\n challenging. If no fork_context is passed, the fork function is called directly." 
- }, - { - "code": "def get(self, section, key):\n try:\n return self.parser.get(section, key)\n except (NoOptionError, NoSectionError) as e:\n logger.warning(\"%s\", e)\n return None", - "docstring": "get function reads the config value for the requested section and\n key and returns it\n\n Parameters:\n * **section (string):** the section to look for the config value either - oxd, client\n * **key (string):** the key for the config value required\n\n Returns:\n **value (string):** the function returns the value of the key in the appropriate format if found or returns None if such a section or key couldnot be found\n\n Example:\n config = Configurer(location)\n oxd_port = config.get('oxd', 'port') # returns the port of the oxd" - }, - { - "code": "def handle_market_close(self, dt, data_portal):\n completed_session = self._current_session\n if self.emission_rate == 'daily':\n self.sync_last_sale_prices(dt, data_portal)\n session_ix = self._session_count\n self._session_count += 1\n packet = {\n 'period_start': self._first_session,\n 'period_end': self._last_session,\n 'capital_base': self._capital_base,\n 'daily_perf': {\n 'period_open': self._market_open,\n 'period_close': dt,\n },\n 'cumulative_perf': {\n 'period_open': self._first_session,\n 'period_close': self._last_session,\n },\n 'progress': self._progress(self),\n 'cumulative_risk_metrics': {},\n }\n ledger = self._ledger\n ledger.end_of_session(session_ix)\n self.end_of_session(\n packet,\n ledger,\n completed_session,\n session_ix,\n data_portal,\n )\n return packet", - "docstring": "Handles the close of the given day.\n\n Parameters\n ----------\n dt : Timestamp\n The most recently completed simulation datetime.\n data_portal : DataPortal\n The current data portal.\n\n Returns\n -------\n A daily perf packet." 
- }, - { - "code": "def send_poll(\n self,\n chat_id: Union[int, str],\n question: str,\n options: List[str],\n disable_notification: bool = None,\n reply_to_message_id: int = None,\n reply_markup: Union[\n \"pyrogram.InlineKeyboardMarkup\",\n \"pyrogram.ReplyKeyboardMarkup\",\n \"pyrogram.ReplyKeyboardRemove\",\n \"pyrogram.ForceReply\"\n ] = None\n ) -> \"pyrogram.Message\":\n r = self.send(\n functions.messages.SendMedia(\n peer=self.resolve_peer(chat_id),\n media=types.InputMediaPoll(\n poll=types.Poll(\n id=0,\n question=question,\n answers=[\n types.PollAnswer(text=o, option=bytes([i]))\n for i, o in enumerate(options)\n ]\n )\n ),\n message=\"\",\n silent=disable_notification or None,\n reply_to_msg_id=reply_to_message_id,\n random_id=self.rnd_id(),\n reply_markup=reply_markup.write() if reply_markup else None\n )\n )\n for i in r.updates:\n if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):\n return pyrogram.Message._parse(\n self, i.message,\n {i.id: i for i in r.users},\n {i.id: i for i in r.chats}\n )", - "docstring": "Use this method to send a new poll.\n\n Args:\n chat_id (``int`` | ``str``):\n Unique identifier (int) or username (str) of the target chat.\n For your personal cloud (Saved Messages) you can simply use \"me\" or \"self\".\n For a contact that exists in your Telegram address book you can use his phone number (str).\n\n question (``str``):\n The poll question, as string.\n\n options (List of ``str``):\n The poll options, as list of strings (2 to 10 options are allowed).\n\n disable_notification (``bool``, *optional*):\n Sends the message silently.\n Users will receive a notification with no sound.\n\n reply_to_message_id (``int``, *optional*):\n If the message is a reply, ID of the original message.\n\n reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):\n Additional interface options. An object for an inline keyboard, custom reply keyboard,\n instructions to remove reply keyboard or to force a reply from the user.\n\n Returns:\n On success, the sent :obj:`Message ` is returned.\n\n Raises:\n :class:`RPCError ` in case of a Telegram RPC error." - }, - { - "code": "def process_text(text, out_format='json_ld', save_json='eidos_output.json',\n webservice=None):\n if not webservice:\n if eidos_reader is None:\n logger.error('Eidos reader is not available.')\n return None\n json_dict = eidos_reader.process_text(text, out_format)\n else:\n res = requests.post('%s/process_text' % webservice,\n json={'text': text})\n json_dict = res.json()\n if save_json:\n with open(save_json, 'wt') as fh:\n json.dump(json_dict, fh, indent=2)\n return process_json(json_dict)", - "docstring": "Return an EidosProcessor by processing the given text.\n\n This constructs a reader object via Java and extracts mentions\n from the text. It then serializes the mentions into JSON and\n processes the result with process_json.\n\n Parameters\n ----------\n text : str\n The text to be processed.\n out_format : Optional[str]\n The type of Eidos output to read into and process. Currently only\n 'json-ld' is supported which is also the default value used.\n save_json : Optional[str]\n The name of a file in which to dump the JSON output of Eidos.\n webservice : Optional[str]\n An Eidos reader web service URL to send the request to.\n If None, the reading is assumed to be done with the Eidos JAR rather\n than via a web service. 
Default: None\n\n Returns\n -------\n ep : EidosProcessor\n An EidosProcessor containing the extracted INDRA Statements in its\n statements attribute." - }, - { - "code": "def res(arg):\n def _res(ctx):\n _arg = arg(ctx) if callable(arg) else arg\n return I(arg)\n return _res", - "docstring": "Convert the argument into an IRI ref" - }, - { - "code": "def clicked(self, event):\n group = event.artist._mt_group\n indices = event.ind\n major, minor, _ = mpl_version.split('.')\n if (int(major), int(minor)) < (1, 2) or not event.mouseevent.dblclick:\n for i in indices:\n print(self.groups[group][i].line_str)\n else:\n first = indices[0]\n logevent = self.groups[group][first]\n try:\n idx = map(itemgetter(0), self.durlines).index(logevent)\n _, poly = self.durlines[idx]\n poly.remove()\n plt.gcf().canvas.draw()\n del self.durlines[idx]\n except ValueError:\n if self.args['optime_start']:\n pts = [[date2num(logevent.datetime), 0],\n [date2num(logevent.datetime), logevent.duration],\n [date2num(logevent.datetime +\n timedelta(milliseconds=logevent.duration)\n ), 0]]\n else:\n pts = [[date2num(logevent.datetime), 0],\n [date2num(logevent.datetime), logevent.duration],\n [date2num(logevent.datetime -\n timedelta(milliseconds=logevent.duration)\n ), 0]]\n poly = Polygon(pts, closed=True, alpha=0.2, linewidth=0,\n facecolor=event.artist.get_markerfacecolor(),\n edgecolor=None, zorder=-10000)\n ax = plt.gca()\n ax.add_patch(poly)\n plt.gcf().canvas.draw()\n self.durlines.append((logevent, poly))", - "docstring": "Call if an element of this plottype is clicked.\n\n Implement in sub class." - }, - { - "code": "def validate(self):\n if not (self.recipients or self.cc or self.bcc):\n raise InvalidMessage(\"No recipients have been added\")\n if not self.body and not self.html:\n raise InvalidMessage(\"No body has been set\")\n if not self.sender:\n raise InvalidMessage(\"No sender address has been set\")\n if self.is_bad_headers():\n raise BadHeaders", - "docstring": "Checks if message is valid and raises appropriate exception." - }, - { - "code": "def softmax(X, axis=0):\n if axis == 1:\n return np.exp(X - logsumexp(X, axis=1)[:, np.newaxis])\n elif axis == 0:\n return np.exp(X - logsumexp(X, axis=0))\n else:\n raise ValueError(\"This only works on 2D arrays for now.\")", - "docstring": "Pass X through a softmax function in a numerically stable way using the\n log-sum-exp trick.\n\n This transformation is:\n\n .. math::\n\n \\\\frac{\\exp\\{X_k\\}}{\\sum^K_{j=1} \\exp\\{X_j\\}}\n\n and is appliedx to each row/column, `k`, of X.\n\n Parameters\n ----------\n X: ndarray\n 2D array of shape (N, D) to apply the log-sum-exp trick.\n axis: int, optional\n Axis to apply the summation along (works the same as axis in\n numpy.sum).\n\n Returns\n -------\n smX: ndarray\n results of applying the log-sum-exp trick, this will be shape\n (N, D), and each row will sum to 1 if :code:`axis=1` or each column\n will sum to 1 if :code:`axis=0`." 
- }, - { - "code": "def _get_plugin_map(self, compiler, options_src, target):\n plugins_key = '{}_plugins'.format(compiler)\n requested_plugins = (\n tuple(getattr(self, plugins_key, []) or []) +\n tuple(options_src.get_options().get(plugins_key, []) or []) +\n tuple((getattr(target, plugins_key, []) or []))\n )\n requested_plugins = {p for val in requested_plugins for p in val.split(',')}\n plugin_args_key = '{}_plugin_args'.format(compiler)\n available_plugin_args = {}\n available_plugin_args.update(getattr(self, plugin_args_key, {}) or {})\n available_plugin_args.update(options_src.get_options().get(plugin_args_key, {}) or {})\n available_plugin_args.update(getattr(target, plugin_args_key, {}) or {})\n plugin_map = {}\n for plugin in requested_plugins:\n if target not in self._plugin_targets(compiler).get(plugin, {}):\n plugin_map[plugin] = available_plugin_args.get(plugin, [])\n return plugin_map", - "docstring": "Returns a map of plugin to args, for the given compiler.\n\n Only plugins that must actually be activated will be present as keys in the map.\n Plugins with no arguments will have an empty list as a value.\n\n Active plugins and their args will be gathered from (in order of precedence):\n - The _plugins and _plugin_args fields of the target, if it has them.\n - The _plugins and _plugin_args options of this task, if it has them.\n - The _plugins and _plugin_args fields of this task, if it has them.\n\n Note that in-repo plugins will not be returned, even if requested, when building\n themselves. Use published versions of those plugins for that.\n\n See:\n - examples/src/java/org/pantsbuild/example/javac/plugin/README.md.\n - examples/src/scala/org/pantsbuild/example/scalac/plugin/README.md\n\n :param compiler: one of 'javac', 'scalac'.\n :param options_src: A JvmToolMixin instance providing plugin options.\n :param target: The target whose plugins we compute." - }, - { - "code": "def artifact_cache_dir(self):\n return (self.get_options().artifact_cache_dir or\n os.path.join(self.scratch_dir, 'artifacts'))", - "docstring": "Note that this is unrelated to the general pants artifact cache." 
- }, - { - "code": "def artboards(src_path):\n pages = list_artboards(src_path)\n artboards = []\n for page in pages:\n artboards.extend(page.artboards)\n return artboards", - "docstring": "Return artboards as a flat list" - }, - { - "code": "def code(ctx, show_hidden, query, single):\n ensure_validated(ctx)\n controller = ctx.obj['controller']\n creds = [(cr, c)\n for (cr, c) in controller.calculate_all()\n if show_hidden or not cr.is_hidden\n ]\n creds = _search(creds, query)\n if len(creds) == 1:\n cred, code = creds[0]\n if cred.touch:\n prompt_for_touch()\n try:\n if cred.oath_type == OATH_TYPE.HOTP:\n hotp_touch_timer = Timer(0.500, prompt_for_touch)\n hotp_touch_timer.start()\n creds = [(cred, controller.calculate(cred))]\n hotp_touch_timer.cancel()\n elif code is None:\n creds = [(cred, controller.calculate(cred))]\n except APDUError as e:\n if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:\n ctx.fail('Touch credential timed out!')\n elif single:\n _error_multiple_hits(ctx, [cr for cr, c in creds])\n if single:\n click.echo(creds[0][1].value)\n else:\n creds.sort()\n outputs = [\n (\n cr.printable_key,\n c.value if c\n else '[Touch Credential]' if cr.touch\n else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP\n else ''\n ) for (cr, c) in creds\n ]\n longest_name = max(len(n) for (n, c) in outputs) if outputs else 0\n longest_code = max(len(c) for (n, c) in outputs) if outputs else 0\n format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code)\n for name, result in outputs:\n click.echo(format_str.format(name, result))", - "docstring": "Generate codes.\n\n Generate codes from credentials stored on your YubiKey.\n Provide a query string to match one or more specific credentials.\n Touch and HOTP credentials require a single match to be triggered." - }, - { - "code": "def __convert_enum(node):\n name = __get_attribute(node, 'Name')\n logging.debug('Found EnumProperty named %s', name)\n converted_values = []\n for value in node.getElementsByTagName('EnumValue'):\n converted = __convert_node(value)\n converted['value'] = converted['name']\n converted['name'] = name\n __with_argument(value, converted)\n converted_values.append(converted)\n return converted_values", - "docstring": "Converts an EnumProperty node to JSON format." - }, - { - "code": "def make_controller(cls, config, session, left_menu_items=None):\n m = config.model\n Controller = config.defaultCrudRestController\n class ModelController(Controller):\n model = m\n table = config.table_type(session)\n table_filler = config.table_filler_type(session)\n new_form = config.new_form_type(session)\n new_filler = config.new_filler_type(session)\n edit_form = config.edit_form_type(session)\n edit_filler = config.edit_filler_type(session)\n allow_only = config.allow_only\n if hasattr(config.layout, 'crud_resources'):\n resources = config.layout.crud_resources\n def _before(self, *args, **kw):\n super(self.__class__, self)._before(*args, **kw)\n tmpl_context.make_pager_args = make_pager_args\n if request.response_type not in ('application/json',):\n default_renderer = AdminController._get_default_renderer()\n for action in ('get_all', 'new', 'edit'):\n for template in config.layout.crud_templates.get(action, []):\n if template.startswith(default_renderer):\n override_template(getattr(self, action), template)\n return ModelController(session, left_menu_items)", - "docstring": "New CRUD controllers using the admin configuration can be created using this." 
- }, - { - "code": "def __extract_description(self):\n tmp = list()\n for line in self._comment:\n if len(line) >= 1 and line[0] == '@':\n break\n tmp.append(line)\n tmp = self.__remove_trailing_empty_lines(tmp)\n self._description = os.linesep.join(tmp)", - "docstring": "Extracts the description from the DocBlock. The description start at the first line and stops at the first tag\n or the end of the DocBlock." - }, - { - "code": "def sanitize_ep(endpoint, plural=False):\n if plural:\n if endpoint.endswith('y'):\n endpoint = endpoint[:-1] + 'ies'\n elif not endpoint.endswith('s'):\n endpoint += 's'\n else:\n if endpoint.endswith('ies'):\n endpoint = endpoint[:-3] + 'y'\n elif endpoint.endswith('s'):\n endpoint = endpoint[:-1]\n return endpoint", - "docstring": "Sanitize an endpoint to a singular or plural form.\n\n Used mostly for convenience in the `_parse` method to grab the raw\n data from queried datasets.\n\n XXX: this is el cheapo (no bastante bien)" - }, - { - "code": "def merge_variant_files(orig_files, out_file, ref_file, config, region=None):\n in_pipeline = False\n if isinstance(orig_files, dict):\n file_key = config[\"file_key\"]\n in_pipeline = True\n orig_files = orig_files[file_key]\n out_file = _do_merge(orig_files, out_file, config, region)\n if in_pipeline:\n return [{file_key: out_file, \"region\": region, \"sam_ref\": ref_file, \"config\": config}]\n else:\n return out_file", - "docstring": "Combine multiple VCF files with different samples into a single output file.\n\n Uses bcftools merge on bgzipped input files, handling both tricky merge and\n concatenation of files. Does not correctly handle files with the same\n sample (use combine_variant_files instead)." - }, - { - "code": "def logtrick_sgd(sgd):\n r\n @wraps(sgd)\n def new_sgd(fun, x0, data, bounds=None, eval_obj=False, **sgd_kwargs):\n if bounds is None:\n return sgd(fun, x0, data, bounds=bounds, eval_obj=eval_obj,\n **sgd_kwargs)\n logx, expx, gradx, bounds = _logtrick_gen(bounds)\n if bool(eval_obj):\n def new_fun(x, *fargs, **fkwargs):\n o, g = fun(expx(x), *fargs, **fkwargs)\n return o, gradx(g, x)\n else:\n def new_fun(x, *fargs, **fkwargs):\n return gradx(fun(expx(x), *fargs, **fkwargs), x)\n result = sgd(new_fun, logx(x0), data, bounds=bounds, eval_obj=eval_obj,\n **sgd_kwargs)\n result['x'] = expx(result['x'])\n return result\n return new_sgd", - "docstring": "r\"\"\"\n Log-Trick decorator for stochastic gradients.\n\n This decorator implements the \"log trick\" for optimizing positive bounded\n variables using SGD. It will apply this trick for any variables that\n correspond to a Positive() bound.\n\n Examples\n --------\n >>> from ..optimize import sgd\n >>> from ..btypes import Bound, Positive\n\n Here is an example where we may want to enforce a particular parameter or\n parameters to be strictly greater than zero,\n\n >>> def cost(w, data, lambda_):\n ... N = len(data)\n ... y, X = data[:, 0], data[:, 1:]\n ... y_est = X.dot(w)\n ... ww = w.T.dot(w)\n ... obj = (y - y_est).sum() / N + lambda_ * ww\n ... gradw = - 2 * X.T.dot(y - y_est) / N + 2 * lambda_ * w\n ... 
return obj, gradw\n\n Now let's enforce that the `w` are positive,\n\n >>> bounds = [Positive(), Positive()]\n >>> new_sgd = logtrick_sgd(sgd)\n\n Data\n\n >>> y = np.linspace(1, 10, 100) + np.random.randn(100) + 1\n >>> X = np.array([np.ones(100), np.linspace(1, 100, 100)]).T\n >>> data = np.hstack((y[:, np.newaxis], X))\n\n Initial values\n\n >>> w_0 = np.array([1., 1.])\n >>> lambda_0 = .25\n\n >>> res = new_sgd(cost, w_0, data, args=(lambda_0,), bounds=bounds,\n ... batch_size=10, eval_obj=True)\n >>> res.x >= 0\n array([ True, True], dtype=bool)\n\n Note\n ----\n This decorator only works on unstructured optimizers. However, it can be\n use with structured_minimizer, so long as it is the inner wrapper." - }, - { - "code": "def process_slice(self, b_rot90=None):\n if b_rot90:\n self._Mnp_2Dslice = np.rot90(self._Mnp_2Dslice)\n if self.func == 'invertIntensities':\n self.invert_slice_intensities()", - "docstring": "Processes a single slice." - }, - { - "code": "def _get_or_generate_vocab(tmp_dir, vocab_filename, vocab_size):\n vocab_filepath = os.path.join(tmp_dir, vocab_filename)\n print('Vocab file written to: ' + vocab_filepath)\n if tf.gfile.Exists(vocab_filepath):\n gs = text_encoder.SubwordTextEncoder(vocab_filepath)\n return gs\n example_file = os.path.join(tmp_dir, _EXAMPLES_FILE)\n gs = text_encoder.SubwordTextEncoder()\n token_counts = tokenizer.corpus_token_counts(\n example_file, corpus_max_lines=1000000)\n gs = gs.build_to_target_size(\n vocab_size, token_counts, min_val=1, max_val=1e3)\n gs.store_to_file(vocab_filepath)\n return gs", - "docstring": "Read or create vocabulary." - }, - { - "code": "def up(cloud_init, use_snapshots, upgrade_image, snapshot_cluster, snapshot_time):\n try:\n cloud_config = CloudConfig()\n ci = None\n if cloud_init:\n ci = CloudInit()\n cloud_controller = CloudController(cloud_config)\n cloud_controller.up(ci, use_snapshots, upgrade_image, snapshot_cluster, snapshot_time)\n except CloudComposeException as ex:\n print(ex)", - "docstring": "creates a new cluster" - }, - { - "code": "def watch(self, selector, callback):\n if selector not in self._monitors:\n self._monitors[selector] = set()\n self._monitors[selector].add(callback)", - "docstring": "Call a function whenever a stream changes.\n\n Args:\n selector (DataStreamSelector): The selector to watch.\n If this is None, it is treated as a wildcard selector\n that matches every stream.\n callback (callable): The function to call when a new\n reading is pushed. Callback is called as:\n callback(stream, value)" - }, - { - "code": "def _handle_template_param(self):\n if self._context & contexts.TEMPLATE_NAME:\n if not self._context & (contexts.HAS_TEXT | contexts.HAS_TEMPLATE):\n self._fail_route()\n self._context ^= contexts.TEMPLATE_NAME\n elif self._context & contexts.TEMPLATE_PARAM_VALUE:\n self._context ^= contexts.TEMPLATE_PARAM_VALUE\n else:\n self._emit_all(self._pop())\n self._context |= contexts.TEMPLATE_PARAM_KEY\n self._emit(tokens.TemplateParamSeparator())\n self._push(self._context)", - "docstring": "Handle a template parameter at the head of the string." - }, - { - "code": "def set_(self, state):\n if not self.can_be_(state):\n state = self._meta['translator'].translate(state)\n raise TransitionError(\n \"Cannot transit from '{actual_value}' to '{value}'.\"\n .format(actual_value=self.actual_state.value, value=state.value)\n )\n self.force_set(state)", - "docstring": "Set new state for machine." 
- }, - { - "code": "def url_info(request):\n return {\n 'MEDIA_URL' : core_settings.MEDIA_URL,\n 'STATIC_URL': core_settings.STATIC_URL,\n 'VERSION' : core_settings.VERSION,\n 'SERVER_INFO' : core_settings.SERVER_INFO,\n 'SITE_NAME' : current_site_name,\n 'CURRENT_SITE': current_site,\n }", - "docstring": "Make MEDIA_URL and current HttpRequest object\n available in template code." - }, - { - "code": "def convertafield(field_comm, field_val, field_iddname):\n convinidd = ConvInIDD()\n field_typ = field_comm.get('type', [None])[0] \n conv = convinidd.conv_dict().get(field_typ, convinidd.no_type)\n return conv(field_val, field_iddname)", - "docstring": "convert field based on field info in IDD" - }, - { - "code": "def _transform_list_of_states_to_state(self, state: List[int]) -> State:\n return State({gene: state[i] for i, gene in enumerate(self.genes)})", - "docstring": "Private method which transform a list which contains the state of the gene\n in the models to a State object.\n\n Examples\n --------\n\n The model contains 2 genes: operon = {0, 1, 2}\n mucuB = {0, 1}\n >>> graph._transform_list_of_states_to_dict_of_states([0, 1])\n {operon: 0, mucuB: 1}\n >>> graph._transform_list_of_states_to_dict_of_states([2, 0])\n {operon: 2, mucuB: 0}" - }, - { - "code": "def guess_fill_char( left_comp, right_comp ):\n return \"*\"\n if ( left_comp.src == right_comp.src and left_comp.strand != right_comp.strand ): \n if left_comp.end == right_comp.start: \n return \"-\"\n return \"*\"", - "docstring": "For the case where there is no annotated synteny we will try to guess it" - }, - { - "code": "def _render(template=None, filepath=None, context=None, at_paths=None,\n at_encoding=anytemplate.compat.ENCODING, at_engine=None,\n at_ask_missing=False, at_cls_args=None, _at_usr_tmpl=None,\n **kwargs):\n ecls = find_engine(filepath, at_engine)\n LOGGER.debug(\"Use the template engine: %s\", ecls.name())\n engine = ecls() if at_cls_args is None else ecls(**at_cls_args)\n at_paths = anytemplate.utils.mk_template_paths(filepath, at_paths)\n if filepath is None:\n (render_fn, target) = (engine.renders, template)\n else:\n (render_fn, target) = (engine.render, filepath)\n try:\n return render_fn(target, context=context, at_paths=at_paths,\n at_encoding=at_encoding, **kwargs)\n except TemplateNotFound as exc:\n LOGGER.warning(\"** Missing template[s]: paths=%r\", at_paths)\n if not at_ask_missing:\n raise TemplateNotFound(str(exc))\n if _at_usr_tmpl is None:\n _at_usr_tmpl = anytemplate.compat.raw_input(\n \"\\nPlease enter an absolute or relative path starting \"\n \"from '.' 
of missing template file\"\n \"%s : \" % (\", \" + filepath if template is None else '')\n ).strip()\n usr_tmpl = anytemplate.utils.normpath(_at_usr_tmpl)\n if template is None:\n LOGGER.debug(\"Render %s instead of %s\", usr_tmpl, filepath)\n target = usr_tmpl\n return render_fn(target, context=context,\n at_paths=(at_paths + [os.path.dirname(usr_tmpl)]),\n at_encoding=at_encoding, **kwargs)\n except Exception as exc:\n raise CompileError(\"exc=%r, template=%s\" % (exc, target[:200]))", - "docstring": "Compile and render given template string and return the result string.\n\n :param template: Template content string or None\n :param filepath: Template file path or None\n :param context: A dict or dict-like object to instantiate given\n template file\n :param at_paths: Template search paths\n :param at_encoding: Template encoding\n :param at_engine: Specify the name of template engine to use explicitly or\n None to find it automatically anyhow.\n :param at_cls_args: Arguments passed to instantiate template engine class\n :param _at_usr_tmpl: Template file of path will be given by user later;\n this file will be used just for testing purpose.\n :param kwargs: Keyword arguments passed to the template engine to\n render templates with specific features enabled.\n\n :return: Rendered string" - }, - { - "code": "def _apply_diff(environ, diff):\n original = {}\n if diff:\n for k, v in diff.iteritems():\n if v is None:\n log.log(5, 'unset %s', k)\n else:\n log.log(5, '%s=\"%s\"', k, v)\n original[k] = environ.get(k)\n if original[k] is None:\n log.log(1, '%s was not set', k)\n else:\n log.log(1, '%s was \"%s\"', k, original[k])\n if v is None:\n environ.pop(k, None)\n else:\n environ[k] = v\n else:\n log.log(5, 'nothing to apply')\n return original", - "docstring": "Apply a frozen environment.\n\n :param dict diff: key-value pairs to apply to the environment.\n :returns: A dict of the key-value pairs that are being changed." - }, - { - "code": "def prepend(self, tr):\n self.transforms.insert(0, tr)\n tr.changed.connect(self._subtr_changed)\n self._rebuild_shaders()\n self.update()", - "docstring": "Add a new transform to the beginning of this chain.\n\n Parameters\n ----------\n tr : instance of Transform\n The transform to use." - }, - { - "code": "def set_bucket_name(self, bucket_name):\n is_valid_bucket_name(bucket_name)\n self.policies.append(('eq', '$bucket', bucket_name))\n self.form_data['bucket'] = bucket_name\n self.bucket_name = bucket_name", - "docstring": "Set bucket name policy condition.\n\n :param bucket_name: set bucket name." 
- }, - { - "code": "def parse_pagination(headers):\n links = {\n link.rel: parse_qs(link.href).get(\"page\", None)\n for link in link_header.parse(headers.get(\"Link\", \"\")).links\n }\n return _Navigation(\n links.get(\"previous\", [None])[0],\n links.get(\"next\", [None])[0],\n links.get(\"last\", [None])[0],\n links.get(\"current\", [None])[0],\n links.get(\"first\", [None])[0]\n )", - "docstring": "Parses headers to create a pagination objects\n\n :param headers: HTTP Headers\n :type headers: dict\n :return: Navigation object for pagination\n :rtype: _Navigation" - }, - { - "code": "def use_plenary_sequence_rule_view(self):\n self._object_views['sequence_rule'] = PLENARY\n for session in self._get_provider_sessions():\n try:\n session.use_plenary_sequence_rule_view()\n except AttributeError:\n pass", - "docstring": "Pass through to provider SequenceRuleLookupSession.use_plenary_sequence_rule_view" - }, - { - "code": "def restore(self, value, context=None):\n context = context or orb.Context()\n value = super(ReferenceColumn, self).restore(value, context=context)\n if self.testFlag(self.Flags.I18n) and context.locale == 'all':\n return {locale: self._restore(val, context) for locale, val in value.items()}\n else:\n return self._restore(value, context)", - "docstring": "Returns the inflated value state. This method will match the desired inflated state.\n\n :param value: \n :param inflated: \n\n :return: " - }, - { - "code": "def get_local_tzone():\n if localtime().tm_isdst:\n if altzone < 0:\n tzone = '+' + \\\n str(int(float(altzone) / 60 // 60)).rjust(2,\n '0') + \\\n str(int(float(\n altzone) / 60 % 60)).ljust(2, '0')\n else:\n tzone = '-' + \\\n str(int(float(altzone) / 60 // 60)).rjust(2,\n '0') + \\\n str(int(float(\n altzone) / 60 % 60)).ljust(2, '0')\n else:\n if altzone < 0:\n tzone = \\\n '+' + str(int(float(timezone) / 60 // 60)).rjust(2,\n '0') + \\\n str(int(float(\n timezone) / 60 % 60)).ljust(2, '0')\n else:\n tzone = \\\n '-' + str(int(float(timezone) / 60 // 60)).rjust(2,\n '0') + \\\n str(int(float(\n timezone) / 60 % 60)).ljust(2, '0')\n return tzone", - "docstring": "Get the current time zone on the local host" - }, - { - "code": "def end(self):\n for depth in xrange(len(self.names) - 1, -1, -1):\n self.out_f.write('{0}}}\\n'.format(self.prefix(depth)))", - "docstring": "Generate the closing part" - }, - { - "code": "def prep(self, wait, args, env=None):\n self.pattern = wait\n self.env = env\n self.args = args\n if callable(wait):\n self.wait = lambda lines: wait()", - "docstring": "Given the return value of a preparefunc, prepare this\n CompatStarter." 
- }, - { - "code": "def variable_matrix(\n variables: VarType, parent: str = None, iterator: str = \"product\"\n) -> Iterable[Dict[str, YamlValue]]:\n _iters: Dict[str, Callable] = {\"product\": product, \"zip\": zip}\n _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {\n \"zip\": iterator_zip,\n \"product\": iterator_product,\n \"arange\": iterator_arange,\n \"chain\": iterator_chain,\n \"append\": iterator_chain,\n \"cycle\": iterator_cycle,\n \"repeat\": iterator_cycle,\n }\n if isinstance(variables, dict):\n key_vars: List[List[Dict[str, YamlValue]]] = []\n for key, function in _special_keys.items():\n if variables.get(key):\n item = variables[key]\n assert item is not None\n for val in function(item, parent):\n key_vars.append(val)\n del variables[key]\n for key, value in variables.items():\n key_vars.append(list(variable_matrix(value, key, iterator)))\n logger.debug(\"key vars: %s\", key_vars)\n for i in _iters[iterator](*key_vars):\n logger.debug(\"dicts: %s\", i)\n yield combine_dictionaries(i)\n elif isinstance(variables, list):\n for item in variables:\n yield from variable_matrix(item, parent, iterator)\n else:\n assert parent is not None\n yield {parent: variables}", - "docstring": "Process the variables into a list of the appropriate combinations.\n\n This function performs recursive processing of the input variables, creating an\n iterator which has all the combinations of variables specified in the input." - }, - { - "code": "def update(self, species_set, generation):\n species_data = []\n for sid, s in iteritems(species_set.species):\n if s.fitness_history:\n prev_fitness = max(s.fitness_history)\n else:\n prev_fitness = -sys.float_info.max\n s.fitness = self.species_fitness_func(s.get_fitnesses())\n s.fitness_history.append(s.fitness)\n s.adjusted_fitness = None\n if prev_fitness is None or s.fitness > prev_fitness:\n s.last_improved = generation\n species_data.append((sid, s))\n species_data.sort(key=lambda x: x[1].fitness)\n result = []\n species_fitnesses = []\n num_non_stagnant = len(species_data)\n for idx, (sid, s) in enumerate(species_data):\n stagnant_time = generation - s.last_improved\n is_stagnant = False\n if num_non_stagnant > self.stagnation_config.species_elitism:\n is_stagnant = stagnant_time >= self.stagnation_config.max_stagnation\n if (len(species_data) - idx) <= self.stagnation_config.species_elitism:\n is_stagnant = False\n if is_stagnant:\n num_non_stagnant -= 1\n result.append((sid, s, is_stagnant))\n species_fitnesses.append(s.fitness)\n return result", - "docstring": "Required interface method. Updates species fitness history information,\n checking for ones that have not improved in max_stagnation generations,\n and - unless it would result in the number of species dropping below the configured\n species_elitism parameter if they were removed,\n in which case the highest-fitness species are spared -\n returns a list with stagnant species marked for removal." 
- }, - { - "code": "def line_ball_intersection(start_points, end_points, center, radius):\n L = end_points - start_points\n oc = start_points - center\n r = radius\n ldotl = np.einsum('ij, ij->i', L, L)\n ldotoc = np.einsum('ij, ij->i', L, oc)\n ocdotoc = np.einsum('ij, ij->i', oc, oc)\n discrims = ldotoc**2 - ldotl * (ocdotoc - r**2)\n lengths = np.zeros(len(start_points))\n m = discrims > 0\n d1 = (-ldotoc[m] - np.sqrt(discrims[m])) / ldotl[m]\n d2 = (-ldotoc[m] + np.sqrt(discrims[m])) / ldotl[m]\n d1 = np.clip(d1, 0, 1)\n d2 = np.clip(d2, 0, 1)\n lengths[m] = (d2 - d1) * np.sqrt(ldotl[m])\n return lengths", - "docstring": "Compute the length of the intersection of a line segment with a ball.\n\n Parameters\n ----------\n start_points : (n,3) float, list of points in space\n end_points : (n,3) float, list of points in space\n center : (3,) float, the sphere center\n radius : float, the sphere radius\n\n Returns\n --------\n lengths: (n,) float, the lengths." - }, - { - "code": "def cwt_haar(x: np.ndarray, scale=10):\n next_power_two = 2 ** int(np.log2(len(x)))\n x = x[0:next_power_two]\n num_data = len(x)\n x_hat = np.fft.fft(x)\n f = (2.0 * np.pi / num_data)\n omega = f * np.concatenate((np.arange(0, num_data // 2), np.arange(num_data // 2, num_data) * -1))\n psi_hat = np.sqrt(2.0 * np.pi * scale) * normalized_haar_wavelet(scale * omega, scale)\n W = np.fft.ifft(x_hat * psi_hat)\n return W[2 * scale:-2 * scale]", - "docstring": "continuous haar wavelet transform based on the paper\n \"A practical guide to wavelet analysis\" by Christopher Torrence and Gilbert P Compo" - }, - { - "code": "def start_active_players_path(page):\n soup = BeautifulSoup(page)\n try:\n return soup.find('a', href=True, text='Start Active Players')['href']\n except:\n return None", - "docstring": "Return the path in the \"Start Active Players\" button" - }, - { - "code": "def guest_reboot(self, userid):\n LOG.info(\"Begin to reboot vm %s\", userid)\n self._smtclient.guest_reboot(userid)\n LOG.info(\"Complete reboot vm %s\", userid)", - "docstring": "Reboot a guest vm." - }, - { - "code": "def get_optional_env(key):\n environment_variable_value = os.environ.get(key)\n if environment_variable_value:\n return environment_variable_value\n elif key in CONSTANTS:\n return CONSTANTS[key]\n else:\n raise Exception(\"The variable {1} is not set\".format(key))", - "docstring": "Return the value of an optional environment variable, and use\n the provided default if it's not set." - }, - { - "code": "def list_items(cls, repo, *args, **kwargs):\n out_list = IterableList(cls._id_attribute_)\n out_list.extend(cls.iter_items(repo, *args, **kwargs))\n return out_list", - "docstring": "Find all items of this type - subclasses can specify args and kwargs differently.\n If no args are given, subclasses are obliged to return all items if no additional\n arguments arg given.\n\n :note: Favor the iter_items method as it will\n\n :return:list(Item,...) 
list of item instances" - }, - { - "code": "def data_uuids(self, uuids, start, end, archiver=\"\", timeout=DEFAULT_TIMEOUT):\n if not isinstance(uuids, list):\n uuids = [uuids]\n where = \" or \".join(['uuid = \"{0}\"'.format(uuid) for uuid in uuids])\n return self.query(\"select data in ({0}, {1}) where {2}\".format(start, end, where), archiver, timeout).get('timeseries',{})", - "docstring": "With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps\n\n Arguments:\n [uuids]: list of UUIDs\n [start, end]: time references:\n [archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed\n into the constructor for the client\n [timeout]: time in seconds to wait for a response from the archiver" - }, - { - "code": "def sort(args):\n valid_sort_methods = (\"unix\", \"topo\")\n p = OptionParser(sort.__doc__)\n p.add_option(\"--method\", default=\"unix\", choices=valid_sort_methods,\n help=\"Specify sort method [default: %default]\")\n p.add_option(\"-i\", dest=\"inplace\", default=False, action=\"store_true\",\n help=\"If doing a unix sort, perform sort inplace [default: %default]\")\n p.set_tmpdir()\n p.set_outfile()\n p.set_home(\"gt\")\n opts, args = p.parse_args(args)\n if len(args) != 1:\n sys.exit(not p.print_help())\n gffile, = args\n sortedgff = opts.outfile\n if opts.inplace:\n if opts.method == \"topo\" or (opts.method == \"unix\" and gffile in (\"-\", \"stdin\")):\n logging.error(\"Cannot perform inplace sort when method is `topo`\" + \\\n \" or method is `unix` and input is `stdin` stream\")\n sys.exit()\n if opts.method == \"unix\":\n cmd = \"sort\"\n cmd += \" -k1,1 -k4,4n {0}\".format(gffile)\n if opts.tmpdir:\n cmd += \" -T {0}\".format(opts.tmpdir)\n if opts.inplace:\n cmd += \" -o {0}\".gffile\n sortedgff = None\n sh(cmd, outfile=sortedgff)\n elif opts.method == \"topo\":\n GT_HOME = opts.gt_home\n if not op.isdir(GT_HOME):\n logging.error(\"GT_HOME={0} directory does not exist\".format(GT_HOME))\n sys.exit()\n cmd = \"{0}\".format(op.join(GT_HOME, \"bin\", \"gt\"))\n cmd += \" gff3 -sort -tidy -retainids -addids no {0}\".format(gffile)\n sh(cmd, outfile=sortedgff)", - "docstring": "%prog sort gffile\n\n Sort gff file using plain old unix sort based on [chromosome, start coordinate].\n or topologically based on hierarchy of features using the gt (genometools) toolkit" - }, - { - "code": "def clear_cache(self):\n super(HyperparameterTuningJobAnalytics, self).clear_cache()\n self._tuning_job_describe_result = None\n self._training_job_summaries = None", - "docstring": "Clear the object of all local caches of API methods." - }, - { - "code": "def pch_emitter(target, source, env):\n validate_vars(env)\n pch = None\n obj = None\n for t in target:\n if SCons.Util.splitext(str(t))[1] == '.pch':\n pch = t\n if SCons.Util.splitext(str(t))[1] == '.obj':\n obj = t\n if not obj:\n obj = SCons.Util.splitext(str(pch))[0]+'.obj'\n target = [pch, obj]\n return (target, source)", - "docstring": "Adds the object file target." - }, - { - "code": "def generate_signature(secret, verb, url, nonce, data):\n parsedURL = urllib.parse.urlparse(url)\n path = parsedURL.path\n if parsedURL.query:\n path = path + '?' + parsedURL.query\n message = bytes(verb + path + str(nonce) + data, 'utf-8')\n signature = hmac.new(secret.encode('utf-8'),\n message,\n digestmod=hashlib.sha256).hexdigest()\n return signature", - "docstring": "Generate a request signature compatible with BitMEX." 
- }, - { - "code": "def classes_in_module(module) -> List:\n md = module.__dict__\n return [\n md[c] for c in md if (\n isinstance(md[c], type) and\n issubclass(md[c], ETKModule\n ) and\n md[c].__module__ == module.__name__)\n ]", - "docstring": "Return all classes with super class ExtractionModule\n\n Args:\n module:\n\n Returns: List of classes" - }, - { - "code": "def _recv_nack(self, method_frame):\n if self._nack_listener:\n delivery_tag = method_frame.args.read_longlong()\n multiple, requeue = method_frame.args.read_bits(2)\n if multiple:\n while self._last_ack_id < delivery_tag:\n self._last_ack_id += 1\n self._nack_listener(self._last_ack_id, requeue)\n else:\n self._last_ack_id = delivery_tag\n self._nack_listener(self._last_ack_id, requeue)", - "docstring": "Receive a nack from the broker." - }, - { - "code": "def infix_handle(tokens):\n func, args = get_infix_items(tokens, callback=infix_handle)\n return \"(\" + func + \")(\" + \", \".join(args) + \")\"", - "docstring": "Process infix calls." - }, - { - "code": "def pixel_to_utm(row, column, transform):\n east = transform[0] + column * transform[1]\n north = transform[3] + row * transform[5]\n return east, north", - "docstring": "Convert pixel coordinate to UTM coordinate given a transform\n\n :param row: row pixel coordinate\n :type row: int or float\n :param column: column pixel coordinate\n :type column: int or float\n :param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`\n :type transform: tuple or list\n :return: east, north UTM coordinates\n :rtype: float, float" - }, - { - "code": "def build_attr_string(attrs, supported=True):\r\n if not supported:\r\n return ''\r\n if type(attrs) == str:\r\n attrs = [attrs]\r\n result = '\\033['\r\n for attr in attrs:\r\n result += term_attributes[attr] + ';'\r\n return result[:-1] + 'm'", - "docstring": "Build a string that will turn any ANSI shell output the desired\r\n colour.\r\n\r\n attrs should be a list of keys into the term_attributes table." 
- }, - { - "code": "def to_grid_locator(latitude, longitude, precision='square'):\n if precision not in ('square', 'subsquare', 'extsquare'):\n raise ValueError('Unsupported precision value %r' % precision)\n if not -90 <= latitude <= 90:\n raise ValueError('Invalid latitude value %r' % latitude)\n if not -180 <= longitude <= 180:\n raise ValueError('Invalid longitude value %r' % longitude)\n latitude += 90.0\n longitude += 180.0\n locator = []\n field = int(longitude / LONGITUDE_FIELD)\n locator.append(chr(field + 65))\n longitude -= field * LONGITUDE_FIELD\n field = int(latitude / LATITUDE_FIELD)\n locator.append(chr(field + 65))\n latitude -= field * LATITUDE_FIELD\n square = int(longitude / LONGITUDE_SQUARE)\n locator.append(str(square))\n longitude -= square * LONGITUDE_SQUARE\n square = int(latitude / LATITUDE_SQUARE)\n locator.append(str(square))\n latitude -= square * LATITUDE_SQUARE\n if precision in ('subsquare', 'extsquare'):\n subsquare = int(longitude / LONGITUDE_SUBSQUARE)\n locator.append(chr(subsquare + 97))\n longitude -= subsquare * LONGITUDE_SUBSQUARE\n subsquare = int(latitude / LATITUDE_SUBSQUARE)\n locator.append(chr(subsquare + 97))\n latitude -= subsquare * LATITUDE_SUBSQUARE\n if precision == 'extsquare':\n extsquare = int(longitude / LONGITUDE_EXTSQUARE)\n locator.append(str(extsquare))\n extsquare = int(latitude / LATITUDE_EXTSQUARE)\n locator.append(str(extsquare))\n return ''.join(locator)", - "docstring": "Calculate Maidenhead locator from latitude and longitude.\n\n Args:\n latitude (float): Position's latitude\n longitude (float): Position's longitude\n precision (str): Precision with which generate locator string\n\n Returns:\n str: Maidenhead locator for latitude and longitude\n\n Raise:\n ValueError: Invalid precision identifier\n ValueError: Invalid latitude or longitude value" - }, - { - "code": "def check_owner_permission(payload, allow_user_owner):\n for entity_type in ['users', 'groups']:\n for perm_type in ['add', 'remove']:\n for perms in payload.get(entity_type, {}).get(perm_type, {}).values():\n if 'owner' in perms:\n if entity_type == 'users' and allow_user_owner:\n continue\n if entity_type == 'groups':\n raise exceptions.ParseError(\"Owner permission cannot be assigned to a group\")\n raise exceptions.PermissionDenied(\"Only owners can grant/revoke owner permission\")", - "docstring": "Raise ``PermissionDenied``if ``owner`` found in ``data``." 
- }, - { - "code": "def get_all_letters(self, params=None):\n if not params:\n params = {}\n return self._iterate_through_pages(self.get_letters_per_page, resource=LETTERS, **{'params': params})", - "docstring": "Get all letters\n This will iterate over all pages until it gets all elements.\n So if the rate limit exceeded it will throw an Exception and you will get nothing\n\n :param params: search params\n :return: list" - }, - { - "code": "def _translate(self, input_filename, output_filename):\n command = [\n self.translate_binary,\n '-f', 'GeoJSON',\n output_filename,\n input_filename\n ]\n result = self._runcommand(command)\n self.log('Result (Translate): ', result, lvl=debug)", - "docstring": "Translate KML file to geojson for import" - }, - { - "code": "def find_melody(file='440_480_clean.wav', chunksize=512):\n (data, freq, bits) = data_from_file(file)\n res = []\n for d in analyze_chunks(data, freq, bits, chunksize):\n if res != []:\n if res[-1][0] == d:\n val = res[-1][1]\n res[-1] = (d, val + 1)\n else:\n res.append((d, 1))\n else:\n res.append((d, 1))\n return [(x, freq) for (x, freq) in res]", - "docstring": "Cut the sample into chunks and analyze each chunk.\n\n Return a list [(Note, chunks)] where chunks is the number of chunks\n where that note is the most dominant.\n\n If two consequent chunks turn out to return the same Note they are\n grouped together.\n\n This is an experimental function." - }, - { - "code": "def get_account_at(self, address, block_number):\n cur = self.db.cursor()\n return namedb_get_account_at(cur, address, block_number)", - "docstring": "Get the sequence of states an account was in at a given block.\n Returns a list of states" - }, - { - "code": "def find_callback(args, kw=None):\n 'Return callback whether passed as a last argument or as a keyword'\n if args and callable(args[-1]):\n return args[-1], args[:-1]\n try:\n return kw['callback'], args\n except (KeyError, TypeError):\n return None, args", - "docstring": "Return callback whether passed as a last argument or as a keyword" - }, - { - "code": "def subtopics(store, folders, folder_id, subfolder_id, ann_id=None):\n items = folders.grouped_items(folder_id, subfolder_id, ann_id=ann_id)\n fcs = dict([(cid, fc) for cid, fc in store.get_many(items.keys())])\n for cid, subids in items.iteritems():\n fc = fcs[cid]\n for subid in subids:\n try:\n data = typed_subtopic_data(fc, subid)\n except KeyError:\n continue\n yield cid, subid, fc['meta_url'], subtopic_type(subid), data", - "docstring": "Yields an unordered generator of subtopics in a subfolder.\n\n Each item of the generator is a 4-tuple of ``content_id``,\n ``subtopic_id``, ``subtopic_type`` and ``data``. Subtopic type\n is one of the following Unicode strings: ``text``, ``image``\n or ``manual``. The type of ``data`` is dependent on the\n subtopic type. For ``image``, ``data`` is a ``(unicode, str)``,\n where the first element is the URL and the second element is\n the binary image data. 
For all other types, ``data`` is a\n ``unicode`` string.\n\n :param str folder_id: Folder id\n :param str subfolder_id: Subfolder id\n :param str ann_id: Username\n :rtype: generator of\n ``(content_id, subtopic_id, url, subtopic_type, data)``" - }, - { - "code": "def path(self):\n if not isinstance(self.ref, str):\n return None\n u = parse_app_url(self.ref)\n if u.inner.proto != 'file':\n return None\n return u.path", - "docstring": "Return the path to the file, if the ref is a file" - }, - { - "code": "def __highlight_occurence(self, file, occurence):\n if not self.__container.get_editor(file):\n cache_data = self.__files_cache.get_content(file)\n if cache_data:\n document = cache_data.document or self.__get_document(cache_data.content)\n self.__container.load_document(document, file)\n self.__uncache(file)\n else:\n self.__container.load_file(file)\n else:\n self.__container.set_current_editor(file)\n if not occurence:\n return\n cursor = self.__container.get_current_editor().textCursor()\n cursor.setPosition(occurence.position, QTextCursor.MoveAnchor)\n cursor.setPosition(occurence.position + occurence.length, QTextCursor.KeepAnchor)\n self.__container.get_current_editor().setTextCursor(cursor)", - "docstring": "Highlights given file occurence.\n\n :param file: File containing the occurence.\n :type file: unicode\n :param occurence: Occurence to highlight.\n :type occurence: Occurence or SearchOccurenceNode" - }, - { - "code": "def create_iam_role(self, account):\n try:\n iam = self.session.client('iam')\n trust = get_template('vpc_flow_logs_iam_role_trust.json').render()\n policy = get_template('vpc_flow_logs_role_policy.json').render()\n newrole = iam.create_role(\n Path='/',\n RoleName=self.role_name,\n AssumeRolePolicyDocument=trust\n )['Role']['Arn']\n iam.put_role_policy(\n RoleName=self.role_name,\n PolicyName='VpcFlowPolicy',\n PolicyDocument=policy\n )\n self.log.debug('Created VPC Flow Logs role & policy for {}'.format(account.account_name))\n auditlog(\n event='vpc_flow_logs.create_iam_role',\n actor=self.ns,\n data={\n 'account': account.account_name,\n 'roleName': self.role_name,\n 'trustRelationship': trust,\n 'inlinePolicy': policy\n }\n )\n return newrole\n except Exception:\n self.log.exception('Failed creating the VPC Flow Logs role for {}.'.format(account))", - "docstring": "Create a new IAM role. Returns the ARN of the newly created role\n\n Args:\n account (:obj:`Account`): Account where to create the IAM role\n\n Returns:\n `str`" - }, - { - "code": "def make_form_or_formset_fields_not_required(form_or_formset):\n if isinstance(form_or_formset, BaseFormSet):\n for single_form in form_or_formset:\n make_form_fields_not_required(single_form)\n else:\n make_form_fields_not_required(form_or_formset)", - "docstring": "Take a Form or FormSet and set all fields to not required." 
- }, - { - "code": "def send_audio_file(\n self, audio_file, device_state, authentication_headers,\n dialog_request_id, distance_profile, audio_format\n ):\n payload = {\n 'context': device_state,\n 'event': {\n 'header': {\n 'namespace': 'SpeechRecognizer',\n 'name': 'Recognize',\n 'messageId': self.generate_message_id(),\n 'dialogRequestId': dialog_request_id,\n },\n 'payload': {\n 'profile': distance_profile,\n 'format': audio_format\n }\n }\n }\n multipart_data = MultipartEncoder(\n fields=[\n (\n 'request', (\n 'request',\n json.dumps(payload),\n 'application/json;',\n {'Content-Disposition': \"form-data; name='request'\"}\n ),\n ),\n (\n 'audio', (\n 'audio',\n audio_file,\n 'application/octet-stream',\n {'Content-Disposition': \"form-data; name='audio'\"}\n )\n ),\n ],\n boundary='boundary',\n )\n headers = {\n **authentication_headers,\n 'Content-Type': multipart_data.content_type\n }\n stream_id = self.connection.request(\n 'POST',\n '/v20160207/events',\n headers=headers,\n body=multipart_data,\n )\n response = self.connection.get_response(stream_id)\n return self.parse_response(response)", - "docstring": "Send audio to AVS\n\n The file-like object are steaming uploaded for improved latency.\n\n Returns:\n bytes -- wav audio bytes returned from AVS" - }, - { - "code": "def removeDataFrameRows(self, rows):\n if not self.editable:\n return False\n if rows:\n position = min(rows)\n count = len(rows)\n self.beginRemoveRows(QtCore.QModelIndex(), position, position + count - 1)\n removedAny = False\n for idx, line in self._dataFrame.iterrows():\n if idx in rows:\n removedAny = True\n self._dataFrame.drop(idx, inplace=True)\n if not removedAny:\n return False\n self._dataFrame.reset_index(inplace=True, drop=True)\n self.endRemoveRows()\n return True\n return False", - "docstring": "Removes rows from the dataframe.\n\n :param rows: (list)\n of row indexes to removes.\n :return: (bool)\n True on success, False on failure." - }, - { - "code": "def set_widgets(self):\n source = self.parent.get_existing_keyword('source')\n if source or source == 0:\n self.leSource.setText(source)\n else:\n self.leSource.clear()\n source_scale = self.parent.get_existing_keyword('scale')\n if source_scale or source_scale == 0:\n self.leSource_scale.setText(source_scale)\n else:\n self.leSource_scale.clear()\n source_date = self.parent.get_existing_keyword('date')\n if source_date:\n self.ckbSource_date.setChecked(True)\n self.dtSource_date.setDateTime(source_date)\n else:\n self.ckbSource_date.setChecked(False)\n self.dtSource_date.clear()\n source_url = self.parent.get_existing_keyword('url')\n try:\n source_url = source_url.toString()\n except AttributeError:\n pass\n if source_url or source_url == 0:\n self.leSource_url.setText(source_url)\n else:\n self.leSource_url.clear()\n source_license = self.parent.get_existing_keyword('license')\n if source_license or source_license == 0:\n self.leSource_license.setText(source_license)\n else:\n self.leSource_license.clear()", - "docstring": "Set widgets on the Source tab." - }, - { - "code": "def evaluate_tour_Q(self, tour):\n from .chic import score_evaluate_Q\n return score_evaluate_Q(tour, self.active_sizes, self.Q)", - "docstring": "Use Cythonized version to evaluate the score of a current tour,\n taking orientation into consideration. This may be the most accurate\n evaluation under the right condition." 
- }, - { - "code": "def deleter(self, func):\n if not callable(func):\n raise TypeError('deleter must be callable function')\n if hasattr(func, '__code__') and func.__code__.co_argcount != 1:\n raise TypeError('deleter must be a function with two arguments')\n if func.__name__ != self.name:\n raise TypeError('deleter function must have same name as getter')\n self._del_func = func\n return self", - "docstring": "Register a delete function for the DynamicProperty\n\n This function may only take one argument, self." - }, - { - "code": "def send(self, path, value, metric_type):\n msg = self._msg_format.format(\n path=self._build_path(path, metric_type),\n value=value,\n metric_type=metric_type)\n LOGGER.debug('Sending %s to %s:%s', msg.encode('ascii'),\n self._host, self._port)\n try:\n if self._tcp:\n if self._sock.closed():\n return\n return self._sock.write(msg.encode('ascii'))\n self._sock.sendto(msg.encode('ascii'), (self._host, self._port))\n except iostream.StreamClosedError as error:\n LOGGER.warning('Error sending TCP statsd metric: %s', error)\n except (OSError, socket.error) as error:\n LOGGER.exception('Error sending statsd metric: %s', error)", - "docstring": "Send a metric to Statsd.\n\n :param list path: The metric path to record\n :param mixed value: The value to record\n :param str metric_type: The metric type" - }, - { - "code": "def _check_pointers(parser):\n from fortpy.stats.bp import check_pointers\n check_pointers(parser, args[\"source\"], args[\"filter\"], args[\"recursive\"])", - "docstring": "Checks the pointer best-practice conditions." - }, - { - "code": "def _get_elevation(self, location):\n url = self._elevation_query_base % (location.latitude, location.longitude)\n if self.api_key != \"\":\n url += \"&key=%s\" % self.api_key\n data = self._read_from_url(url)\n response = json.loads(data)\n if response[\"status\"] == \"OK\":\n location.elevation = int(float(response[\"results\"][0][\"elevation\"]))\n else:\n location.elevation = 0", - "docstring": "Query the elevation information with the latitude and longitude of\n the specified `location`." - }, - { - "code": "def parse_quality(self):\n if (self.quality is None):\n self.quality_val = self.default_quality\n elif (self.quality not in self.allowed_qualities):\n raise IIIFRequestError(\n code=400, parameter=\"quality\",\n text=\"The quality parameter must be '%s', got '%s'.\" %\n (\"', '\".join(self.allowed_qualities), self.quality))\n else:\n self.quality_val = self.quality", - "docstring": "Check quality paramater.\n\n Sets self.quality_val based on simple substitution of\n 'native' for default. Checks for the three valid values\n else throws an IIIFRequestError." - }, - { - "code": "def deploy(project, version, promote, quiet):\n from . import logic\n logic.deploy(project, version, promote, quiet)", - "docstring": "Deploy the app to the target environment.\n\n The target environments can be configured using the ENVIRONMENTS conf\n variable. This will also collect all static files and compile translation\n messages" - }, - { - "code": "def get_optional(self, name):\n locator = self._locate(name)\n return self._references.get_optional(locator) if locator != None else None", - "docstring": "Gets all optional dependencies by their name.\n\n :param name: the dependency name to locate.\n\n :return: a list with found dependencies or empty list of no dependencies was found." 
- }, - { - "code": "def validate_values(self, values):\n format_message = '{}: {} (value: {})'.format\n messages = []\n for name, value in values.items():\n if name not in self.expected_values:\n continue\n valid, message = self.expected_values[name].validate(value)\n if valid:\n continue\n messages.append(format_message(name, message, value))\n if len(messages):\n return False, '\\n'.join(messages)\n return True, None", - "docstring": "Validate values if they are registered as expected_values and present.\n\n * If they are not registered they shouldn't be used anywhere at all\n because profile can self check (profile.check_dependencies) for\n missing/undefined dependencies.\n\n * If they are not present in values but registered as expected_values\n either the expected value has a default value OR a request for that\n name will raise a KeyError on runtime. We don't know if all expected\n values are actually needed/used, thus this fails late." - }, - { - "code": "def getCheckpointFile(self):\n checkpointFile = self._jrdd.rdd().getCheckpointFile()\n if checkpointFile.isDefined():\n return checkpointFile.get()", - "docstring": "Gets the name of the file to which this RDD was checkpointed\n\n Not defined if RDD is checkpointed locally." - }, - { - "code": "def match(self, path, method):\n if path != '/':\n path = path.rstrip('/')\n method = method.upper()\n status = 404\n for p, n, m in self.endpoints:\n matched, url_vars = match_path(p, path)\n if not matched:\n continue\n if method not in m:\n status = 405\n raise HTTPError(status=status, body=f'Method not found: {path} {method}')\n callback, type_hints = m[method]\n type_matched, typed_url_vars = match_url_vars_type(url_vars, type_hints)\n if not type_matched:\n continue\n return callback, typed_url_vars\n raise HTTPError(status=status, body=f'Not found: {path}')", - "docstring": "Get callback and url_vars.\n\n >>> from kobin import Response\n >>> r = Router()\n >>> def view(user_id: int) -> Response:\n ... return Response(f'You are {user_id}')\n ...\n >>> r.add('/users/{user_id}', 'GET', 'user-detail', view)\n\n >>> callback, url_vars = r.match('/users/1', 'GET')\n >>> url_vars\n {'user_id': 1}\n >>> response = callback(**url_vars)\n >>> response.body\n [b'You are 1']\n\n >>> callback, url_vars = r.match('/notfound', 'GET')\n Traceback (most recent call last):\n ...\n kobin.responses.HTTPError" - }, - { - "code": "def register_validator(validator):\n if hasattr(validator, \"EXTS\") and hasattr(validator, \"run\"):\n ValidatorFactory.PLUGINS.append(validator)\n else:\n raise ValidatorException(\"Validator does not have 'run' method or EXTS variable!\")", - "docstring": "Register a Validator class for file verification.\n\n :param validator:\n :return:" - }, - { - "code": "async def verify_docker_image_task(chain, link):\n errors = []\n worker_type = get_worker_type(link.task)\n if worker_type not in chain.context.config['valid_docker_image_worker_types']:\n errors.append(\"{} is not a valid docker-image workerType!\".format(worker_type))\n raise_on_errors(errors)", - "docstring": "Verify the docker image Link.\n\n Args:\n chain (ChainOfTrust): the chain we're operating on.\n link (LinkOfTrust): the task link we're checking." 
- }, - { - "code": "def feed(self, token, test_newline=True):\n if test_newline:\n newlines = token.count(self.newline_char)\n if newlines:\n self.line += newlines\n self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1\n self.char_pos += len(token)\n self.column = self.char_pos - self.line_start_pos + 1", - "docstring": "Consume a token and calculate the new line & column.\n\n As an optional optimization, set test_newline=False is token doesn't contain a newline." - }, - { - "code": "def find_and_replace_userids(self, text):\n match = True\n pattern = re.compile('<@([A-Z0-9]{9})>')\n while match:\n match = pattern.search(text)\n if match:\n name = self.get_user_display_name(match.group(1))\n text = re.sub(re.compile(match.group(0)), '@' + name, text)\n return text", - "docstring": "Finds occurrences of Slack userids and attempts to replace them with\n display names.\n\n Args:\n text (string): The message text\n Returns:\n string: The message text with userids replaced." - }, - { - "code": "def write(self, fptr):\n self._validate(writing=True)\n self._write_superbox(fptr, b'cgrp')", - "docstring": "Write a colour group box to file." - }, - { - "code": "def reset_everything(self, payload):\n kill_signal = signals['9']\n self.process_handler.kill_all(kill_signal, True)\n self.process_handler.wait_for_finish()\n self.reset = True\n answer = {'message': 'Resetting current queue', 'status': 'success'}\n return answer", - "docstring": "Kill all processes, delete the queue and clean everything up." - }, - { - "code": "def close(self, wait=False):\n self.session.close()\n self.pool.shutdown(wait=wait)", - "docstring": "Close session, shutdown pool." - }, - { - "code": "def plot_pole(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):\n if not has_cartopy:\n print('-W- cartopy must be installed to run ipmag.plot_pole')\n return\n A95_km = A95 * 111.32\n map_axis.scatter(plon, plat, marker=marker,\n color=color, edgecolors=edgecolor, s=markersize,\n label=label, zorder=101, transform=ccrs.Geodetic())\n equi(map_axis, plon, plat, A95_km, color)\n if legend == 'yes':\n plt.legend(loc=2)", - "docstring": "This function plots a paleomagnetic pole and A95 error ellipse on a cartopy map axis.\n\n Before this function is called, a plot needs to be initialized with code\n such as that in the make_orthographic_map function.\n\n Example\n -------\n >>> plon = 200\n >>> plat = 60\n >>> A95 = 6\n >>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)\n >>> ipmag.plot_pole(map_axis, plon, plat, A95 ,color='red',markersize=40)\n\n Required Parameters\n -----------\n map_axis : the name of the current map axis that has been developed using cartopy\n plon : the longitude of the paleomagnetic pole being plotted (in degrees E)\n plat : the latitude of the paleomagnetic pole being plotted (in degrees)\n A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)\n\n Optional Parameters (defaults are used if not specified)\n -----------\n color : the default color is black. Other colors can be chosen (e.g. 'r')\n marker : the default is a circle. Other symbols can be chosen (e.g. 's')\n markersize : the default is 20. Other size can be chosen\n label : the default is no label. Labels can be assigned.\n legend : the default is no legend ('no'). Putting 'yes' will plot a legend." 
- }, - { - "code": "def read(self):\n buffer = BytesIO()\n for chunk in self.buffer_iter():\n log.debug('buffer.write(%r)', chunk)\n buffer.write(chunk)\n buffer.seek(0)\n return buffer.read()", - "docstring": "Read buffer out as a single stream.\n\n .. warning::\n\n Avoid using this function!\n\n **Why?** This is a *convenience* function; it doesn't encourage good\n memory management.\n\n All memory required for a mesh is duplicated, and returned as a\n single :class:`str`. So at best, using this function will double\n the memory required for a single model.\n\n **Instead:** Wherever possible, please use :meth:`buffer_iter`." - }, - { - "code": "def tree_to_graph(bbltree:BubbleTree) -> Graph or Digraph:\n GraphObject = Digraph if bbltree.oriented else Graph\n def create(name:str):\n ret = GraphObject('cluster_' + name)\n ret.node(name, style='invis', shape='point')\n ret.body.append('color=lightgrey')\n ret.body.append('label=\"\"')\n ret.body.append('shape=ellipse')\n ret.body.append('penwidth=2')\n ret.body.append('pencolor=black')\n return ret\n nodes = frozenset(bbltree.nodes())\n subgraphs = {}\n for powernode in bbltree.powernodes():\n if powernode not in subgraphs:\n subgraphs[powernode] = create(powernode)\n for succ in bbltree.inclusions[powernode]:\n if succ not in subgraphs:\n if succ not in nodes:\n subgraphs[succ] = create(succ)\n else:\n subgraphs[powernode].node(succ)\n for powernode, succs in bbltree.inclusions.items():\n for succ in succs:\n if succ not in nodes:\n subgraphs[powernode].subgraph(subgraphs[succ])\n graph = GraphObject('graph', graph_attr={'compound': 'true'})\n for root in bbltree.roots:\n if root in subgraphs:\n graph.subgraph(subgraphs[root])\n for source, targets in bbltree.edges.items():\n for target in targets:\n if source <= target:\n attrs = {}\n if source not in nodes:\n attrs.update({'ltail': 'cluster_' + source})\n if target not in nodes:\n attrs.update({'lhead': 'cluster_' + target})\n graph.edge(source, target, **attrs)\n return graph", - "docstring": "Compute as a graphviz.Graph instance the given graph.\n\n If given BubbleTree instance is oriented, returned value\n is a graphviz.Digraph.\n\n See http://graphviz.readthedocs.io/en/latest/examples.html#cluster-py\n for graphviz API" - }, - { - "code": "def is_valid_policy_type(policy):\n if _is_py3:\n string_type = str,\n elif _is_py2:\n string_type = basestring\n if not isinstance(policy, string_type):\n raise TypeError('policy can only be of type str')\n is_non_empty_string(policy)\n return True", - "docstring": "Validate if policy is type str\n\n :param policy: S3 style Bucket policy.\n :return: True if policy parameter is of a valid type, 'string'.\n Raise :exc:`TypeError` otherwise." 
- }, - { - "code": "def _xfsdump_output(data):\n out = {}\n summary = []\n summary_block = False\n for line in [l.strip() for l in data.split(\"\\n\") if l.strip()]:\n line = re.sub(\"^xfsdump: \", \"\", line)\n if line.startswith(\"session id:\"):\n out['Session ID'] = line.split(\" \")[-1]\n elif line.startswith(\"session label:\"):\n out['Session label'] = re.sub(\"^session label: \", \"\", line)\n elif line.startswith(\"media file size\"):\n out['Media size'] = re.sub(r\"^media file size\\s+\", \"\", line)\n elif line.startswith(\"dump complete:\"):\n out['Dump complete'] = re.sub(r\"^dump complete:\\s+\", \"\", line)\n elif line.startswith(\"Dump Status:\"):\n out['Status'] = re.sub(r\"^Dump Status:\\s+\", \"\", line)\n elif line.startswith(\"Dump Summary:\"):\n summary_block = True\n continue\n if line.startswith(\" \") and summary_block:\n summary.append(line.strip())\n elif not line.startswith(\" \") and summary_block:\n summary_block = False\n if summary:\n out['Summary'] = ' '.join(summary)\n return out", - "docstring": "Parse CLI output of the xfsdump utility." - }, - { - "code": "def _compile_update_join_wheres(self, query):\n join_wheres = []\n for join in query.joins:\n for clause in join.clauses:\n join_wheres.append(self._compile_join_constraints(clause))\n return \" \".join(join_wheres)", - "docstring": "Compile the \"join\" clauses for an update.\n\n :param query: A QueryBuilder instance\n :type query: QueryBuilder\n\n :return: The compiled sql\n :rtype: str" - }, - { - "code": "def jump_server(self, msg=\"Changing servers\"):\n if self.connection.is_connected():\n self.connection.disconnect(msg)\n next(self.servers)\n self._connect()", - "docstring": "Connect to a new server, possibly disconnecting from the current.\n\n The bot will skip to next server in the server_list each time\n jump_server is called." - }, - { - "code": "def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):\n if index is None:\n index = extract_index(arrays)\n else:\n index = ensure_index(index)\n arrays = _homogenize(arrays, index, dtype)\n axes = [ensure_index(columns), index]\n return create_block_manager_from_arrays(arrays, arr_names, axes)", - "docstring": "Segregate Series based on type and coerce into matrices.\n\n Needs to handle a lot of exceptional cases." - }, - { - "code": "def cache_size(self, new_value):\n if type(new_value) == int and 0 < new_value:\n if self._lemma_cache is not None:\n self._lemma_cache = repoze.lru.LRUCache(new_value)\n self._synset_cache = repoze.lru.LRUCache(new_value)", - "docstring": "Set the cache size used to reduce the number of database\n access operations." - }, - { - "code": "def _storage_purge_all(delete=False, verbosity=0):\n orphaned_storages = Storage.objects.filter(data=None)\n if verbosity >= 1:\n if orphaned_storages.exists():\n logger.info(__(\"Unreferenced storages ({}):\", orphaned_storages.count()))\n for storage_id in orphaned_storages.values_list('id', flat=True):\n logger.info(__(\" {}\", storage_id))\n else:\n logger.info(\"No unreferenced storages\")\n if delete:\n orphaned_storages.delete()", - "docstring": "Purge unreferenced storages." 
- }, - { - "code": "def file_contents_safe(self, sentry_unit, file_name,\n max_wait=60, fatal=False):\n unit_name = sentry_unit.info['unit_name']\n file_contents = False\n tries = 0\n while not file_contents and tries < (max_wait / 4):\n try:\n file_contents = sentry_unit.file_contents(file_name)\n except IOError:\n self.log.debug('Attempt {} to open file {} from {} '\n 'failed'.format(tries, file_name,\n unit_name))\n time.sleep(4)\n tries += 1\n if file_contents:\n return file_contents\n elif not fatal:\n return None\n elif fatal:\n msg = 'Failed to get file contents from unit.'\n amulet.raise_status(amulet.FAIL, msg)", - "docstring": "Get file contents from a sentry unit. Wrap amulet file_contents\n with retry logic to address races where a file checks as existing,\n but no longer exists by the time file_contents is called.\n Return None if file not found. Optionally raise if fatal is True." - }, - { - "code": "def overlay(array1, array2, alpha=0.5):\n if alpha < 0. or alpha > 1.:\n raise ValueError(\"`alpha` needs to be between [0, 1]\")\n if array1.shape != array2.shape:\n raise ValueError('`array1` and `array2` must have the same shapes')\n return (array1 * alpha + array2 * (1. - alpha)).astype(array1.dtype)", - "docstring": "Overlays `array1` onto `array2` with `alpha` blending.\n\n Args:\n array1: The first numpy array.\n array2: The second numpy array.\n alpha: The alpha value of `array1` as overlayed onto `array2`. This value needs to be between [0, 1],\n with 0 being `array2` only to 1 being `array1` only (Default value = 0.5).\n\n Returns:\n The `array1`, overlayed with `array2` using `alpha` blending." - }, - { - "code": "def _get_line(self, search_string, search_file, return_string=True, case_sens=True):\n if os.path.isfile(search_file):\n if type(search_string) == type(''): search_string = [search_string]\n if not case_sens: search_string = [i.lower() for i in search_string]\n with open(search_file) as fp:\n for line in fp:\n query_line = line if case_sens else line.lower()\n if all([i in query_line for i in search_string]):\n return line if return_string else True\n if return_string:\n raise Exception('%s not found in %s'%(' & '.join(search_string), search_file))\n else: return False\n else: raise Exception('%s file does not exist'%search_file)", - "docstring": "Return the first line containing a set of strings in a file.\n\n If return_string is False, we just return whether such a line\n was found. If case_sens is False, the search is case\n insensitive." - }, - { - "code": "def setidd(cls, iddinfo, iddindex, block, idd_version):\n cls.idd_info = iddinfo\n cls.block = block\n cls.idd_index = iddindex\n cls.idd_version = idd_version", - "docstring": "Set the IDD to be used by eppy.\n\n Parameters\n ----------\n iddinfo : list\n Comments and metadata about fields in the IDD.\n block : list\n Field names in the IDD." 
- }, - { - "code": "def request(self, url, method='GET', params=None, data=None,\n expected_response_code=200, headers=None):\n url = \"{0}/{1}\".format(self._baseurl, url)\n if headers is None:\n headers = self._headers\n if params is None:\n params = {}\n if isinstance(data, (dict, list)):\n data = json.dumps(data)\n retry = True\n _try = 0\n while retry:\n try:\n response = self._session.request(\n method=method,\n url=url,\n auth=(self._username, self._password),\n params=params,\n data=data,\n headers=headers,\n proxies=self._proxies,\n verify=self._verify_ssl,\n timeout=self._timeout\n )\n break\n except (requests.exceptions.ConnectionError,\n requests.exceptions.HTTPError,\n requests.exceptions.Timeout):\n _try += 1\n if self._retries != 0:\n retry = _try < self._retries\n if method == \"POST\":\n time.sleep((2 ** _try) * random.random() / 100.0)\n if not retry:\n raise\n if 500 <= response.status_code < 600:\n raise InfluxDBServerError(response.content)\n elif response.status_code == expected_response_code:\n return response\n else:\n raise InfluxDBClientError(response.content, response.status_code)", - "docstring": "Make a HTTP request to the InfluxDB API.\n\n :param url: the path of the HTTP request, e.g. write, query, etc.\n :type url: str\n :param method: the HTTP method for the request, defaults to GET\n :type method: str\n :param params: additional parameters for the request, defaults to None\n :type params: dict\n :param data: the data of the request, defaults to None\n :type data: str\n :param expected_response_code: the expected response code of\n the request, defaults to 200\n :type expected_response_code: int\n :param headers: headers to add to the request\n :type headers: dict\n :returns: the response from the request\n :rtype: :class:`requests.Response`\n :raises InfluxDBServerError: if the response code is any server error\n code (5xx)\n :raises InfluxDBClientError: if the response code is not the\n same as `expected_response_code` and is not a server error code" - }, - { - "code": "def __replaceSpecialValues(self, decisions):\n\t\terror = []\n\t\tfor row, line in enumerate(decisions):\n\t\t\tif '.' in line:\n\t\t\t\tfor i, element in enumerate(line):\n\t\t\t\t\tif row == 0:\n\t\t\t\t\t\terror.append(\n\t\t\t\t\t\t\t\"Row: {}colume: {}==> don't have parent value\".format(str(row).ljust(4), str(i).ljust(4)))\n\t\t\t\t\tif element == self.__parentSymbol:\n\t\t\t\t\t\tif decisions[row - 1][i] == '.':\n\t\t\t\t\t\t\terror.append(\"Row: {}Colume: {}==> don't have parent value\".format(str(row).ljust(4),\n\t\t\t\t\t\t\t str(i).ljust(4)))\n\t\t\t\t\t\tdecisions[row][i] = decisions[row - 1][i]\n\t\tif error:\n\t\t\tview.Tli.showErrors('ReplaceSpecialValuesError', error)\n\t\telse:\n\t\t\treturn decisions", - "docstring": "Will replace special values in decisions array.\n\n\t\tArgs:\n\t\t\tdecisions (array of array of str): Standard decision array format.\n\t\tRaises:\n\t\t\tValueError: Row element don't have parent value.\n\n\t\tReturns:\n\t\t\tNew decision array with updated values." - }, - { - "code": "def FromMicroseconds(self, micros):\n self._NormalizeDuration(\n micros // _MICROS_PER_SECOND,\n (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND)", - "docstring": "Converts microseconds to Duration." 
- }, - { - "code": "def pipe_urlbuilder(context=None, _INPUT=None, conf=None, **kwargs):\n pkwargs = cdicts(opts, kwargs)\n get_params = get_funcs(conf.get('PARAM', []), **kwargs)[0]\n get_paths = get_funcs(conf.get('PATH', []), **pkwargs)[0]\n get_base = get_funcs(conf['BASE'], listize=False, **pkwargs)[0]\n parse_params = utils.parse_params\n splits = get_splits(_INPUT, funcs=[get_params, get_paths, get_base])\n parsed = utils.dispatch(splits, *get_dispatch_funcs('pass', parse_params))\n _OUTPUT = starmap(parse_result, parsed)\n return _OUTPUT", - "docstring": "A url module that builds a url. Loopable.\n\n Parameters\n ----------\n context : pipe2py.Context object\n _INPUT : pipeforever pipe or an iterable of items or fields\n conf : {\n 'PARAM': [\n {'key': {'value': <'order'>}, 'value': {'value': <'desc'>}},\n {'key': {'value': <'page'>}, 'value': {'value': <'2'>}}\n ]\n 'PATH': {'type': 'text', 'value': <''>},\n 'BASE': {'type': 'text', 'value': <'http://site.com/feed.xml'>},\n }\n\n Yields\n ------\n _OUTPUT : url" - }, - { - "code": "async def connect_async(self, loop=None, timeout=None):\n if self.deferred:\n raise Exception(\"Error, database not properly initialized \"\n \"before opening connection\")\n if self._async_conn:\n return\n elif self._async_wait:\n await self._async_wait\n else:\n self._loop = loop\n self._async_wait = asyncio.Future(loop=self._loop)\n conn = self._async_conn_cls(\n database=self.database,\n loop=self._loop,\n timeout=timeout,\n **self.connect_params_async)\n try:\n await conn.connect()\n except Exception as e:\n if not self._async_wait.done():\n self._async_wait.set_exception(e)\n self._async_wait = None\n raise\n else:\n self._task_data = TaskLocals(loop=self._loop)\n self._async_conn = conn\n self._async_wait.set_result(True)", - "docstring": "Set up async connection on specified event loop or\n on default event loop." - }, - { - "code": "def set_geometry(self, crect):\n x0, y0, width, height = self.geometry()\n if width is None:\n width = crect.width()\n if height is None:\n height = crect.height()\n offset = self.editor.contentOffset()\n x = self.editor.blockBoundingGeometry(self.editor.firstVisibleBlock())\\\n .translated(offset.x(), offset.y()).left() \\\n + self.editor.document().documentMargin() \\\n + self.editor.panels.margin_size(Panel.Position.LEFT)\n y = crect.top() + self.editor.panels.margin_size(Panel.Position.TOP)\n self.setGeometry(QRect(x+x0, y+y0, width, height))", - "docstring": "Set geometry for floating panels.\n\n Normally you don't need to override this method, you should override\n `geometry` instead." - }, - { - "code": "def add(self, val):\n if not isinstance(val, six.integer_types):\n raise ValueError(\"CumulativePointLong only supports integer types\")\n if val > 0:\n super(CumulativePointLong, self).add(val)", - "docstring": "Add `val` to the current value if it's positive.\n\n Return without adding if `val` is not positive.\n\n :type val: int\n :param val: Value to add." - }, - { - "code": "def visit_AsyncFunctionDef(self, node):\n node = self.get_function_node(node)\n if node is not None:\n node._async = True", - "docstring": "Visit an async function node." 
- }, - { - "code": "def fetch(self, query, decode_geom=False):\n copy_query = 'COPY ({query}) TO stdout WITH (FORMAT csv, HEADER true)'.format(query=query)\n query_columns = get_columns(self, query)\n result = recursive_read(self, copy_query)\n df_types = dtypes(query_columns, exclude_dates=True)\n df = pd.read_csv(result, dtype=df_types,\n parse_dates=date_columns_names(query_columns),\n true_values=['t'],\n false_values=['f'],\n index_col='cartodb_id' if 'cartodb_id' in df_types.keys() else False,\n converters={'the_geom': lambda x: _decode_geom(x) if decode_geom else x})\n if decode_geom:\n df.rename({'the_geom': 'geometry'}, axis='columns', inplace=True)\n return df", - "docstring": "Pull the result from an arbitrary SELECT SQL query from a CARTO account\n into a pandas DataFrame.\n\n Args:\n query (str): SELECT query to run against CARTO user database. This data\n will then be converted into a pandas DataFrame.\n decode_geom (bool, optional): Decodes CARTO's geometries into a\n `Shapely `__\n object that can be used, for example, in `GeoPandas\n `__.\n\n Returns:\n pandas.DataFrame: DataFrame representation of query supplied.\n Pandas data types are inferred from PostgreSQL data types.\n In the case of PostgreSQL date types, dates are attempted to be\n converted, but on failure a data type 'object' is used.\n\n Examples:\n This query gets the 10 highest values from a table and\n returns a dataframe.\n\n .. code:: python\n\n topten_df = cc.query(\n '''\n SELECT * FROM\n my_table\n ORDER BY value_column DESC\n LIMIT 10\n '''\n )\n\n This query joins points to polygons based on intersection, and\n aggregates by summing the values of the points in each polygon. The\n query returns a dataframe, with a geometry column that contains\n polygons.\n\n .. code:: python\n\n points_aggregated_to_polygons = cc.query(\n '''\n SELECT polygons.*, sum(points.values)\n FROM polygons JOIN points\n ON ST_Intersects(points.the_geom, polygons.the_geom)\n GROUP BY polygons.the_geom, polygons.cartodb_id\n ''',\n decode_geom=True\n )" - }, - { - "code": "def step(self):\n assert len(self._agents_to_act) == 0\n self.next()\n while len(self._agents_to_act) > 0:\n self.next()", - "docstring": "Progress simulation with a single step.\n\n Can not be called when some of the agents have not acted for the\n current step." 
- }, - { - "code": "def decompress(file_obj, file_type):\n def is_zip():\n archive = zipfile.ZipFile(file_obj)\n result = {name: wrap_as_stream(archive.read(name))\n for name in archive.namelist()}\n return result\n def is_tar():\n import tarfile\n archive = tarfile.open(fileobj=file_obj, mode='r')\n result = {name: archive.extractfile(name)\n for name in archive.getnames()}\n return result\n file_type = str(file_type).lower()\n if isinstance(file_obj, bytes):\n file_obj = wrap_as_stream(file_obj)\n if file_type[-3:] == 'zip':\n return is_zip()\n if 'tar' in file_type[-6:]:\n return is_tar()\n raise ValueError('Unsupported type passed!')", - "docstring": "Given an open file object and a file type, return all components\n of the archive as open file objects in a dict.\n\n Parameters\n -----------\n file_obj : file-like\n Containing compressed data\n file_type : str\n File extension, 'zip', 'tar.gz', etc\n\n Returns\n ---------\n decompressed : dict\n Data from archive in format {file name : file-like}" - }, - { - "code": "def _parse_and_sort_accept_header(accept_header):\n return sorted([_split_into_mimetype_and_priority(x) for x in accept_header.split(',')],\n key=lambda x: x[1], reverse=True)", - "docstring": "Parse and sort the accept header items.\n\n >>> _parse_and_sort_accept_header('application/json;q=0.5, text/*')\n [('text/*', 1.0), ('application/json', 0.5)]" - }, - { - "code": "def parse_xml_point(elem):\n point = {}\n units = {}\n for data in elem.findall('data'):\n name = data.get('name')\n unit = data.get('units')\n point[name] = float(data.text) if name != 'date' else parse_iso_date(data.text)\n if unit:\n units[name] = unit\n return point, units", - "docstring": "Parse an XML point tag." - }, - { - "code": "def _update_offset_value(self, f, offset, size, value):\n f.seek(offset, 0)\n if (size == 8):\n f.write(struct.pack('>q', value))\n else:\n f.write(struct.pack('>i', value))", - "docstring": "Writes \"value\" into location \"offset\" in file \"f\"." - }, - { - "code": "def remove_arg(args, arg, has_param=False):\n for idx, found_arg in enumerate(args):\n if found_arg == arg:\n if has_param:\n slice_idx = idx + 2\n else:\n slice_idx = idx + 1\n args = args[:idx] + args[slice_idx:]\n break\n return args", - "docstring": "Removes the first instance of the specified arg from the list of args.\n\n If the arg is present and has_param is set, also removes the parameter that follows\n the arg.\n :param list args: strings representing an argument list.\n :param staring arg: argument to remove from the list.\n :param bool has_param: if true, also remove the parameter that follows arg in the list.\n :return: possibly modified list of args." - }, - { - "code": "def enable_schedule(self):\n self.opts['schedule']['enabled'] = True\n evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)\n evt.fire_event({'complete': True, 'schedule': self._get_schedule()},\n tag='/salt/minion/minion_schedule_enabled_complete')", - "docstring": "Enable the scheduler." 
- }, - { - "code": "def paginate(limit, start_arg=\"next_token\", limit_arg=\"max_results\"):\n default_start = 0\n def outer_wrapper(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg])\n lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg])\n stop = start + lim\n result = func(*args, **kwargs)\n limited_results = list(itertools.islice(result, start, stop))\n next_token = stop if stop < len(result) else None\n return limited_results, next_token\n return wrapper\n return outer_wrapper", - "docstring": "Returns a limited result list, and an offset into list of remaining items\n\n Takes the next_token, and max_results kwargs given to a function and handles\n the slicing of the results. The kwarg `next_token` is the offset into the\n list to begin slicing from. `max_results` is the size of the result required\n\n If the max_results is not supplied then the `limit` parameter is used as a\n default\n\n :param limit_arg: the name of argument in the decorated function that\n controls amount of items returned\n :param start_arg: the name of the argument in the decorated that provides\n the starting offset\n :param limit: A default maximum items to return\n :return: a tuple containing a list of items, and the offset into the list" - }, - { - "code": "def killall(self, everywhere=False):\n with self._NAILGUN_KILL_LOCK:\n for proc in self._iter_nailgun_instances(everywhere):\n logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid))\n proc.terminate()", - "docstring": "Kills all nailgun servers started by pants.\n\n :param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;\n otherwise restricts the nailguns killed to those started for the\n current build root." - }, - { - "code": "def grad(self):\n from . import _ndarray_cls\n hdl = NDArrayHandle()\n check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))\n if hdl.value is None:\n return None\n return _ndarray_cls(hdl)", - "docstring": "Returns gradient buffer attached to this NDArray." - }, - { - "code": "def attrget(self, groupname, attrname, rownr):\n return self._attrget(groupname, attrname, rownr)", - "docstring": "Get the value of an attribute in the given row in a group." 
- }, - { - "code": "def fill_up(self,\n bucket_batch_sizes: List[BucketBatchSize],\n seed: int = 42) -> 'ParallelDataSet':\n source = list(self.source)\n target = list(self.target)\n label = list(self.label)\n rs = np.random.RandomState(seed)\n for bucket_idx in range(len(self)):\n bucket = bucket_batch_sizes[bucket_idx].bucket\n bucket_batch_size = bucket_batch_sizes[bucket_idx].batch_size\n bucket_source = self.source[bucket_idx]\n bucket_target = self.target[bucket_idx]\n bucket_label = self.label[bucket_idx]\n num_samples = bucket_source.shape[0]\n if num_samples % bucket_batch_size != 0:\n rest = bucket_batch_size - num_samples % bucket_batch_size\n desired_indices_np = rs.randint(num_samples, size=rest)\n desired_indices = mx.nd.array(desired_indices_np)\n if isinstance(source[bucket_idx], np.ndarray):\n source[bucket_idx] = np.concatenate((bucket_source, bucket_source.take(desired_indices_np)), axis=0)\n else:\n source[bucket_idx] = mx.nd.concat(bucket_source, bucket_source.take(desired_indices), dim=0)\n target[bucket_idx] = mx.nd.concat(bucket_target, bucket_target.take(desired_indices), dim=0)\n label[bucket_idx] = mx.nd.concat(bucket_label, bucket_label.take(desired_indices), dim=0)\n return ParallelDataSet(source, target, label)", - "docstring": "Returns a new dataset with buckets filled up.\n\n :param bucket_batch_sizes: Bucket batch sizes.\n :param seed: The random seed used for sampling sentences to fill up.\n :return: New dataset with buckets filled up to the next multiple of batch size" - }, - { - "code": "def find_donors_and_acceptors_in_ligand(self):\n atom_names=[x.name for x in self.topology_data.universe.ligand]\n try:\n for atom in self.topology_data.mol.GetSubstructMatches(self.HDonorSmarts, uniquify=1):\n self.donors.append(atom_names[atom[0]])\n for atom in self.topology_data.mol.GetSubstructMatches(self.HAcceptorSmarts, uniquify=1):\n self.acceptors.append(atom_names[atom[0]])\n except Exception as e:\n m = Chem.MolFromPDBFile(\"lig.pdb\")\n self.donors = []\n self.acceptors = []\n for atom in m.GetSubstructMatches(self.HDonorSmarts, uniquify=1):\n self.donors.append(atom_names[atom[0]])\n haccep = \"[$([O,S;H1;v2]-[!$(*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=!@[O,N,P,S])]),$([nH0,o,s;+0])]\"\n self.HAcceptorSmarts = Chem.MolFromSmarts(haccep)\n for atom in m.GetSubstructMatches(self.HAcceptorSmarts, uniquify=1):\n self.acceptors.append(atom_names[atom[0]])", - "docstring": "Since MDAnalysis a pre-set list for acceptor and donor atoms for proteins and solvents\n from specific forcefields, it is necessary to find donor and acceptor atoms for the\n ligand molecule. This function uses RDKit and searches through ligand atoms to find\n matches for pre-set list of possible donor and acceptor atoms. The resulting list is then\n parsed to MDAnalysis through the donors and acceptors arguments." 
- }, - { - "code": "def get_student_current_grades(self, username, course_ids=None):\n if course_ids is None:\n enrollments_client = CourseEnrollments(self.requester, self.base_url)\n enrollments = enrollments_client.get_student_enrollments()\n course_ids = list(enrollments.get_enrolled_course_ids())\n all_current_grades = []\n for course_id in course_ids:\n try:\n all_current_grades.append(self.get_student_current_grade(username, course_id))\n except HTTPError as error:\n if error.response.status_code >= 500:\n raise\n return CurrentGradesByUser(all_current_grades)", - "docstring": "Returns a CurrentGradesByUser object with the user current grades.\n\n Args:\n username (str): an edx user's username\n course_ids (list): a list of edX course ids.\n\n Returns:\n CurrentGradesByUser: object representing the student current grades" - }, - { - "code": "def WalkChildren(elem):\n\tfor child in elem.childNodes:\n\t\tyield child\n\t\tfor elem in WalkChildren(child):\n\t\t\tyield elem", - "docstring": "Walk the XML tree of children below elem, returning each in order." - }, - { - "code": "def previous(self) -> \"ArrayEntry\":\n try:\n newval, nbef = self.before.pop()\n except IndexError:\n raise NonexistentInstance(self.json_pointer(), \"previous of first\") from None\n return ArrayEntry(\n self.index - 1, nbef, self.after.cons(self.value), newval,\n self.parinst, self.schema_node, self.timestamp)", - "docstring": "Return an instance node corresponding to the previous entry.\n\n Raises:\n NonexistentInstance: If the receiver is the first entry of the\n parent array." - }, - { - "code": "def match(self, data, threshold=0.5, generator=False):\n blocked_pairs = self._blockData(data)\n clusters = self.matchBlocks(blocked_pairs, threshold)\n if generator:\n return clusters\n else:\n return list(clusters)", - "docstring": "Identifies records that all refer to the same entity, returns\n tuples\n\n containing a set of record ids and a confidence score as a\n float between 0 and 1. The record_ids within each set should\n refer to the same entity and the confidence score is a measure\n of our confidence that all the records in a cluster refer to\n the same entity.\n\n This method should only used for small to moderately sized\n datasets for larger data, use matchBlocks\n\n Arguments:\n\n data -- Dictionary of records, where the keys are record_ids\n and the values are dictionaries with the keys being\n field names\n\n threshold -- Number between 0 and 1 (default is .5). 
We will\n consider records as potential duplicates if the\n predicted probability of being a duplicate is\n above the threshold.\n\n Lowering the number will increase recall,\n raising it will increase precision" - }, - { - "code": "def asmono(samples:np.ndarray, channel:Union[int, str]=0) -> np.ndarray:\n if numchannels(samples) == 1:\n if isinstance(samples[0], float):\n return samples\n elif isinstance(samples[0], np.dnarray):\n return np.reshape(samples, (len(samples),))\n else:\n raise TypeError(\"Samples should be numeric, found: %s\"\n % str(type(samples[0])))\n if isinstance(channel, int):\n return samples[:, channel]\n elif channel == 'mix':\n return _mix(samples, scale_by_numchannels=True)\n else:\n raise ValueError(\"channel has to be an integer indicating a channel,\"\n \" or 'mix' to mix down all channels\")", - "docstring": "convert samples to mono if they are not mono already.\n\n The returned array will always have the shape (numframes,)\n\n channel: the channel number to use, or 'mix' to mix-down\n all channels" - }, - { - "code": "def _collapse_by_bam_variantcaller(samples):\n by_bam = collections.OrderedDict()\n for data in (x[0] for x in samples):\n work_bam = utils.get_in(data, (\"combine\", \"work_bam\", \"out\"), data.get(\"align_bam\"))\n variantcaller = get_variantcaller(data)\n if isinstance(work_bam, list):\n work_bam = tuple(work_bam)\n key = (multi.get_batch_for_key(data), work_bam, variantcaller)\n try:\n by_bam[key].append(data)\n except KeyError:\n by_bam[key] = [data]\n out = []\n for grouped_data in by_bam.values():\n cur = grouped_data[0]\n cur.pop(\"region\", None)\n region_bams = cur.pop(\"region_bams\", None)\n if region_bams and len(region_bams[0]) > 1:\n cur.pop(\"work_bam\", None)\n out.append([cur])\n return out", - "docstring": "Collapse regions to a single representative by BAM input, variant caller and batch." - }, - { - "code": "def _get_method_repo(self, namespace=None):\n self._validate_namespace(namespace)\n if namespace not in self.methods:\n self.methods[namespace] = NocaseDict()\n return self.methods[namespace]", - "docstring": "Returns the method repository for the specified CIM namespace\n within the mock repository. This is the original instance variable,\n so any modifications will change the mock repository.\n\n Validates that the namespace exists in the mock repository.\n\n If the method repository does not contain the namespace yet, it is\n added.\n\n Parameters:\n\n namespace(:term:`string`): Namespace name. Must not be `None`.\n\n Returns:\n\n dict of dict of method callback function: Method repository.\n\n Raises:\n\n :exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does\n not exist." 
- }, - { - "code": "def getStore(self) :\n res = {}\n res.update(self.store)\n for k, v in self.subStores.items() :\n res[k] = v.getStore()\n return res", - "docstring": "get the inner store as dictionary" - }, - { - "code": "def decode_value(value, client):\n value_type = value.WhichOneof(\"value_type\")\n if value_type == \"null_value\":\n return None\n elif value_type == \"boolean_value\":\n return value.boolean_value\n elif value_type == \"integer_value\":\n return value.integer_value\n elif value_type == \"double_value\":\n return value.double_value\n elif value_type == \"timestamp_value\":\n return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value)\n elif value_type == \"string_value\":\n return value.string_value\n elif value_type == \"bytes_value\":\n return value.bytes_value\n elif value_type == \"reference_value\":\n return reference_value_to_document(value.reference_value, client)\n elif value_type == \"geo_point_value\":\n return GeoPoint(value.geo_point_value.latitude, value.geo_point_value.longitude)\n elif value_type == \"array_value\":\n return [decode_value(element, client) for element in value.array_value.values]\n elif value_type == \"map_value\":\n return decode_dict(value.map_value.fields, client)\n else:\n raise ValueError(\"Unknown ``value_type``\", value_type)", - "docstring": "Converts a Firestore protobuf ``Value`` to a native Python value.\n\n Args:\n value (google.cloud.firestore_v1beta1.types.Value): A\n Firestore protobuf to be decoded / parsed / converted.\n client (~.firestore_v1beta1.client.Client): A client that has\n a document factory.\n\n Returns:\n Union[NoneType, bool, int, float, datetime.datetime, \\\n str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native\n Python value converted from the ``value``.\n\n Raises:\n NotImplementedError: If the ``value_type`` is ``reference_value``.\n ValueError: If the ``value_type`` is unknown." - }, - { - "code": "def post_log_artifacts(job_log):\n logger.debug(\"Downloading/parsing log for log %s\", job_log.id)\n try:\n artifact_list = extract_text_log_artifacts(job_log)\n except LogSizeException as e:\n job_log.update_status(JobLog.SKIPPED_SIZE)\n logger.warning('Skipping parsing log for %s: %s', job_log.id, e)\n return\n except Exception as e:\n job_log.update_status(JobLog.FAILED)\n if isinstance(e, HTTPError) and e.response.status_code in (403, 404):\n logger.warning(\"Unable to retrieve log for %s: %s\", job_log.id, e)\n return\n logger.error(\"Failed to download/parse log for %s: %s\", job_log.id, e)\n raise\n try:\n serialized_artifacts = serialize_artifact_json_blobs(artifact_list)\n store_job_artifacts(serialized_artifacts)\n job_log.update_status(JobLog.PARSED)\n logger.debug(\"Stored artifact for %s %s\", job_log.job.repository.name,\n job_log.job.id)\n except Exception as e:\n logger.error(\"Failed to store parsed artifact for %s: %s\", job_log.id, e)\n raise", - "docstring": "Post a list of artifacts to a job." - }, - { - "code": "def lchown(path, user, group):\n path = os.path.expanduser(path)\n uid = user_to_uid(user)\n gid = group_to_gid(group)\n err = ''\n if uid == '':\n if user:\n err += 'User does not exist\\n'\n else:\n uid = -1\n if gid == '':\n if group:\n err += 'Group does not exist\\n'\n else:\n gid = -1\n return os.lchown(path, uid, gid)", - "docstring": "Chown a file, pass the file the desired user and group without following\n symlinks.\n\n path\n path to the file or directory\n\n user\n user owner\n\n group\n group owner\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' file.chown /etc/passwd root root" - }, - { - "code": "def value_to_datum(self, instance, value):\n if value is None:\n return None\n bound = getattr(instance._origin, self.cls)\n if type(value) is bound:\n if self.use_data_setter:\n return value._data\n else:\n descriptors, alt_descriptors = get_pk_descriptors(bound)\n if len(descriptors) == 1:\n return getattr(value, descriptors[0][0])\n elif len(descriptors) > 1:\n return tuple(\n getattr(value, name)\n for name, _ in descriptors\n )\n else:\n raise AttributeError(\n \"unable to perform set object no primary key \"\n \"fields defined for %s\" % self.cls)\n else:\n raise TypeError(\n \"must be %s, not %s\" % (self.cls, type(value).__name__))", - "docstring": "Convert a given Python-side value to a MAAS-side datum.\n\n :param instance: The `Object` instance on which this field is\n currently operating. This method should treat it as read-only, for\n example to perform validation with regards to other fields.\n :param datum: The Python-side value to validate and convert into a\n MAAS-side datum.\n :return: A datum derived from the given value." - }, - { - "code": "def main():\n parser = argparse.ArgumentParser(\n description='Tool for testing caffe to mxnet conversion layer by layer')\n parser.add_argument('--image_url', type=str,\n default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'\\\n 'tutorials/python/predict_image/cat.jpg',\n help='input image to test inference, can be either file path or url')\n parser.add_argument('--caffe_prototxt_path', type=str,\n default='./model.prototxt',\n help='path to caffe prototxt')\n parser.add_argument('--caffe_model_path', type=str,\n default='./model.caffemodel',\n help='path to caffe weights')\n parser.add_argument('--caffe_mean', type=str,\n default='./model_mean.binaryproto',\n help='path to caffe mean file')\n parser.add_argument('--mean_diff_allowed', type=int, default=1e-03,\n help='mean difference allowed between caffe blob and mxnet blob')\n parser.add_argument('--max_diff_allowed', type=int, default=1e-01,\n help='max difference allowed between caffe blob and mxnet blob')\n parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')\n args = parser.parse_args()\n convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,\n args.caffe_model_path, args.caffe_mean,\n args.mean_diff_allowed, args.max_diff_allowed)", - "docstring": "Entrypoint for compare_layers" - }, - { - "code": "def accept_re(regexp, buf, pos):\n match = regexp.match(buf, pos)\n if not match:\n return None, pos\n return buf[match.start(1):match.end(1)], match.end(0)", - "docstring": "Accept a regular expression at the current buffer position." 
- }, - { - "code": "def tag_id(self, name):\n return self._store.get(self.tag_key(name)) or self.reset_tag(name)", - "docstring": "Get the unique tag identifier for a given tag.\n\n :param name: The tag\n :type name: str\n\n :rtype: str" - }, - { - "code": "def buy(self):\n pg = self.usr.getPage(\"http://www.neopets.com/\" + self.buyURL, vars = {'Referer': 'http://www.neopets.com/browseshop.phtml?owner=' + self.owner})\n if \"(owned by\" in pg.content:\n return True\n elif \"does not exist in this shop\" in pg.content:\n return False\n else:\n logging.getLogger(\"neolib.item\").exception(\"Unknown message when attempting to buy user shop item.\", {'pg': pg})\n return False", - "docstring": "Attempts to purchase a user shop item, returns result\n \n Uses the associated user and buyURL to attempt to purchase the user shop item. Returns\n whether or not the item was successfully bought. \n \n Returns\n bool - True if successful, false otherwise" - }, - { - "code": "def blobs(self, repository_ids=[], reference_names=[], commit_hashes=[]):\n if not isinstance(repository_ids, list):\n raise Exception(\"repository_ids must be a list\")\n if not isinstance(reference_names, list):\n raise Exception(\"reference_names must be a list\")\n if not isinstance(commit_hashes, list):\n raise Exception(\"commit_hashes must be a list\")\n return BlobsDataFrame(self.__engine.getBlobs(repository_ids,\n reference_names,\n commit_hashes),\n self.session,\n self.__implicits)", - "docstring": "Retrieves the blobs of a list of repositories, reference names and commit hashes.\n So the result will be a DataFrame of all the blobs in the given commits that are\n in the given references that belong to the given repositories.\n\n >>> blobs_df = engine.blobs(repo_ids, ref_names, hashes)\n\n Calling this function with no arguments is the same as:\n\n >>> engine.repositories.references.commits.tree_entries.blobs\n\n :param repository_ids: list of repository ids to filter by (optional)\n :type repository_ids: list of strings\n :param reference_names: list of reference names to filter by (optional)\n :type reference_names: list of strings\n :param commit_hashes: list of hashes to filter by (optional)\n :type commit_hashes: list of strings\n :rtype: BlobsDataFrame" - }, - { - "code": "def entrypoints(section):\n return {ep.name: ep.load() for ep in pkg_resources.iter_entry_points(section)}", - "docstring": "Returns the Entry Point for a given Entry Point section.\n\n :param str section: The section name in the entry point collection\n :returns: A dictionary of (Name, Class) pairs stored in the entry point collection." - }, - { - "code": "def get_version_details(self, version_name):\n name = ('%s/versions/%s' % (self._full_model_name, version_name))\n return self._api.projects().models().versions().get(name=name).execute()", - "docstring": "Get details of a version.\n\n Args:\n version: the name of the version in short form, such as \"v1\".\n Returns: a dictionary containing the version details." 
- }, - { - "code": "def key_from_protobuf(pb):\n path_args = []\n for element in pb.path:\n path_args.append(element.kind)\n if element.id:\n path_args.append(element.id)\n if element.name:\n path_args.append(element.name)\n project = None\n if pb.partition_id.project_id:\n project = pb.partition_id.project_id\n namespace = None\n if pb.partition_id.namespace_id:\n namespace = pb.partition_id.namespace_id\n return Key(*path_args, namespace=namespace, project=project)", - "docstring": "Factory method for creating a key based on a protobuf.\n\n The protobuf should be one returned from the Cloud Datastore\n Protobuf API.\n\n :type pb: :class:`.entity_pb2.Key`\n :param pb: The Protobuf representing the key.\n\n :rtype: :class:`google.cloud.datastore.key.Key`\n :returns: a new `Key` instance" - }, - { - "code": "def prettytable(self):\n table = prettytable.PrettyTable(self.columns)\n if self.sortby:\n if self.sortby in self.columns:\n table.sortby = self.sortby\n else:\n msg = \"Column (%s) doesn't exist to sort by\" % self.sortby\n raise exceptions.CLIAbort(msg)\n for a_col, alignment in self.align.items():\n table.align[a_col] = alignment\n if self.title:\n table.title = self.title\n for row in self.rows:\n table.add_row(row)\n return table", - "docstring": "Returns a new prettytable instance." - }, - { - "code": "def process_request(self, request, response):\n self.logger.info('Requested: {0} {1} {2}'.format(request.method, request.relative_uri, request.content_type))", - "docstring": "Logs the basic endpoint requested" - }, - { - "code": "def validate_fields(self, **kwargs):\n for field in self.fields:\n value = kwargs[field]\n required_type = self.fields[field]\n if type(value) != required_type:\n raise TypeError('{}.{} needs to be a {}, recieved: {}({})'.format(\n self.name,\n field,\n required_type.__name__,\n type(value).__name__,\n value.__repr__()))", - "docstring": "ensures that all incoming fields are the types that were specified" - }, - { - "code": "def powerlaw(f, log10_A=-16, gamma=5):\n fyr = 1 / 3.16e7\n return (10**log10_A)**2 / 12.0 / np.pi**2 * fyr**(gamma-3) * f**(-gamma)", - "docstring": "Power-law PSD.\n\n :param f: Sampling frequencies\n :param log10_A: log10 of red noise Amplitude [GW units]\n :param gamma: Spectral index of red noise process" - }, - { - "code": "def _notify_add(self, slice_):\n change = AddChange(self, slice_)\n self.notify_observers(change)", - "docstring": "Notify about an AddChange." - }, - { - "code": "def make_key(self, *parts):\n separator = getattr(self.model_class, 'index_separator', '.')\n parts = map(decode, parts)\n return '%s%s' % (self._base_key, separator.join(map(str, parts)))", - "docstring": "Generate a namespaced key for the given path." 
- }, - { - "code": "def group_comments_by_round(comments, ranking=0):\n comment_rounds = {}\n ordered_comment_round_names = []\n for comment in comments:\n comment_round_name = ranking and comment[11] or comment[7]\n if comment_round_name not in comment_rounds:\n comment_rounds[comment_round_name] = []\n ordered_comment_round_names.append(comment_round_name)\n comment_rounds[comment_round_name].append(comment)\n return [(comment_round_name, comment_rounds[comment_round_name])\n for comment_round_name in ordered_comment_round_names]", - "docstring": "Group comments by the round to which they belong" - }, - { - "code": "def keys(cls):\n if cls._cache_keys is None:\n cls._cache_keys = [c.name for c in cls.__table__._columns]\n return cls._cache_keys", - "docstring": "return list of all declared columns.\n\n :rtype: List[str]" - }, - { - "code": "def get_content_item_inlines(plugins=None, base=BaseContentItemInline):\n COPY_FIELDS = (\n 'form', 'raw_id_fields', 'filter_vertical', 'filter_horizontal',\n 'radio_fields', 'prepopulated_fields', 'formfield_overrides', 'readonly_fields',\n )\n if plugins is None:\n plugins = extensions.plugin_pool.get_plugins()\n inlines = []\n for plugin in plugins:\n if not isinstance(plugin, extensions.ContentPlugin):\n raise TypeError(\"get_content_item_inlines() expects to receive ContentPlugin instances, not {0}\".format(plugin))\n ContentItemType = plugin.model\n class_name = '%s_AutoInline' % ContentItemType.__name__\n attrs = {\n '__module__': plugin.__class__.__module__,\n 'model': ContentItemType,\n 'name': plugin.verbose_name,\n 'plugin': plugin,\n 'type_name': plugin.type_name,\n 'extra_fieldsets': plugin.fieldsets,\n 'cp_admin_form_template': plugin.admin_form_template,\n 'cp_admin_init_template': plugin.admin_init_template,\n }\n for name in COPY_FIELDS:\n if getattr(plugin, name):\n attrs[name] = getattr(plugin, name)\n inlines.append(type(class_name, (base,), attrs))\n inlines.sort(key=lambda inline: inline.name.lower())\n return inlines", - "docstring": "Dynamically generate genuine django inlines for all registered content item types.\n When the `plugins` parameter is ``None``, all plugin inlines are returned." - }, - { - "code": "def init(self):\n self.y = {\"version\": int(time.time())}\n recipient_email = raw_input(\"Enter Email ID: \")\n self.import_key(emailid=recipient_email)\n self.encrypt(emailid_list=[recipient_email])", - "docstring": "Initialize a new password db store" - }, - { - "code": "def _extract_core_semantics(self, docs):\n all_concepts = []\n doc_core_sems = []\n for doc in docs:\n core_sems = self._process_doc(doc)\n doc_core_sems.append(core_sems)\n all_concepts += [con for con, weight in core_sems]\n return doc_core_sems, list(set(all_concepts))", - "docstring": "Extracts core semantics for a list of documents, returning them along with\n a list of all the concepts represented." 
- }, - { - "code": "def decrypt(self, encrypted):\n fernet = Fernet(self.decryption_cipher_key)\n return fernet.decrypt(encrypted)", - "docstring": "decrypts the encrypted message using Fernet\n\n :param encrypted: the encrypted message\n :returns: the decrypted, serialized identifier collection" - }, - { - "code": "def minify(compiled):\n compiled = compiled.strip()\n if compiled:\n out = []\n for line in compiled.splitlines():\n line = line.split(\"\n if line:\n ind = 0\n while line.startswith(\" \"):\n line = line[1:]\n ind += 1\n internal_assert(ind % tabideal == 0, \"invalid indentation in\", line)\n out.append(\" \" * (ind // tabideal) + line)\n compiled = \"\\n\".join(out) + \"\\n\"\n return compiled", - "docstring": "Perform basic minifications.\n\n Fails on non-tabideal indentation or a string with a #." - }, - { - "code": "def _folder_item_result(self, analysis_brain, item):\n item[\"Result\"] = \"\"\n if not self.has_permission(ViewResults, analysis_brain):\n img = get_image(\"to_follow.png\", width=\"16px\", height=\"16px\")\n item[\"before\"][\"Result\"] = img\n return\n result = analysis_brain.getResult\n capture_date = analysis_brain.getResultCaptureDate\n capture_date_str = self.ulocalized_time(capture_date, long_format=0)\n item[\"Result\"] = result\n item[\"CaptureDate\"] = capture_date_str\n item[\"result_captured\"] = capture_date_str\n if self.is_analysis_edition_allowed(analysis_brain):\n item[\"allow_edit\"].append(\"Remarks\")\n if self.is_result_edition_allowed(analysis_brain):\n item[\"allow_edit\"].append(\"Result\")\n choices = analysis_brain.getResultOptions\n if choices:\n choices = copy(choices)\n choices.insert(0, dict(ResultValue=\"\", ResultText=\"\"))\n item[\"choices\"][\"Result\"] = choices\n if not result:\n return\n obj = self.get_object(analysis_brain)\n formatted_result = obj.getFormattedResult(\n sciformat=int(self.scinot), decimalmark=self.dmk)\n item[\"formatted_result\"] = formatted_result", - "docstring": "Set the analysis' result to the item passed in.\n\n :param analysis_brain: Brain that represents an analysis\n :param item: analysis' dictionary counterpart that represents a row" - }, - { - "code": "def _uncheck_descendant(self, item):\n children = self.get_children(item)\n for iid in children:\n self.change_state(iid, \"unchecked\")\n self._uncheck_descendant(iid)", - "docstring": "Uncheck the boxes of item's descendant." - }, - { - "code": "def _init_go_sources(self, go_sources_arg, go2obj_arg):\n gos_user = set(go_sources_arg)\n if 'children' in self.kws and self.kws['children']:\n gos_user |= get_leaf_children(gos_user, go2obj_arg)\n gos_godag = set(go2obj_arg)\n gos_source = gos_user.intersection(gos_godag)\n gos_missing = gos_user.difference(gos_godag)\n if not gos_missing:\n return gos_source\n sys.stdout.write(\"{N} GO IDs NOT FOUND IN GO DAG: {GOs}\\n\".format(\n N=len(gos_missing), GOs=\" \".join([str(e) for e in gos_missing])))\n return gos_source", - "docstring": "Return GO sources which are present in GODag." - }, - { - "code": "def add_user(self, username, email, **kwargs):\n api = self._get_api(iam.AccountAdminApi)\n kwargs.update({'username': username, 'email': email})\n user = User._create_request_map(kwargs)\n body = iam.UserUpdateReq(**user)\n return User(api.create_user(body))", - "docstring": "Create a new user with provided details.\n\n Add user example:\n\n .. 
code-block:: python\n\n account_management_api = AccountManagementAPI()\n # Add user\n user = {\n \"username\": \"test_user\",\n \"email\": \"test@gmail.com\",\n \"phone_number\": \"0123456789\"\n }\n new_user = account_management_api.add_user(**user)\n\n :param str username: The unique username of the user (Required)\n :param str email: The unique email of the user (Required)\n :param str full_name: The full name of the user\n :param list groups: List of group IDs (`str`) which this user belongs to\n :param str password: The password string of the user\n :param str phone_number: Phone number of the user\n :param bool terms_accepted: 'General Terms & Conditions' have been accepted\n :param bool marketing_accepted: Marketing Information opt-in\n :returns: the new user object\n :rtype: User" - }, - { - "code": "def check_cache(resource_type):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n adapter = args[0]\n key, val = list(kwargs.items())[0]\n except IndexError:\n logger.warning(\"Couldn't generate full index key, skipping cache\")\n else:\n index_key = (resource_type, key, val)\n try:\n cached_record = adapter._swimlane.resources_cache[index_key]\n except KeyError:\n logger.debug('Cache miss: `{!r}`'.format(index_key))\n else:\n logger.debug('Cache hit: `{!r}`'.format(cached_record))\n return cached_record\n return func(*args, **kwargs)\n return wrapper\n return decorator", - "docstring": "Decorator for adapter methods to check cache for resource before normally sending requests to retrieve data\n\n Only works with single kwargs, almost always used with @one_of_keyword_only decorator\n\n Args:\n resource_type (type(APIResource)): Subclass of APIResource of cache to be checked when called" - }, - { - "code": "def add_distinguished_name(list_name, item_name):\n payload = {\"jsonrpc\": \"2.0\",\n \"id\": \"ID0\",\n \"method\": \"add_policy_distinguished_names\",\n \"params\": [list_name, {\"item_name\": item_name}]}\n response = __proxy__['bluecoat_sslv.call'](payload, True)\n return _validate_change_result(response)", - "docstring": "Adds a distinguished name to a distinguished name list.\n\n list_name(str): The name of the specific policy distinguished name list to append to.\n\n item_name(str): The distinguished name to append.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' bluecoat_sslv.add_distinguished_name MyDistinguishedList cn=foo.bar.com" - }, - { - "code": "def cli(env, volume_id, replicant_id):\n block_storage_manager = SoftLayer.BlockStorageManager(env.client)\n success = block_storage_manager.failback_from_replicant(\n volume_id,\n replicant_id\n )\n if success:\n click.echo(\"Failback from replicant is now in progress.\")\n else:\n click.echo(\"Failback operation could not be initiated.\")", - "docstring": "Failback a block volume from the given replicant volume." 
- }, - { - "code": "def _get_site_response_term(self, C, imt, vs30, sa1180):\n vs30_star = self._get_vs30star(vs30, imt)\n site_resp_term = np.zeros_like(vs30)\n gt_vlin = vs30 >= C['vlin']\n lw_vlin = vs30 < C['vlin']\n vs30_rat = vs30_star / C['vlin']\n site_resp_term[gt_vlin] = ((C['a10'] + C['b'] * self.CONSTS['n']) *\n np.log(vs30_rat[gt_vlin]))\n site_resp_term[lw_vlin] = (C['a10'] * np.log(vs30_rat[lw_vlin]) -\n C['b'] * np.log(sa1180[lw_vlin] + C['c']) +\n C['b'] * np.log(sa1180[lw_vlin] + C['c'] *\n vs30_rat[lw_vlin] **\n self.CONSTS['n']))\n return site_resp_term", - "docstring": "Compute and return site response model term see page 1033" - }, - { - "code": "def init_megno(self, seed=None):\n if seed is None:\n clibrebound.reb_tools_megno_init(byref(self))\n else:\n clibrebound.reb_tools_megno_init_seed(byref(self), c_uint(seed))", - "docstring": "This function initialises the chaos indicator MEGNO particles and enables their integration.\n\n MEGNO is short for Mean Exponential Growth of Nearby orbits. It can be used to test\n if a system is chaotic or not. In the backend, the integrator is integrating an additional set\n of particles using the variational equation. Note that variational equations are better \n suited for this than shadow particles. MEGNO is currently only supported in the IAS15 \n and WHFast integrators.\n\n This function also needs to be called if you are interested in the Lyapunov exponent as it is\n calculate with the help of MEGNO. See Rein and Tamayo 2015 for details on the implementation.\n\n For more information on MENGO see e.g. http://dx.doi.org/10.1051/0004-6361:20011189" - }, - { - "code": "def _object_type_html(self):\n if self.exists:\n return \"%s\" % (self.url(), \n self.content_type.name,)\n else:\n return \"%s\" % self.content_type.name", - "docstring": "Return an html admin link with the object's type as text. If the \n object doesn't exist, return the object's type crossed out." - }, - { - "code": "def ssh_version():\n ret = subprocess.Popen(\n ['ssh', '-V'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n try:\n version_parts = ret[1].split(b',')[0].split(b'_')[1]\n parts = []\n for part in version_parts:\n try:\n parts.append(int(part))\n except ValueError:\n return tuple(parts)\n return tuple(parts)\n except IndexError:\n return (2, 0)", - "docstring": "Returns the version of the installed ssh command" - }, - { - "code": "def cross(v, w):\n x = v[1] * w[2] - v[2] * w[1]\n y = v[2] * w[0] - v[0] * w[2]\n z = v[0] * w[1] - v[1] * w[0]\n return [x, y, z]", - "docstring": "cross product of two vectors" - }, - { - "code": "def prepareDiff(self):\n self.streamForDiff = NativeStringIO()\n self.linter.reporter.set_output(self.streamForDiff)", - "docstring": "Prepare to run the checker and get diff results." 
- }, - { - "code": "def to_array(self):\n array = super(InlineQueryResultMpeg4Gif, self).to_array()\n array['type'] = u(self.type)\n array['id'] = u(self.id)\n array['mpeg4_url'] = u(self.mpeg4_url)\n array['thumb_url'] = u(self.thumb_url)\n if self.mpeg4_width is not None:\n array['mpeg4_width'] = int(self.mpeg4_width)\n if self.mpeg4_height is not None:\n array['mpeg4_height'] = int(self.mpeg4_height)\n if self.mpeg4_duration is not None:\n array['mpeg4_duration'] = int(self.mpeg4_duration)\n if self.title is not None:\n array['title'] = u(self.title)\n if self.caption is not None:\n array['caption'] = u(self.caption)\n if self.parse_mode is not None:\n array['parse_mode'] = u(self.parse_mode)\n if self.reply_markup is not None:\n array['reply_markup'] = self.reply_markup.to_array()\n if self.input_message_content is not None:\n array['input_message_content'] = self.input_message_content.to_array()\n return array", - "docstring": "Serializes this InlineQueryResultMpeg4Gif to a dictionary.\n\n :return: dictionary representation of this object.\n :rtype: dict" - }, - { - "code": "def reload_sources(self, names):\n try:\n self.like.logLike.loadSourceMaps(names, True, True)\n self._scale_srcmap(self._src_expscale, check_header=False,\n names=names)\n except:\n for name in names:\n self.reload_source(name)", - "docstring": "Recompute the source map for a list of sources in the model." - }, - { - "code": "def String(self, str):\n ret = libxml2mod.xmlTextReaderConstString(self._o, str)\n return ret", - "docstring": "Get an interned string from the reader, allows for example\n to speedup string name comparisons" - }, - { - "code": "def _parse_line(self, line_no, line):\n try:\n matched = statement.parseString(line)\n except ParseException as exc:\n raise DataError(\"Error parsing line in TileBus file\", line_number=line_no, column=exc.col, contents=line)\n if 'symbol' in matched:\n self._parse_cmd(matched)\n elif 'filename' in matched:\n self._parse_include(matched)\n elif 'variable' in matched:\n self._parse_assignment(matched)\n elif 'configvar' in matched:\n self._parse_configvar(matched)", - "docstring": "Parse a line in a TileBus file\n\n Args:\n line_no (int): The line number for printing useful error messages\n line (string): The line that we are trying to parse" - }, - { - "code": "def render_subgraph(self, ontol, nodes, **args):\n subont = ontol.subontology(nodes, **args)\n return self.render(subont, **args)", - "docstring": "Render a `ontology` object after inducing a subgraph" - }, - { - "code": "def stretch(arr, fields=None, return_indices=False):\n dtype = []\n len_array = None\n flatten = False\n if fields is None:\n fields = arr.dtype.names\n elif isinstance(fields, string_types):\n fields = [fields]\n flatten = True\n for field in fields:\n dt = arr.dtype[field]\n if dt == 'O' or len(dt.shape):\n if dt == 'O':\n lengths = VLEN(arr[field])\n else:\n lengths = np.repeat(dt.shape[0], arr.shape[0])\n if len_array is None:\n len_array = lengths\n elif not np.array_equal(lengths, len_array):\n raise ValueError(\n \"inconsistent lengths of array columns in input\")\n if dt == 'O':\n dtype.append((field, arr[field][0].dtype))\n else:\n dtype.append((field, arr[field].dtype, dt.shape[1:]))\n else:\n dtype.append((field, dt))\n if len_array is None:\n raise RuntimeError(\"no array column in input\")\n ret = np.empty(np.sum(len_array), dtype=dtype)\n for field in fields:\n dt = arr.dtype[field]\n if dt == 'O' or len(dt.shape) == 1:\n ret[field] = np.hstack(arr[field])\n elif 
len(dt.shape):\n ret[field] = np.vstack(arr[field])\n else:\n ret[field] = np.repeat(arr[field], len_array)\n if flatten:\n ret = ret[fields[0]]\n if return_indices:\n idx = np.concatenate(list(map(np.arange, len_array)))\n return ret, idx\n return ret", - "docstring": "Stretch an array.\n\n Stretch an array by ``hstack()``-ing multiple array fields while\n preserving column names and record array structure. If a scalar field is\n specified, it will be stretched along with array fields.\n\n Parameters\n ----------\n arr : NumPy structured or record array\n The array to be stretched.\n fields : list of strings or string, optional (default=None)\n A list of column names or a single column name to stretch.\n If ``fields`` is a string, then the output array is a one-dimensional\n unstructured array containing only the stretched elements of that\n field. If None, then stretch all fields.\n return_indices : bool, optional (default=False)\n If True, the array index of each stretched array entry will be\n returned in addition to the stretched array.\n This changes the return type of this function to a tuple consisting\n of a structured array and a numpy int64 array.\n\n Returns\n -------\n ret : A NumPy structured array\n The stretched array.\n\n Examples\n --------\n >>> import numpy as np\n >>> from root_numpy import stretch\n >>> arr = np.empty(2, dtype=[('scalar', np.int), ('array', 'O')])\n >>> arr[0] = (0, np.array([1, 2, 3], dtype=np.float))\n >>> arr[1] = (1, np.array([4, 5, 6], dtype=np.float))\n >>> stretch(arr, ['scalar', 'array'])\n array([(0, 1.0), (0, 2.0), (0, 3.0), (1, 4.0), (1, 5.0), (1, 6.0)],\n dtype=[('scalar', ' tol]\n above_fermi = [i for i in range(len(energies))\n if energies[i] > self.efermi and tdos[i] > tol]\n vbm_start = max(below_fermi)\n cbm_start = min(above_fermi)\n if vbm_start == cbm_start:\n return 0.0, self.efermi, self.efermi\n else:\n terminal_dens = tdos[vbm_start:vbm_start + 2][::-1]\n terminal_energies = energies[vbm_start:vbm_start + 2][::-1]\n start = get_linear_interpolated_value(terminal_dens,\n terminal_energies, tol)\n terminal_dens = tdos[cbm_start - 1:cbm_start + 1]\n terminal_energies = energies[cbm_start - 1:cbm_start + 1]\n end = get_linear_interpolated_value(terminal_dens,\n terminal_energies, tol)\n return end - start, end, start", - "docstring": "Expects a DOS object and finds the gap\n\n Args:\n tol: tolerance in occupations for determining the gap\n abs_tol: Set to True for an absolute tolerance and False for a\n relative one.\n spin: Possible values are None - finds the gap in the summed\n densities, Up - finds the gap in the up spin channel,\n Down - finds the gap in the down spin channel.\n\n Returns:\n (gap, cbm, vbm):\n Tuple of floats in eV corresponding to the gap, cbm and vbm." - }, - { - "code": "def device_info(self):\n return {\n 'family': self.family,\n 'platform': self.platform,\n 'os_type': self.os_type,\n 'os_version': self.os_version,\n 'udi': self.udi,\n 'driver_name': self.driver.platform,\n 'mode': self.mode,\n 'is_console': self.is_console,\n 'is_target': self.is_target,\n 'hostname': self.hostname,\n }", - "docstring": "Return device info dict." 
- }, - { - "code": "def remove_unsupported_kwargs(module_or_fn, all_kwargs_dict):\n if all_kwargs_dict is None:\n all_kwargs_dict = {}\n if not isinstance(all_kwargs_dict, dict):\n raise ValueError(\"all_kwargs_dict must be a dict with string keys.\")\n return {\n kwarg: value for kwarg, value in all_kwargs_dict.items()\n if supports_kwargs(module_or_fn, kwarg) != NOT_SUPPORTED\n }", - "docstring": "Removes any kwargs not supported by `module_or_fn` from `all_kwargs_dict`.\n\n A new dict is return with shallow copies of keys & values from\n `all_kwargs_dict`, as long as the key is accepted by module_or_fn. The\n returned dict can then be used to connect `module_or_fn` (along with some\n other inputs, ie non-keyword arguments, in general).\n\n `snt.supports_kwargs` is used to tell whether a given kwarg is supported. Note\n that this method may give false negatives, which would lead to extraneous\n removals in the result of this function. Please read the docstring for\n `snt.supports_kwargs` for details, and manually inspect the results from this\n function if in doubt.\n\n Args:\n module_or_fn: some callable which can be interrogated by\n `snt.supports_kwargs`. Generally a Sonnet module or a method (wrapped in\n `@reuse_variables`) of a Sonnet module.\n all_kwargs_dict: a dict containing strings as keys, or None.\n\n Raises:\n ValueError: if `all_kwargs_dict` is not a dict.\n\n Returns:\n A dict containing some subset of the keys and values in `all_kwargs_dict`.\n This subset may be empty. If `all_kwargs_dict` is None, this will be an\n empty dict." - }, - { - "code": "def make_parent_dirs(path, mode=0o777):\n parent = os.path.dirname(path)\n if parent:\n make_all_dirs(parent, mode)\n return path", - "docstring": "Ensure parent directories of a file are created as needed." - }, - { - "code": "def peer(name):\n if salt.utils.cloud.check_name(name, 'a-zA-Z0-9._-'):\n raise SaltInvocationError(\n 'Invalid characters in peer name \"{0}\"'.format(name))\n cmd = 'peer probe {0}'.format(name)\n return _gluster(cmd)", - "docstring": "Add another node into the peer list.\n\n name\n The remote host to probe.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt 'one.gluster.*' glusterfs.peer two\n\n GLUSTER direct CLI example (to show what salt is sending to gluster):\n\n $ gluster peer probe ftp2\n\n GLUSTER CLI 3.4.4 return example (so we know what we are parsing):\n #if the \"peer\" is the local host:\n peer probe: success: on localhost not needed\n\n #if the peer was just added:\n peer probe: success\n\n #if the peer was already part of the cluster:\n peer probe: success: host ftp2 port 24007 already in peer list" - }, - { - "code": "def item_extra_kwargs(self, item):\n if use_feed_image:\n feed_image = item.feed_image\n if feed_image:\n image_complete_url = urljoin(\n self.get_site_url(), feed_image.file.url\n )\n else:\n image_complete_url = \"\"\n content_field = getattr(item, self.item_content_field)\n try:\n content = expand_db_html(content_field)\n except:\n content = content_field.__html__()\n soup = BeautifulSoup(content, 'html.parser')\n for div in soup.find_all(\"div\", {'class': 'responsive-object'}):\n del div['style']\n for img_tag in soup.findAll('img'):\n if img_tag.has_attr('src'):\n img_tag['src'] = urljoin(self.get_site_url(), img_tag['src'])\n fields_to_add = {\n 'content': soup.prettify(formatter=\"html\"),\n }\n if use_feed_image:\n fields_to_add['image'] = image_complete_url\n else:\n fields_to_add['image'] = \"\"\n return fields_to_add", - "docstring": "Returns an extra keyword arguments dictionary that is used with\n the 'add_item' call of the feed generator.\n Add the fields of the item, to be used by the custom feed generator." - }, - { - "code": "def clean_process_meta(self):\n ds = self.dataset\n ds.config.build.clean()\n ds.config.process.clean()\n ds.commit()\n self.state = self.STATES.CLEANED", - "docstring": "Remove all process and build metadata" - }, - { - "code": "def remove_container(name, force=False):\n try:\n if not force:\n _get_docker().stop(name)\n except APIError:\n pass\n try:\n _get_docker().remove_container(name, force=True)\n return True\n except APIError:\n return False", - "docstring": "Wrapper for docker remove_container\n\n :returns: True if container was found and removed" - }, - { - "code": "def get_season_stats(self, season_key):\n season_stats_url = self.api_path + \"season/\" + season_key + \"/stats/\"\n response = self.get_response(season_stats_url)\n return response", - "docstring": "Calling Season Stats API.\n\n Arg:\n season_key: key of the season\n Return:\n json data" - }, - { - "code": "def is_pdf(document):\n if not executable_exists('pdftotext'):\n current_app.logger.warning(\n \"GNU file was not found on the system. \"\n \"Switching to a weak file extension test.\"\n )\n if document.lower().endswith(\".pdf\"):\n return True\n return False\n file_output = os.popen('file ' + re.escape(document)).read()\n try:\n filetype = file_output.split(\":\")[-1]\n except IndexError:\n current_app.logger.error(\n \"Your version of the 'file' utility seems to be unsupported.\"\n )\n raise IncompatiblePDF2Text('Incompatible pdftotext')\n pdf = filetype.find(\"PDF\") > -1\n return pdf", - "docstring": "Check if a document is a PDF file and return True if is is." 
- }, - { - "code": "def _update_index(self):\n d = self.declaration\n self.index = self.view.model.index(d.row, d.column)\n if self.delegate:\n self._refresh_count += 1\n timed_call(self._loading_interval, self._update_delegate)", - "docstring": "Update the reference to the index within the table" - }, - { - "code": "def import_consumer(value):\n parts = value.split('.')\n module_obj = importlib.import_module('.'.join(parts[0:-1]))\n return (getattr(module_obj, parts[-1]),\n get_package_version(module_obj, value))", - "docstring": "Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux\n and it will return a handle to the class, and the version.\n\n :param str value: The consumer class in module.Consumer format\n :return: tuple(Class, str)" - }, - { - "code": "def end_input(self, cmd):\r\n self.input_mode = False\r\n self.input_loop.exit()\r\n self.interpreter.widget_proxy.end_input(cmd)", - "docstring": "End of wait_input mode" - }, - { - "code": "def get_distutils_option(option, commands):\n dist = get_dummy_distribution()\n for cmd in commands:\n cmd_opts = dist.command_options.get(cmd)\n if cmd_opts is not None and option in cmd_opts:\n return cmd_opts[option][1]\n else:\n return None", - "docstring": "Returns the value of the given distutils option.\n\n Parameters\n ----------\n option : str\n The name of the option\n\n commands : list of str\n The list of commands on which this option is available\n\n Returns\n -------\n val : str or None\n the value of the given distutils option. If the option is not set,\n returns None." - }, - { - "code": "def lu_slogdet(LU):\n r\n LU = (asarray(LU[0], float), asarray(LU[1], float))\n adet = _sum(log(_abs(LU[0].diagonal())))\n s = prod(sign(LU[0].diagonal()))\n nrows_exchange = LU[1].size - _sum(LU[1] == arange(LU[1].size, dtype=\"int32\"))\n odd = nrows_exchange % 2 == 1\n if odd:\n s *= -1.0\n return (s, adet)", - "docstring": "r\"\"\"Natural logarithm of a LU decomposition.\n\n Args:\n LU (tuple): LU decomposition.\n\n Returns:\n tuple: sign and log-determinant." - }, - { - "code": "def call(self, operation, data):\n print('API call [{0}:{1}], method - {2}, data - {3}'.format(\n self.host, self.api_key, operation, repr(data)))", - "docstring": "Make some network operations." - }, - { - "code": "def ipynb_to_rst(directory, filename):\n print(filename)\n os.chdir(directory)\n subprocess.Popen([\"ipython\", \"nbconvert\", \"--to\", \"rst\",\n filename],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=directory)", - "docstring": "Converts a given file in a directory to an rst in the same directory." 
- }, - { - "code": "def history(verbose, range):\n alembic_command.history(\n config=get_config(),\n rev_range=range,\n verbose=verbose\n )", - "docstring": "List revision changesets chronologically" - }, - { - "code": "def set_max_clients(limit):\n global _dirty, _max_clients\n LOGGER.debug('Setting maximum client limit to %i', limit)\n _dirty = True\n _max_clients = limit", - "docstring": "Set the maximum number of simultaneous batch submission that can execute\n in parallel.\n\n :param int limit: The maximum number of simultaneous batch submissions" - }, - { - "code": "def on_chain_updated(self, chain_head,\n committed_batches=None,\n uncommitted_batches=None):\n try:\n self._py_call(\n 'on_chain_updated',\n ctypes.py_object(chain_head),\n ctypes.py_object(committed_batches),\n ctypes.py_object(uncommitted_batches))\n except Exception:\n LOGGER.exception(\n \"Unhandled exception in BlockPublisher.on_chain_updated\")", - "docstring": "The existing chain has been updated, the current head block has\n changed.\n\n :param chain_head: the new head of block_chain, can be None if\n no block publishing is desired.\n :param committed_batches: the set of batches that were committed\n as part of the new chain.\n :param uncommitted_batches: the list of transactions if any that are\n now de-committed when the new chain was selected.\n :return: None" - }, - { - "code": "def parseCmdline(self, requestData):\n self.printSysLog(\"Enter ReqHandle.parseCmdline\")\n if isinstance(requestData, list):\n self.requestString = ' '.join(requestData)\n self.request = requestData\n elif isinstance(requestData, string_types):\n self.requestString = requestData\n self.request = shlex.split(requestData)\n else:\n msg = msgs.msg['0012'][1] % (modId, type(requestData))\n self.printLn(\"ES\", msg)\n self.updateResults(msgs.msg['0012'][0])\n return self.results\n self.totalParms = len(self.request)\n if self.totalParms == 0:\n msg = msgs.msg['0009'][1] % modId\n self.printLn(\"ES\", msg)\n self.updateResults(msgs.msg['0009'][0])\n elif self.totalParms == 1:\n self.function = self.request[0].upper()\n if self.function == 'HELP' or self.function == 'VERSION':\n pass\n else:\n msg = msgs.msg['0008'][1] % (modId, self.function)\n self.printLn(\"ES\", msg)\n self.updateResults(msgs.msg['0008'][0])\n else:\n self.function = self.request[0].upper()\n if self.request[0] == 'HELP' or self.request[0] == 'VERSION':\n pass\n else:\n if self.function in ReqHandle.funcHandler:\n self.funcHandler[self.function][2](self)\n else:\n msg = msgs.msg['0007'][1] % (modId, self.function)\n self.printLn(\"ES\", msg)\n self.updateResults(msgs.msg['0007'][0])\n self.printSysLog(\"Exit ReqHandle.parseCmdline, rc: \" +\n str(self.results['overallRC']))\n return self.results", - "docstring": "Parse the request command string.\n\n Input:\n Self with request filled in.\n\n Output:\n Request Handle updated with the parsed information so that\n it is accessible via key/value pairs for later processing.\n Return code - 0: successful, non-zero: error" - }, - { - "code": "def declarations(cls, extra_defs=None):\n warnings.warn(\n \"Factory.declarations is deprecated; use Factory._meta.pre_declarations instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n decls = cls._meta.pre_declarations.as_dict()\n decls.update(extra_defs or {})\n return decls", - "docstring": "Retrieve a copy of the declared attributes.\n\n Args:\n extra_defs (dict): additional definitions to insert into the\n retrieved DeclarationDict." 
- }, - { - "code": "def _compute_surface_areas(self, cell_ids):\n cn = self.cells[\"nodes\"][cell_ids]\n ids = numpy.stack([cn, cn, cn], axis=1)\n half_el = 0.5 * self.edge_lengths[..., cell_ids]\n zero = numpy.zeros([half_el.shape[1]])\n vals = numpy.stack(\n [\n numpy.column_stack([zero, half_el[0], half_el[0]]),\n numpy.column_stack([half_el[1], zero, half_el[1]]),\n numpy.column_stack([half_el[2], half_el[2], zero]),\n ],\n axis=1,\n )\n return ids, vals", - "docstring": "For each edge, one half of the the edge goes to each of the end\n points. Used for Neumann boundary conditions if on the boundary of the\n mesh and transition conditions if in the interior." - }, - { - "code": "def feasible_set(self):\n for y in itertools.product(*[range(1, k + 1) for k in self.K]):\n yield np.array(y)", - "docstring": "Iterator over values in feasible set" - }, - { - "code": "def match(self, node, results=None):\n if self.type is not None and node.type != self.type:\n return False\n if self.content is not None:\n r = None\n if results is not None:\n r = {}\n if not self._submatch(node, r):\n return False\n if r:\n results.update(r)\n if results is not None and self.name:\n results[self.name] = node\n return True", - "docstring": "Does this pattern exactly match a node?\n\n Returns True if it matches, False if not.\n\n If results is not None, it must be a dict which will be\n updated with the nodes matching named subpatterns.\n\n Default implementation for non-wildcard patterns." - }, - { - "code": "def set(self):\n self._is_set = True\n scheduler.state.awoken_from_events.update(self._waiters)\n del self._waiters[:]", - "docstring": "set the event to triggered\n\n after calling this method, all greenlets waiting on the event will be\n rescheduled, and calling :meth:`wait` will not block until\n :meth:`clear` has been called" - }, - { - "code": "def make_plotters(self):\n plotters, y = {}, 0\n for var_name in self.var_names:\n plotters[var_name] = VarHandler(\n var_name,\n self.data,\n y,\n model_names=self.model_names,\n combined=self.combined,\n colors=self.colors,\n )\n y = plotters[var_name].y_max()\n return plotters", - "docstring": "Initialize an object for each variable to be plotted." - }, - { - "code": "def write_training_metrics(self):\n with open(self.path, 'w') as file:\n writer = csv.writer(file)\n writer.writerow(FIELD_NAMES)\n for row in self.rows:\n writer.writerow(row)", - "docstring": "Write Training Metrics to CSV" - }, - { - "code": "def index_to_coords(index, shape):\r\n coords = []\r\n for i in xrange(1, len(shape)):\r\n divisor = int(np.product(shape[i:]))\r\n value = index // divisor\r\n coords.append(value)\r\n index -= value * divisor\r\n coords.append(index)\r\n return tuple(coords)", - "docstring": "convert index to coordinates given the shape" - }, - { - "code": "def is_alive(pidfile):\n try:\n with salt.utils.files.fopen(pidfile) as fp_:\n os.kill(int(fp_.read().strip()), 0)\n return True\n except Exception as ex:\n if os.access(pidfile, os.W_OK) and os.path.isfile(pidfile):\n os.unlink(pidfile)\n return False", - "docstring": "Check if PID is still alive." 
- }, - { - "code": "def _get_signature_method(self, request):\n signature_method = request.get('oauth_signature_method')\n if signature_method is None:\n signature_method = SIGNATURE_METHOD\n try:\n return self.signature_methods[signature_method]\n except KeyError:\n signature_method_names = ', '.join(self.signature_methods.keys())\n raise Error('Signature method %s not supported try one of the '\n 'following: %s'\n % (signature_method, signature_method_names))", - "docstring": "Figure out the signature with some defaults." - }, - { - "code": "def make_imagehdu(data, wcs=None):\n data = np.asanyarray(data)\n if data.ndim != 2:\n raise ValueError('data must be a 2D array')\n if wcs is not None:\n header = wcs.to_header()\n else:\n header = None\n return fits.ImageHDU(data, header=header)", - "docstring": "Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D\n image.\n\n Parameters\n ----------\n data : 2D array-like\n The input 2D data.\n\n wcs : `~astropy.wcs.WCS`, optional\n The world coordinate system (WCS) transformation to include in\n the output FITS header.\n\n Returns\n -------\n image_hdu : `~astropy.io.fits.ImageHDU`\n The FITS `~astropy.io.fits.ImageHDU`.\n\n See Also\n --------\n make_wcs\n\n Examples\n --------\n >>> from photutils.datasets import make_imagehdu, make_wcs\n >>> shape = (100, 100)\n >>> data = np.ones(shape)\n >>> wcs = make_wcs(shape)\n >>> hdu = make_imagehdu(data, wcs=wcs)\n >>> print(hdu.data.shape)\n (100, 100)" - }, - { - "code": "def _kwargs_checks_gen(self, decorated_function, function_spec, arg_specs):\n\t\targs_names = []\n\t\targs_names.extend(function_spec.args)\n\t\tif function_spec.varargs is not None:\n\t\t\targs_names.append(function_spec.args)\n\t\targs_check = {}\n\t\tfor arg_name in arg_specs.keys():\n\t\t\tif arg_name not in args_names:\n\t\t\t\targs_check[arg_name] = self.check(\n\t\t\t\t\targ_specs[arg_name], arg_name, decorated_function\n\t\t\t\t)\n\t\treturn args_check", - "docstring": "Generate checks for keyword argument testing\n\n\t\t:param decorated_function: function decorator\n\t\t:param function_spec: function inspect information\n\t\t:param arg_specs: argument specification (same as arg_specs in :meth:`.Verifier.decorate`)\n\n\t\t:return: internal structure, that is used by :meth:`.Verifier._kwargs_checks_test`" - }, - { - "code": "def named_eq_relations(self, name, neg=False):\n if self.eqLinks and not neg:\n if isinstance(name, six.string_types):\n return filter(lambda x: x.relation.name == name,\n self.eqLinks)\n elif isinstance(name, list):\n return filter(lambda x: x.relation.name in name,\n self.eqLinks)\n else:\n return None\n elif self.eqLinks and neg:\n if isinstance(name, six.string_types):\n return filter(lambda x: x.relation.name != name,\n self.eqLinks)\n elif isinstance(name, list):\n return filter(lambda x: x.relation.name not in name,\n self.eqLinks)\n else:\n return None\n else:\n return None", - "docstring": "Returns list of named eqLinks.\n\n may be string or list." 
- }, - { - "code": "def parse_authn_request(self, enc_request, binding=BINDING_HTTP_REDIRECT):\n        return self._parse_request(enc_request, AuthnRequest,\n                                   \"single_sign_on_service\", binding)", - "docstring": "Parse an Authentication Request\n\n        :param enc_request: The request in its transport format\n        :param binding: Which binding that was used to transport the message\n            to this entity.\n        :return: A request instance" - }, - { - "code": "def ntiles(self):\n        tcum_mismatch = self.duration * 2 * pi * self.frequency / self.q\n        return next_power_of_two(tcum_mismatch / self.deltam)", - "docstring": "The number of tiles in this row\n\n        :type: `int`" - }, - { - "code": "def _unpickle_collection(self, collection):\n        for mkey in collection:\n            if isinstance(collection[mkey], list):\n                for item in collection[mkey]:\n                    item.unpickle(self)\n            else:\n                collection[mkey].unpickle(self)", - "docstring": "Unpickles all members of the specified dictionary." - }, - { - "code": "def modify_target(self, to_state, to_key):\n        if not isinstance(to_state, string_types):\n            raise ValueError(\"Invalid data flow target port: from_state must be a string\")\n        if not isinstance(to_key, int):\n            raise ValueError(\"Invalid data flow target port: from_outcome must be of type int\")\n        old_to_state = self.to_state\n        old_to_key = self.to_key\n        self._to_state = to_state\n        self._to_key = to_key\n        valid, message = self._check_validity()\n        if not valid:\n            self._to_state = old_to_state\n            self._to_key = old_to_key\n            raise ValueError(\"The data flow target could not be changed: {0}\".format(message))", - "docstring": "Set both to_state and to_key at the same time to modify data flow target\n\n        :param str to_state: State id of the target state\n        :param int to_key: Data port id of the target port\n        :raises exceptions.ValueError: If parameters have wrong types or the new data flow is not valid" - }, - { - "code": "def filepaths(self) -> List[str]:\n        path = self.currentpath\n        return [os.path.join(path, name) for name in self.filenames]", - "docstring": "Absolute path names of the files contained in the current\n        working directory.\n\n        File names starting with underscores are ignored:\n\n        >>> from hydpy.core.filetools import FileManager\n        >>> filemanager = FileManager()\n        >>> filemanager.BASEDIR = 'basename'\n        >>> filemanager.projectdir = 'projectname'\n        >>> from hydpy import repr_, TestIO\n        >>> with TestIO():\n        ...     filemanager.currentdir = 'testdir'\n        ...     open('projectname/basename/testdir/file1.txt', 'w').close()\n        ...     open('projectname/basename/testdir/file2.npy', 'w').close()\n        ...     open('projectname/basename/testdir/_file1.nc', 'w').close()\n        ...     for filepath in filemanager.filepaths:\n        ...         
repr_(filepath) # doctest: +ELLIPSIS\n '...hydpy/tests/iotesting/projectname/basename/testdir/file1.txt'\n '...hydpy/tests/iotesting/projectname/basename/testdir/file2.npy'" - }, - { - "code": "def calc_reward_fn(self):\n model = copy.copy(self.model)\n model.train(self.dataset)\n reward = 0.\n for i in range(len(self.queried_hist_)):\n reward += self.W[i] * (\n model.predict(\n self.dataset.data[\n self.queried_hist_[i]][0].reshape(1, -1)\n )[0] ==\n self.dataset.data[self.queried_hist_[i]][1]\n )\n reward /= (self.dataset.len_labeled() + self.dataset.len_unlabeled())\n reward /= self.T\n return reward", - "docstring": "Calculate the reward value" - }, - { - "code": "def consonants(self):\n return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_consonant])", - "docstring": "Return a new IPAString, containing only the consonants in the current string.\n\n :rtype: IPAString" - }, - { - "code": "def match_source(self, src):\n srcs = []\n names = [src.name]\n for col in self.config['assoc_xmatch_columns']:\n if col in src.assoc and src.assoc[col]:\n names += [src.assoc[col]]\n for name in names:\n name = name.replace(' ', '').lower()\n if name not in self._src_dict:\n continue\n srcs += [s for s in self._src_dict[name] if s not in srcs]\n return srcs", - "docstring": "Look for source or sources in the model that match the\n given source. Sources are matched by name and any association\n columns defined in the assoc_xmatch_columns parameter." - }, - { - "code": "def metadata_converter_help():\n message = m.Message()\n message.add(m.Brand())\n message.add(heading())\n message.add(content())\n return message", - "docstring": "Help message for metadata converter Dialog.\n\n .. versionadded:: 4.3\n\n :returns: A message object containing helpful information.\n :rtype: messaging.message.Message" - }, - { - "code": "def from_args(cls, args):\n opts = SoSOptions()\n opts._merge_opts(args, True)\n return opts", - "docstring": "Initialise a new SoSOptions object from a ``Namespace``\n obtained by parsing command line arguments.\n\n :param args: parsed command line arguments\n :returns: an initialised SoSOptions object\n :returntype: SoSOptions" - }, - { - "code": "def conf(self):\n return self.env.get_template('conf.py.j2').render(\n metadata=self.metadata,\n package=self.package)", - "docstring": "Generate the Sphinx `conf.py` configuration file\n\n Returns:\n (str): the contents of the `conf.py` file." - }, - { - "code": "def make_auth_headers(self, content_type):\n headers = self.make_headers(content_type)\n headers['Authorization'] = 'Basic {}'.format(self.get_auth_string())\n return headers", - "docstring": "Add authorization header." - }, - { - "code": "def set_data_location(self, current_extent, tag_location):\n if not self._initialized:\n raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')\n self.new_extent_loc = current_extent\n if self.ptr is not None:\n self.ptr.update_extent_location(current_extent)", - "docstring": "A method to set the new extent location that the data for this Directory\n Record should live at.\n\n Parameters:\n current_extent - The new extent.\n Returns:\n Nothing." 
- }, - { - "code": "def get_message(self, stream, timeout=None):\n try:\n if stream == 'iopub':\n msg = self.kc.get_iopub_msg(timeout=timeout)\n elif stream == 'shell':\n msg = self.kc.get_shell_msg(timeout=timeout)\n else:\n raise ValueError('Invalid stream specified: \"%s\"' % stream)\n except Empty:\n logger.debug('Kernel: Timeout waiting for message on %s', stream)\n raise\n logger.debug(\"Kernel message (%s):\\n%s\", stream, pformat(msg))\n return msg", - "docstring": "Function is used to get a message from the iopub channel.\n Timeout is None by default\n When timeout is reached" - }, - { - "code": "def lemma(lemma_key):\n if lemma_key in LEMMAS_DICT:\n return LEMMAS_DICT[lemma_key]\n split_lemma_key = lemma_key.split('.')\n synset_key = '.'.join(split_lemma_key[:3])\n lemma_literal = split_lemma_key[3]\n lemma_obj = Lemma(synset_key,lemma_literal)\n LEMMAS_DICT[lemma_key] = lemma_obj\n return lemma_obj", - "docstring": "Returns the Lemma object with the given key.\n\n Parameters\n ----------\n lemma_key : str\n Key of the returned lemma.\n\n Returns\n -------\n Lemma\n Lemma matching the `lemma_key`." - }, - { - "code": "def find_ctrlpts_curve(t, curve, **kwargs):\n span_func = kwargs.get('find_span_func', helpers.find_span_linear)\n span = span_func(curve.degree, curve.knotvector, len(curve.ctrlpts), t)\n idx = span - curve.degree\n curve_ctrlpts = [() for _ in range(curve.degree + 1)]\n for i in range(0, curve.degree + 1):\n curve_ctrlpts[i] = curve.ctrlpts[idx + i]\n return curve_ctrlpts", - "docstring": "Finds the control points involved in the evaluation of the curve point defined by the input parameter.\n\n This function uses a modified version of the algorithm *A3.1 CurvePoint* from The NURBS Book by Piegl & Tiller.\n\n :param t: parameter\n :type t: float\n :param curve: input curve object\n :type curve: abstract.Curve\n :return: 1-dimensional control points array\n :rtype: list" - }, - { - "code": "def fastq_convert_pipe_cl(in_file, data):\n cmd = _seqtk_fastq_prep_cl(data, in_file)\n if not cmd:\n cat_cmd = \"zcat\" if in_file.endswith(\".gz\") else \"cat\"\n cmd = cat_cmd + \" \" + in_file\n return \"<(%s)\" % cmd", - "docstring": "Create an anonymous pipe converting Illumina 1.3-1.7 to Sanger.\n\n Uses seqtk: https://github.com/lh3/seqt" - }, - { - "code": "async def _start(self):\n self.agent._alive.wait()\n try:\n await self.on_start()\n except Exception as e:\n logger.error(\"Exception running on_start in behaviour {}: {}\".format(self, e))\n self.kill(exit_code=e)\n await self._step()\n self._is_done.clear()", - "docstring": "Start coroutine. runs on_start coroutine and then\n runs the _step coroutine where the body of the behaviour\n is called." - }, - { - "code": "def grid(self, b=None, which='major', axis='both', kind='arbitrary',\n center=None, **kwargs):\n grid_on = self._gridOn\n Axes.grid(self, False)\n if kind == 'polar':\n center = 0, 0\n if self._overlay_axes is not None:\n self._overlay_axes.remove()\n self._overlay_axes = None\n if not b and b is not None:\n return\n if b is None:\n if grid_on:\n return\n if center is None or np.allclose(center, (np.pi/2, 0)):\n return Axes.grid(self, b, which, axis, **kwargs)\n self._add_overlay(center)\n self._overlay_axes.grid(True, which, axis, **kwargs)\n self._gridOn = True", - "docstring": "Usage is identical to a normal axes grid except for the ``kind`` and\n ``center`` kwargs. 
``kind=\"polar\"`` will add a polar overlay.\n\n        The ``center`` and ``kind`` arguments allow you to add a grid from a\n        differently-centered stereonet. This is useful for making \"polar\n        stereonets\" that still use the same coordinate system as a standard\n        stereonet. (i.e. a plane/line/whatever will have the same\n        representation on both, but the grid is displayed differently.)\n\n        To display a polar grid on a stereonet, use ``kind=\"polar\"``.\n\n        It is also often useful to display a grid relative to an arbitrary\n        measurement (e.g. a lineation axis). In that case, use the\n        ``lon_center`` and ``lat_center`` arguments. Note that these are in\n        radians in \"stereonet coordinates\". Therefore, you'll often want to\n        use one of the functions in ``stereonet_math`` to convert a\n        line/plane/rake into the longitude and latitude you'd input here. For\n        example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``.\n\n        If no parameters are specified, this is equivalent to turning on the\n        standard grid." - }, - { - "code": "def pmf(value):\n    probs = probabilities(value)\n    if abs(1.-sum(map(float, value.split()))) > 1e-12:\n        raise ValueError('The probabilities %s do not sum up to 1!' % value)\n    return [(p, i) for i, p in enumerate(probs)]", - "docstring": "Convert a string into a Probability Mass Function.\n\n    :param value:\n        a sequence of probabilities summing up to 1 (no commas)\n    :returns:\n        a list of pairs [(probability, index), ...] with index starting from 0\n\n    >>> pmf(\"0.157 0.843\")\n    [(0.157, 0), (0.843, 1)]" - }, - { - "code": "def chi_square_calc(classes, table, TOP, P, POP):\n    try:\n        result = 0\n        for i in classes:\n            for index, j in enumerate(classes):\n                expected = (TOP[j] * P[i]) / (POP[i])\n                result += ((table[i][j] - expected)**2) / expected\n        return result\n    except Exception:\n        return \"None\"", - "docstring": "Calculate chi-squared.\n\n    :param classes: confusion matrix classes\n    :type classes : list\n    :param table: confusion matrix table\n    :type table : dict\n    :param TOP: test outcome positive\n    :type TOP : dict\n    :param P: condition positive\n    :type P : dict\n    :param POP: population\n    :type POP : dict\n    :return: chi-squared as float" - }, - { - "code": "def create(self, id, fd, filename='attachment-name'):\n        schema = AttachmentSchema(exclude=('id', 'created', 'updated', 'size', 'path', 'device_id'))\n        resp = self.service.post(self._base(id),\n                                 files={'file': (filename, fd)})\n        return self.service.decode(schema, resp)", - "docstring": "Add an attachment to a device.\n\n        :param id: Device ID as an int.\n        :param fd: File-like object to upload.\n        :param filename: (optional) Name to use for new attachment as a string.\n        :return: :class:`attachments.Attachment` object\n        :rtype: attachments.Attachment" - }, - { - "code": "def getThirdPartyLibCompilerFlags(self, libs):\n\t\tfmt = PrintingFormat.singleLine()\n\t\tif libs[0] == '--multiline':\n\t\t\tfmt = PrintingFormat.multiLine()\n\t\t\tlibs = libs[1:]\n\t\tplatformDefaults = True\n\t\tif libs[0] == '--nodefaults':\n\t\t\tplatformDefaults = False\n\t\t\tlibs = libs[1:]\n\t\tdetails = self.getThirdpartyLibs(libs, includePlatformDefaults=platformDefaults)\n\t\treturn details.getCompilerFlags(self.getEngineRoot(), fmt)", - "docstring": "Retrieves the compiler flags for building against the Unreal-bundled versions of the specified third-party libraries" - }, - { - "code": "def add_menu(self, name, link=None):\n        if self.menu_began:\n            if self.menu_separator_tag:\n                self.write(self.menu_separator_tag)\n        else:\n            self.write('