| code | docstring |
|---|---|
def addValuesToField(self, i, numValues):
assert(len(self.fields)>i)
values = [self.addValueToField(i) for n in range(numValues)]
return values
|
Add values to the field i.
|
def clean_email_or_username(self):
email_or_username = self.cleaned_data[self.Fields.EMAIL_OR_USERNAME].strip()
if not email_or_username:
return email_or_username
email = email_or_username__to__email(email_or_username)
bulk_entry = len(split_usernames_and_emails(email)) > 1
if bulk_entry:
for email in split_usernames_and_emails(email):
validate_email_to_link(
email,
None,
ValidationMessages.INVALID_EMAIL_OR_USERNAME,
ignore_existing=True
)
email = email_or_username
else:
validate_email_to_link(
email,
email_or_username,
ValidationMessages.INVALID_EMAIL_OR_USERNAME,
ignore_existing=True
)
return email
|
Clean email form field
Returns:
str: the cleaned value, converted to an email address (or an empty string)
|
def print_error(message, wrap=True):
if wrap:
message = 'ERROR: {0}. Exit...'.format(message.rstrip('.'))
colorizer = (_color_wrap(colorama.Fore.RED)
if colorama
else lambda message: message)
return print(colorizer(message), file=sys.stderr)
|
Print error message to stderr, using ANSI-colors.
:param message: Message to print
:param wrap:
Wrap message into ``ERROR: <message>. Exit...`` template. By default:
True
|
def fetch_events_for_issues_and_pr(self):
self.fetcher.fetch_events_async(self.issues, "issues")
self.fetcher.fetch_events_async(self.pull_requests, "pull requests")
|
Fetch events for issues and pull requests
@return [Array] array of fetched issues
|
def _parse_doc_ref(self):
command = LatexCommand(
'setDocRef',
{'name': 'handle', 'required': True, 'bracket': '{'})
try:
parsed = next(command.parse(self._tex))
except StopIteration:
self._logger.warning('lsstdoc has no setDocRef')
self._handle = None
self._series = None
self._serial = None
return
self._handle = parsed['handle']
try:
self._series, self._serial = self._handle.split('-', 1)
except ValueError:
self._logger.warning('lsstdoc handle cannot be parsed into '
'series and serial: %r', self._handle)
self._series = None
self._serial = None
|
Parse the document handle.
Sets the ``_series``, ``_serial``, and ``_handle`` attributes.
|
def lo_stops(self, lo_stops):
_set_params(self.ode_obj, 'LoStop', lo_stops, self.ADOF + self.LDOF)
|
Set the lo stop values for this object's degrees of freedom.
Parameters
----------
lo_stops : float or sequence of float
A lo stop value to set on all degrees of freedom, or a list
containing one such value for each degree of freedom. For rotational
degrees of freedom, these values must be in radians.
|
def get_all_keyring():
_load_plugins()
viable_classes = KeyringBackend.get_viable_backends()
rings = util.suppress_exceptions(viable_classes, exceptions=TypeError)
return list(rings)
|
Return a list of all implemented keyrings that can be constructed without
parameters.
|
def label(labels=[], language='any', sortLabel=False):
if not labels:
return None
if not language:
language = 'und'
labels = [dict_to_label(l) for l in labels]
l = False
if sortLabel:
l = find_best_label_for_type(labels, language, 'sortLabel')
if not l:
l = find_best_label_for_type(labels, language, 'prefLabel')
if not l:
l = find_best_label_for_type(labels, language, 'altLabel')
if l:
return l
else:
return label(labels, 'any', sortLabel) if language != 'any' else None
|
Provide a label for a list of labels.
The items in the list of labels are assumed to be either instances of
:class:`Label`, or dicts with at least the key `label` in them. These will
be passed to the :func:`dict_to_label` function.
This method tries to find a label by looking if there's
a pref label for the specified language. If there's no pref label,
it looks for an alt label. It disregards hidden labels.
While matching languages, preference will be given to exact matches. But,
if no exact match is present, an inexact match will be attempted. This might
be because a label in language `nl-BE` is being requested, but only `nl` or
even `nl-NL` is present. Similarly, when requesting `nl`, a label with
language `nl-NL` or even `nl-Latn-NL` will also be considered,
providing no label is present that has an exact match with the
requested language.
If language 'any' was specified, all labels will be considered,
regardless of language.
To find a label without a specified language, pass `None` as language.
If a language or None was specified, and no label could be found, this
method will automatically try to find a label in some other language.
Finally, if no label could be found, None is returned.
:param string language: The preferred language to receive the label in. This
should be a valid IANA language tag.
:param boolean sortLabel: Should sortLabels be considered or not? If True,
sortLabels will be preferred over prefLabels. Bear in mind that these
are still language dependent. So, it's possible to have a different
sortLabel per language.
:rtype: A :class:`Label` or `None` if no label could be found.
|
def clone(git_uri):
hash_digest = sha256_hash(git_uri)
local_path = home_directory_path(FOLDER, hash_digest)
exists_locally = path_exists(local_path)
if not exists_locally:
_clone_repo(git_uri, local_path)
else:
logging.info(
"Git repository already exists locally.")
return local_path
|
Clone a remote git repository to a local path.
:param git_uri: the URI to the git repository to be cloned
:return: the generated local path where the repository has been cloned to
|
def _filterRecord(filterList, record):
for (fieldIdx, fp, params) in filterList:
x = dict()
x['value'] = record[fieldIdx]
x['acceptValues'] = params['acceptValues']
x['min'] = params['min']
x['max'] = params['max']
if not fp(x):
return False
return True
|
Takes a record and returns true if record meets filter criteria,
false otherwise
|
def recursive_find_search(folder_path, regex=''):
outlist = []
for root, dirs, files in os.walk(folder_path):
outlist.extend([op.join(root, f) for f in files
if re.search(regex, f)])
return outlist
|
Returns absolute paths of files that match the regex within folder_path and
all its children folders.
Note: The regex matching is done using the search function
of the re module.
Parameters
----------
folder_path: string
regex: string
Returns
-------
A list of strings.
|
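For reference, a self-contained restatement of the same `os.walk`/`re.search` pattern; the folder path and regex in the commented usage line are hypothetical examples.

```python
import os
import os.path as op
import re

def recursive_find_search(folder_path, regex=''):
    # Walk the tree and keep the path of every file whose name matches the regex.
    outlist = []
    for root, dirs, files in os.walk(folder_path):
        outlist.extend(op.join(root, f) for f in files if re.search(regex, f))
    return outlist

# Hypothetical usage: find all NIfTI files under /data/subject01.
# nifti_paths = recursive_find_search('/data/subject01', regex=r'\.nii(\.gz)?$')
```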
def _keys_to_camel_case(self, obj):
return dict((to_camel_case(key), value) for (key, value) in obj.items())
|
Make a copy of a dictionary with all keys converted to camel case. This just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary.
:param obj: Dictionary to convert keys to camel case.
:return: Dictionary with the input values and all keys in camel case
|
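A minimal standalone sketch of the key conversion; the `to_camel_case` helper below is a hypothetical stand-in for the one used by the library.

```python
def to_camel_case(key):
    # Hypothetical helper: 'first_name' -> 'firstName'.
    head, *rest = key.split('_')
    return head + ''.join(part.capitalize() for part in rest)

def keys_to_camel_case(obj):
    # Return a new dict with every key converted to camel case.
    return {to_camel_case(key): value for key, value in obj.items()}

print(keys_to_camel_case({'first_name': 'Ada', 'last_name': 'Lovelace'}))
# {'firstName': 'Ada', 'lastName': 'Lovelace'}
```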
def parse_datetime_to_epoch(datestamp, magnitude=1.0):
parsed_datetime = parse_lms_api_datetime(datestamp)
time_since_epoch = parsed_datetime - UNIX_EPOCH
return int(time_since_epoch.total_seconds() * magnitude)
|
Convert an ISO-8601 datetime string to a Unix epoch timestamp in some magnitude.
By default, returns seconds.
|
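A standalone sketch of the same epoch conversion using only the standard library (the source relies on a `parse_lms_api_datetime` helper and a module-level `UNIX_EPOCH`); passing `magnitude=1000.0` yields milliseconds.

```python
from datetime import datetime, timezone

UNIX_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)

def datetime_to_epoch(dt, magnitude=1.0):
    # Seconds (or milliseconds, etc.) between dt and the Unix epoch.
    return int((dt - UNIX_EPOCH).total_seconds() * magnitude)

dt = datetime(2020, 1, 1, tzinfo=timezone.utc)
print(datetime_to_epoch(dt))          # 1577836800 (seconds)
print(datetime_to_epoch(dt, 1000.0))  # 1577836800000 (milliseconds)
```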
def serialize(self, data=None):
if data is not None and self.response is not None:
self.response['Content-Type'] = self.media_types[0]
self.response.write(data)
return data
|
Transforms the object into an acceptable format for transmission.
@throws ValueError
To indicate this serializer does not support the encoding of the
specified object.
|
def angles(self):
return [self.ode_obj.getAngle(i) for i in range(self.ADOF)]
|
List of angles for rotational degrees of freedom.
|
def element_to_unicode(element):
if hasattr(ElementTree, 'tounicode'):
return ElementTree.tounicode(element)
elif sys.version_info.major < 3:
return unicode(ElementTree.tostring(element))
else:
return ElementTree.tostring(element, encoding = "unicode")
|
Serialize an XML element into a unicode string.
This should work the same on Python2 and Python3 and with all
:etree:`ElementTree` implementations.
:Parameters:
- `element`: the XML element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
|
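A quick check of the Python 3 branch using the standard-library ElementTree: `tostring(..., encoding="unicode")` returns `str` rather than `bytes`.

```python
import xml.etree.ElementTree as ElementTree

root = ElementTree.Element('message', attrib={'to': 'romeo@example.net'})
body = ElementTree.SubElement(root, 'body')
body.text = 'Hello'

# Python 3: tostring with encoding="unicode" returns a str, not bytes.
print(ElementTree.tostring(root, encoding='unicode'))
# <message to="romeo@example.net"><body>Hello</body></message>
```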
def inferSingleStep(self, patternNZ, weightMatrix):
outputActivation = weightMatrix[patternNZ].sum(axis=0)
outputActivation = outputActivation - numpy.max(outputActivation)
expOutputActivation = numpy.exp(outputActivation)
predictDist = expOutputActivation / numpy.sum(expOutputActivation)
return predictDist
|
Perform inference for a single step. Given an SDR input and a weight
matrix, return a predicted distribution.
:param patternNZ: list of the active indices from the output below
:param weightMatrix: numpy array of the weight matrix
:return: numpy array of the predicted class label distribution
|
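A standalone NumPy sketch of the same computation (sum the weight-matrix rows of the active bits, then softmax); the toy weight matrix is made up for illustration.

```python
import numpy as np

def infer_single_step(pattern_nz, weight_matrix):
    # Sum the weight-matrix rows of the active input bits, then softmax.
    activation = weight_matrix[pattern_nz].sum(axis=0)
    activation = activation - np.max(activation)   # subtract max for numerical stability
    exp_activation = np.exp(activation)
    return exp_activation / exp_activation.sum()

weights = np.array([[0.5, 0.1, 0.0],
                    [0.2, 0.3, 0.1],
                    [0.0, 0.4, 0.6]])
print(infer_single_step([0, 2], weights))   # three class probabilities summing to 1.0
```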
def showPredictions():
for k in range(6):
tm.reset()
print "--- " + "ABCDXY"[k] + " ---"
tm.compute(set(seqT[k][:].nonzero()[0].tolist()), learn=False)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
print("Active cols: " + str(np.nonzero(currentColumns)[0]))
print("Predicted cols: " + str(np.nonzero(predictedColumns)[0]))
print ""
|
Shows predictions of the TM when presented with the characters A, B, C, D, X, and
Y without any contextual information, that is, not embedded within a sequence.
|
def _parse_iedb_response(response):
if len(response) == 0:
raise ValueError("Empty response from IEDB!")
df = pd.read_csv(io.BytesIO(response), delim_whitespace=True, header=0)
assert type(df) == pd.DataFrame
df = pd.DataFrame(df)
if len(df) == 0:
raise ValueError(
"No binding predictions in response from IEDB: %s" % (response,))
required_columns = [
"allele",
"peptide",
"ic50",
"start",
"end",
]
for column in required_columns:
if column not in df.columns:
raise ValueError(
"Response from IEDB is missing '%s' column: %s. Full "
"response:\n%s" % (
column,
df.ix[0],
response))
df = df.rename(columns={
"percentile_rank": "rank",
"percentile rank": "rank"})
return df
|
Take the binding predictions returned by IEDB's web API
and parse them into a DataFrame
Expect response to look like:
allele seq_num start end length peptide ic50 percentile_rank
HLA-A*01:01 1 2 10 9 LYNTVATLY 2145.70 3.7
HLA-A*01:01 1 5 13 9 TVATLYCVH 2216.49 3.9
HLA-A*01:01 1 7 15 9 ATLYCVHQR 2635.42 5.1
HLA-A*01:01 1 4 12 9 NTVATLYCV 6829.04 20
HLA-A*01:01 1 1 9 9 SLYNTVATL 8032.38 24
HLA-A*01:01 1 8 16 9 TLYCVHQRI 8853.90 26
HLA-A*01:01 1 3 11 9 YNTVATLYC 9865.62 29
HLA-A*01:01 1 6 14 9 VATLYCVHQ 27575.71 58
HLA-A*01:01 1 10 18 9 YCVHQRIDV 48929.64 74
HLA-A*01:01 1 9 17 9 LYCVHQRID 50000.00 75
|
def _child_allowed(self, child_rule):
num_kids = self.node.children.count()
num_kids_allowed = len(self.rule.children)
if not self.rule.multiple_paths:
num_kids_allowed = 1
if num_kids >= num_kids_allowed:
raise AttributeError('Rule %s only allows %s children' % (
self.rule_name, num_kids_allowed))
for node in self.node.children.all():
if node.data.rule_label == child_rule.class_label:
raise AttributeError('Child rule already exists')
if child_rule not in self.rule.children:
raise AttributeError('Rule %s is not a valid child of Rule %s' % (
child_rule.__name__, self.rule_name))
|
Called to verify that the given rule can become a child of the
current node.
:raises AttributeError:
if the child is not allowed
|
def _normalize_issue_dir_with_dtd(self, path):
if exists(join(path, 'resolved_issue.xml')):
return
issue_xml_content = open(join(path, 'issue.xml')).read()
sis = ['si510.dtd', 'si520.dtd', 'si540.dtd']
tmp_extracted = 0
for si in sis:
if si in issue_xml_content:
self._extract_correct_dtd_package(si.split('.')[0], path)
tmp_extracted = 1
if not tmp_extracted:
message = "It looks like the path " + path
message += " does not contain an si510, si520 or si540 in issue.xml file"
self.logger.error(message)
raise ValueError(message)
command = ["xmllint", "--format", "--loaddtd",
join(path, 'issue.xml'),
"--output", join(path, 'resolved_issue.xml')]
dummy, dummy, cmd_err = run_shell_command(command)
if cmd_err:
message = "Error in cleaning %s: %s" % (
join(path, 'issue.xml'), cmd_err)
self.logger.error(message)
raise ValueError(message)
|
The issue.xml files from Elsevier assume the existence of a local DTD.
This procedure installs the DTDs next to the issue.xml file
and normalizes it using xmllint in order to resolve all namespaces
and references.
|
def broadcast_event(self, event, *args):
pkt = dict(type="event",
name=event,
args=args,
endpoint=self.ns_name)
for sessid, socket in six.iteritems(self.socket.server.sockets):
socket.send_packet(pkt)
|
This is sent to all the sockets in this particular Namespace,
including itself.
|
def passcode(callsign):
assert isinstance(callsign, str)
callsign = callsign.split('-')[0].upper()
code = 0x73e2
for i, char in enumerate(callsign):
code ^= ord(char) << (8 if not i % 2 else 0)
return code & 0x7fff
|
Takes a CALLSIGN and returns its passcode
|
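A self-contained restatement of the APRS-IS passcode hash above, with a made-up callsign; any SSID suffix is stripped before hashing.

```python
def passcode(callsign):
    # Strip any SSID (e.g. '-9'), then XOR each character into alternating bytes of 0x73e2.
    callsign = callsign.split('-')[0].upper()
    code = 0x73E2
    for i, char in enumerate(callsign):
        code ^= ord(char) << (8 if i % 2 == 0 else 0)
    return code & 0x7FFF

print(passcode('N0CALL-9'))   # the '-9' SSID does not affect the 15-bit result
```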
def PUSH(cpu, src):
size = src.size
v = src.read()
if size != 64 and size != cpu.address_bit_size // 2:
v = Operators.SEXTEND(v, size, cpu.address_bit_size)
size = cpu.address_bit_size
cpu.push(v, size)
|
Pushes a value onto the stack.
Decrements the stack pointer and then stores the source operand on the top of the stack.
:param cpu: current CPU.
:param src: source operand.
|
def count_annotation_values(graph: BELGraph, annotation: str) -> Counter:
return Counter(iter_annotation_values(graph, annotation))
|
Count the number of edges in which each value of the given annotation appears in a graph
:param graph: A BEL graph
:param annotation: The annotation to count
:return: A Counter from {annotation value: frequency}
|
async def send_offnetwork_invitation(
self, send_offnetwork_invitation_request
):
response = hangouts_pb2.SendOffnetworkInvitationResponse()
await self._pb_request('devices/sendoffnetworkinvitation',
send_offnetwork_invitation_request,
response)
return response
|
Send an email to invite a non-Google contact to Hangouts.
|
def register_lcformat(formatkey,
fileglob,
timecols,
magcols,
errcols,
readerfunc_module,
readerfunc,
readerfunc_kwargs=None,
normfunc_module=None,
normfunc=None,
normfunc_kwargs=None,
magsarefluxes=False,
overwrite_existing=False,
lcformat_dir='~/.astrobase/lcformat-jsons'):
LOGINFO('adding %s to LC format registry...' % formatkey)
lcformat_dpath = os.path.abspath(
os.path.expanduser(lcformat_dir)
)
if not os.path.exists(lcformat_dpath):
os.makedirs(lcformat_dpath)
lcformat_jsonpath = os.path.join(lcformat_dpath,'%s.json' % formatkey)
if os.path.exists(lcformat_jsonpath) and not overwrite_existing:
LOGERROR('There is an existing lcformat JSON: %s '
'for this formatkey: %s and '
'overwrite_existing = False, skipping...'
% (lcformat_jsonpath, formatkey))
return None
readermodule = _check_extmodule(readerfunc_module, formatkey)
if not readermodule:
LOGERROR("could not import the required "
"module: %s to read %s light curves" %
(readerfunc_module, formatkey))
return None
try:
getattr(readermodule, readerfunc)
readerfunc_in = readerfunc
except AttributeError:
LOGEXCEPTION('Could not get the specified reader '
'function: %s for lcformat: %s '
'from module: %s'
% (formatkey, readerfunc_module, readerfunc))
raise
if normfunc_module:
normmodule = _check_extmodule(normfunc_module, formatkey)
if not normmodule:
LOGERROR("could not import the required "
"module: %s to normalize %s light curves" %
(normfunc_module, formatkey))
return None
else:
normmodule = None
if normfunc_module and normfunc:
try:
getattr(normmodule, normfunc)
normfunc_in = normfunc
except AttributeError:
LOGEXCEPTION('Could not get the specified norm '
'function: %s for lcformat: %s '
'from module: %s'
% (normfunc, formatkey, normfunc_module))
raise
else:
normfunc_in = None
formatdict = {'fileglob':fileglob,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols,
'magsarefluxes':magsarefluxes,
'lcreader_module':readerfunc_module,
'lcreader_func':readerfunc_in,
'lcreader_kwargs':readerfunc_kwargs,
'lcnorm_module':normfunc_module,
'lcnorm_func':normfunc_in,
'lcnorm_kwargs':normfunc_kwargs}
with open(lcformat_jsonpath,'w') as outfd:
json.dump(formatdict, outfd, indent=4)
return lcformat_jsonpath
|
This adds a new LC format to the astrobase LC format registry.
Allows handling of custom format light curves for astrobase lcproc
drivers. Once the format is successfully registered, light curves should
work transparently with all of the functions in this module, by simply
calling them with the `formatkey` in the `lcformat` keyword argument.
LC format specifications are generated as JSON files. astrobase comes with
several of these in `<astrobase install path>/data/lcformats`. LC formats
you add by using this function will have their specifiers written to the
`~/.astrobase/lcformat-jsons` directory in your home directory.
Parameters
----------
formatkey : str
A str used as the unique ID of this LC format for all lcproc functions
and can be used to look it up later and import the correct functions
needed to support it for lcproc operations. For example, we use
'kep-fits' as the specifier for Kepler FITS light curves, which can be
read by the `astrobase.astrokep.read_kepler_fitslc` function as
specified by the `<astrobase install path>/data/lcformats/kep-fits.json`
LC format specification JSON produced by `register_lcformat`.
fileglob : str
The default UNIX fileglob to use to search for light curve files in this
LC format. This is a string like '*-whatever-???-*.*??-.lc'.
timecols,magcols,errcols : list of str
These are all lists of strings indicating which keys in the lcdict
produced by your `lcreader_func` that will be extracted and used by
lcproc functions for processing. The lists must all have the same
dimensions, e.g. if timecols = ['timecol1','timecol2'], then magcols
must be something like ['magcol1','magcol2'] and errcols must be
something like ['errcol1', 'errcol2']. This allows you to process
multiple apertures or multiple types of measurements in one go.
Each element in these lists can be a simple key, e.g. 'time' (which
would correspond to lcdict['time']), or a composite key,
e.g. 'aperture1.times.rjd' (which would correspond to
lcdict['aperture1']['times']['rjd']). See the examples in the lcformat
specification JSON files in `<astrobase install path>/data/lcformats`.
readerfunc_module : str
This is either:
- a Python module import path, e.g. 'astrobase.lcproc.catalogs' or
- a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'
that contains the Python module that contains functions used to open
(and optionally normalize) a custom LC format that's not natively
supported by astrobase.
readerfunc : str
This is the function name in `readerfunc_module` to use to read light
curves in the custom format. This MUST always return a dictionary (the
'lcdict') with the following signature (the keys listed below are
required, but others are allowed)::
{'objectid': this object's identifier as a string,
'objectinfo':{'ra': this object's right ascension in decimal deg,
'decl': this object's declination in decimal deg,
'ndet': the number of observations in this LC,
'objectid': the object ID again for legacy reasons},
...other time columns, mag columns go in as their own keys}
readerfunc_kwargs : dict or None
This is a dictionary containing any kwargs to pass through to
the light curve reader function.
normfunc_module : str or None
This is either:
- a Python module import path, e.g. 'astrobase.lcproc.catalogs' or
- a path to a Python file, e.g. '/astrobase/hatsurveys/hatlc.py'
- None, in which case we'll use default normalization
that contains the Python module that contains functions used to
normalize a custom LC format that's not natively supported by astrobase.
normfunc : str or None
This is the function name in `normfunc_module` to use to normalize light
curves in the custom format. If None, the default normalization method
used by lcproc is to find gaps in the time-series, normalize
measurements grouped by these gaps to zero, then normalize the entire
magnitude time series to global time series median using the
`astrobase.lcmath.normalize_magseries` function.
If this is provided, the normalization function should take and return
an lcdict of the same form as that produced by `readerfunc` above. For
an example of a specific normalization function, see
`normalize_lcdict_by_inst` in the `astrobase.hatsurveys.hatlc` module.
normfunc_kwargs : dict or None
This is a dictionary containing any kwargs to pass through to
the light curve normalization function.
magsarefluxes : bool
If this is True, then all lcproc functions will treat the measurement
columns in the lcdict produced by your `readerfunc` as flux instead of
mags, so things like default normalization and sigma-clipping will be
done correctly. If this is False, magnitudes will be treated as
magnitudes.
overwrite_existing : bool
If this is True, this function will overwrite any existing LC format
specification JSON with the same name as that provided in the
`formatkey` arg. This can be used to update LC format specifications
while keeping the `formatkey` the same.
lcformat_dir : str
This specifies the directory where the LC format specification JSON
produced by this function will be written. By default, this goes to the
`.astrobase/lcformat-jsons` directory in your home directory.
Returns
-------
str
Returns the file path to the generated LC format specification JSON
file.
|
def map_transaction(txn):
if isinstance(txn['sid'], dict):
sid = txn['sid']['sid']
symbol = txn['sid']['symbol']
else:
sid = txn['sid']
symbol = txn['sid']
return {'sid': sid,
'symbol': symbol,
'price': txn['price'],
'order_id': txn['order_id'],
'amount': txn['amount'],
'commission': txn['commission'],
'dt': txn['dt']}
|
Maps a single transaction row to a dictionary.
Parameters
----------
txn : pd.DataFrame
A single transaction object to convert to a dictionary.
Returns
-------
dict
Mapped transaction.
|
def upload_files(selected_file, selected_host, only_link, file_name):
try:
answer = requests.post(
url=selected_host[0]+"upload.php",
files={'files[]':selected_file})
file_name_1 = re.findall(r'"url": *"((h.+\/){0,1}(.+?))"[,\}]', \
answer.text.replace("\\", ""))[0][2]
if only_link:
return [selected_host[1]+file_name_1, "{}: {}{}".format(file_name, selected_host[1], file_name_1)]
else:
return "{}: {}{}".format(file_name, selected_host[1], file_name_1)
except requests.exceptions.ConnectionError:
print(file_name + ' couldn\'t be uploaded to ' + selected_host[0])
|
Uploads selected file to the host, thanks to the fact that
every pomf.se based site has pretty much the same architecture.
|
def getobjectinfo(self, window_name, object_name):
try:
obj_info = self._get_object_map(window_name, object_name,
wait_for_object=False)
except atomac._a11y.ErrorInvalidUIElement:
self._windows = {}
obj_info = self._get_object_map(window_name, object_name,
wait_for_object=False)
props = []
if obj_info:
for obj_prop in obj_info.keys():
if not obj_info[obj_prop] or obj_prop == "obj":
continue
props.append(obj_prop)
return props
|
Get object properties.
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: list of properties
@rtype: list
|
def get_attached_volumes(self, datacenter_id, server_id, depth=1):
response = self._perform_request(
'/datacenters/%s/servers/%s/volumes?depth=%s' % (
datacenter_id,
server_id,
str(depth)))
return response
|
Retrieves a list of volumes attached to the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
|
def delete(self, store_id, cart_id, line_id):
self.store_id = store_id
self.cart_id = cart_id
self.line_id = line_id
return self._mc_client._delete(url=self._build_path(store_id, 'carts', cart_id, 'lines', line_id))
|
Delete a specific line item from a cart.
:param store_id: The store id.
:type store_id: :py:class:`str`
:param cart_id: The id for the cart.
:type cart_id: :py:class:`str`
:param line_id: The id for the line item of a cart.
:type line_id: :py:class:`str`
|
async def query_presence(self, query_presence_request):
response = hangouts_pb2.QueryPresenceResponse()
await self._pb_request('presence/querypresence',
query_presence_request, response)
return response
|
Return presence status for a list of users.
|
def suggest_spelling(q, wait=10, asynchronous=False, cached=False):
return YahooSpelling(q, wait, asynchronous, cached)
|
Returns list of suggested spelling corrections for the given query.
|
def bounds(self):
if self._raw["bounds"] is None:
return self.process_pyramid.bounds
else:
return Bounds(*_validate_bounds(self._raw["bounds"]))
|
Process bounds as defined in the configuration.
|
def get_run_time_period(run_steps):
init_ts_start = get_standardized_timestamp('now', None)
ts_start = init_ts_start
ts_end = '0'
for run_step in run_steps:
if run_step.ts_start and run_step.ts_end:
if run_step.ts_start < ts_start:
ts_start = run_step.ts_start
if run_step.ts_end > ts_end:
ts_end = run_step.ts_end
if ts_end == '0':
ts_end = None
if ts_start == init_ts_start:
ts_start = None
logger.info('get_run_time_period range returned ' + str(ts_start) + ' to ' + str(ts_end))
return ts_start, ts_end
|
This method finds the time range which covers all the Run_Steps
:param run_steps: list of Run_Step objects
:return: tuple of start and end timestamps
|
def _getInputNeighborhood(self, centerInput):
if self._wrapAround:
return topology.wrappingNeighborhood(centerInput,
self._potentialRadius,
self._inputDimensions)
else:
return topology.neighborhood(centerInput,
self._potentialRadius,
self._inputDimensions)
|
Gets a neighborhood of inputs.
Simply calls topology.wrappingNeighborhood or topology.neighborhood.
A subclass can insert different topology behavior by overriding this method.
:param centerInput (int)
The center of the neighborhood.
@returns (1D numpy array of integers)
The inputs in the neighborhood.
|
def dipole_moment(CASRN, AvailableMethods=False, Method=None):
def list_methods():
methods = []
if CASRN in _dipole_CCDB.index and not np.isnan(_dipole_CCDB.at[CASRN, 'Dipole']):
methods.append(CCCBDB)
if CASRN in _dipole_Muller.index and not np.isnan(_dipole_Muller.at[CASRN, 'Dipole']):
methods.append(MULLER)
if CASRN in _dipole_Poling.index and not np.isnan(_dipole_Poling.at[CASRN, 'Dipole']):
methods.append(POLING)
methods.append(NONE)
return methods
if AvailableMethods:
return list_methods()
if not Method:
Method = list_methods()[0]
if Method == CCCBDB:
_dipole = float(_dipole_CCDB.at[CASRN, 'Dipole'])
elif Method == MULLER:
_dipole = float(_dipole_Muller.at[CASRN, 'Dipole'])
elif Method == POLING:
_dipole = float(_dipole_Poling.at[CASRN, 'Dipole'])
elif Method == NONE:
_dipole = None
else:
raise Exception('Failure in in function')
return _dipole
|
This function handles the retrieval of a chemical's dipole moment.
Lookup is based on CASRNs. Will automatically select a data source to use
if no Method is provided; returns None if the data is not available.
Preferred source is 'CCCBDB'. Considerable variation in reported data has
been found.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
dipole : float
Dipole moment, [debye]
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain dipole moment with the
given inputs
Other Parameters
----------------
Method : string, optional
The method name to use. Accepted methods are 'CCCBDB', 'MULLER', or
'POLING'. All valid values are also held in the list `dipole_methods`.
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
the dipole moment for the desired chemical, and will return methods
instead of the dipole moment
Notes
-----
A total of three sources are available for this function. They are:
* 'CCCBDB', a series of critically evaluated data for compounds in
[1]_, intended for use in predictive modeling.
* 'MULLER', a collection of data in a
group-contribution scheme in [2]_.
* 'POLING', in the appendix in [3]_.
This function returns dipole moment in units of Debye. This is actually
a non-SI unit; to convert to SI, multiply by 3.33564095198e-30 and its
units will be in coulomb*meter (equivalently, ampere*second*meter). The
constant is the result of 1E-21/c, where c is the speed of light.
Examples
--------
>>> dipole_moment(CASRN='64-17-5')
1.44
References
----------
.. [1] NIST Computational Chemistry Comparison and Benchmark Database
NIST Standard Reference Database Number 101 Release 17b, September 2015,
Editor: Russell D. Johnson III http://cccbdb.nist.gov/
.. [2] Muller, Karsten, Liudmila Mokrushina, and Wolfgang Arlt. "Second-
Order Group Contribution Method for the Determination of the Dipole
Moment." Journal of Chemical & Engineering Data 57, no. 4 (April 12,
2012): 1231-36. doi:10.1021/je2013395.
.. [3] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
New York: McGraw-Hill Professional, 2000.
|
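A one-line illustration of the Debye-to-SI conversion mentioned in the Notes, applied to the ethanol value from the Examples section; assumes 1 D ≈ 3.33564e-30 C·m.

```python
DEBYE_TO_SI = 3.33564095198e-30   # coulomb*meter per debye

dipole_debye = 1.44               # ethanol, from the example above
dipole_si = dipole_debye * DEBYE_TO_SI
print(dipole_si)                  # ~4.80e-30 C*m
```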
def get_dcm_reader(store_metadata=True, header_fields=None):
if not store_metadata:
return lambda fpath: fpath
if header_fields is None:
build_dcm = lambda fpath: DicomFile(fpath)
else:
dicom_header = namedtuple('DicomHeader', header_fields)
build_dcm = lambda fpath: dicom_header._make(DicomFile(fpath).get_attributes(header_fields))
return build_dcm
|
Creates a lambda function to read DICOM files.
If store_metadata is False, will only return the file path.
Else, if you give header_fields, will return only the set of
header_fields within a DicomFile object, or the whole DICOM file if
header_fields is None.
:return: function
This function has only one parameter: file_path
|
def readFromCheckpoint(cls, checkpointDir):
checkpointPath = cls._getModelCheckpointFilePath(checkpointDir)
with open(checkpointPath, 'r') as f:
proto = cls.getSchema().read(f,
traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS)
model = cls.read(proto)
return model
|
Deserializes model from checkpointDir using capnproto
|
def get_scheduler_location(self, topologyName, callback=None):
isWatching = False
ret = {
"result": None
}
if callback:
isWatching = True
else:
def callback(data):
ret["result"] = data
self._get_scheduler_location_with_watch(topologyName, callback, isWatching)
return ret["result"]
|
get scheduler location
|
def last_job_data(self, pk=None, **kwargs):
ujt = self.get(pk, include_debug_header=True, **kwargs)
if 'current_update' in ujt['related']:
debug.log('A current job; retrieving it.', header='details')
return client.get(ujt['related']['current_update'][7:]).json()
elif ujt['related'].get('last_update', None):
debug.log('No current job or update exists; retrieving the most recent.', header='details')
return client.get(ujt['related']['last_update'][7:]).json()
else:
raise exc.NotFound('No related jobs or updates exist.')
|
Internal utility function for Unified Job Templates. Returns data about the last job run off of that UJT
|
def to_representation(self, instance):
updated_program = copy.deepcopy(instance)
enterprise_customer_catalog = self.context['enterprise_customer_catalog']
updated_program['enrollment_url'] = enterprise_customer_catalog.get_program_enrollment_url(
updated_program['uuid']
)
for course in updated_program['courses']:
course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(course['key'])
for course_run in course['course_runs']:
course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(
course_run['key']
)
return updated_program
|
Return the updated program data dictionary.
Arguments:
instance (dict): The program data.
Returns:
dict: The updated program data.
|
def _characteristics_discovered(self, service):
self._discovered_services.add(service)
if self._discovered_services >= set(self._peripheral.services()):
self._discovered.set()
|
Called when GATT characteristics have been discovered.
|
def truncate(text, max_len=350, end='...'):
if len(text) <= max_len:
return text
return text[:max_len].rsplit(' ', maxsplit=1)[0] + end
|
Truncate the supplied text for display.
Arguments:
text (:py:class:`str`): The text to truncate.
max_len (:py:class:`int`, optional): The maximum length of the
text before truncation (defaults to 350 characters).
end (:py:class:`str`, optional): The ending to use to show that
the text was truncated (defaults to ``'...'``).
Returns:
:py:class:`str`: The truncated text.
|
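A runnable check of `truncate` (restated here so the snippet is self-contained): the cut lands on a word boundary before the ending is appended.

```python
def truncate(text, max_len=350, end='...'):
    if len(text) <= max_len:
        return text
    # Cut at max_len, then drop the trailing partial word before appending the ending.
    return text[:max_len].rsplit(' ', maxsplit=1)[0] + end

print(truncate('The quick brown fox jumps over the lazy dog', max_len=20))
# 'The quick brown fox...'
```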
def effective_bounds(self):
return snap_bounds(
bounds=clip_bounds(bounds=self.init_bounds, clip=self.process_pyramid.bounds),
pyramid=self.process_pyramid,
zoom=min(
self.baselevels["zooms"]
) if self.baselevels else min(
self.init_zoom_levels
)
)
|
Effective process bounds required to initialize inputs.
Process bounds sometimes have to be larger, because all intersecting process
tiles have to be covered as well.
|
def connect(self, addr, port = None, service = None):
with self.lock:
self._connect(addr, port, service)
|
Start establishing TCP connection with given address.
One of: `port` or `service` must be provided and `addr` must be
a domain name and not an IP address if `port` is not given.
When `service` is given try an SRV lookup for that service
at domain `addr`. If `service` is not given or `addr` is an IP address,
or the SRV lookup fails, connect to `port` at host `addr` directly.
[initiating entity only]
:Parameters:
- `addr`: peer name or IP address
- `port`: port number to connect to
- `service`: service name (to be resolved using SRV DNS records)
|
def validate_extra_link(self, extra_link):
if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
raise Exception("Invalid extra.links format. " +
"Extra link must include a 'name' and 'formatter' field")
self.validated_formatter(extra_link[EXTRA_LINK_FORMATTER_KEY])
return extra_link
|
validate extra link
|
def soft_equals(a, b):
if isinstance(a, str) or isinstance(b, str):
return str(a) == str(b)
if isinstance(a, bool) or isinstance(b, bool):
return bool(a) is bool(b)
return a == b
|
Implements the '==' operator, which does JS-style type coercion.
|
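A few illustrative checks of the coercion order (strings first, then booleans), restating the function so the snippet runs on its own.

```python
def soft_equals(a, b):
    # Strings win: compare string representations.
    if isinstance(a, str) or isinstance(b, str):
        return str(a) == str(b)
    # Booleans win next: compare truthiness identity.
    if isinstance(a, bool) or isinstance(b, bool):
        return bool(a) is bool(b)
    return a == b

print(soft_equals('1', 1))    # True  -> str(1) == '1'
print(soft_equals(1, True))   # True  -> bool(1) is True
print(soft_equals(0, []))     # False -> falls through to 0 == []
```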
def swap(self):
effect_args = ['swap']
self.effects.extend(effect_args)
self.effects_log.append('swap')
return self
|
Swap stereo channels. If the input is not stereo, pairs of channels
are swapped, and a possible odd last channel passed through.
E.g., for seven channels, the output order will be 2, 1, 4, 3, 6, 5, 7.
See Also
----------
remix
|
def add_to(self, other):
if type(other) is MaterialPackage:
if self.material == other.material:
self.compound_masses += other.compound_masses
else:
for compound in other.material.compounds:
if compound not in self.material.compounds:
raise Exception("Packages of '" + other.material.name +
"' cannot be added to packages of '" +
self.material.name +
"'. The compound '" + compound +
"' was not found in '" +
self.material.name + "'.")
self.add_to((compound, other.get_compound_mass(compound)))
elif self._is_compound_mass_tuple(other):
compound = other[0]
compound_index = self.material.get_compound_index(compound)
mass = other[1]
self.compound_masses[compound_index] += mass
else:
raise TypeError('Invalid addition argument.')
|
Add another chem material package to this material package.
:param other: The other material package.
|
def gamepad(self):
state = _xinput_state()
_xinput.XInputGetState(self.ControllerID - 1, pointer(state))
self.dwPacketNumber = state.dwPacketNumber
return state.XINPUT_GAMEPAD
|
Returns the current gamepad state. Pressed buttons are shown as a raw integer value.
Use rController.buttons for a list of buttons pressed.
|
def _api_call(self, method_name, *args, **kwargs):
params = kwargs.setdefault('params', {})
params.update({'key': self._apikey})
if self._token is not None:
params.update({'token': self._token})
http_method = getattr(requests, method_name)
return http_method(TRELLO_URL + self._url, *args, **kwargs)
|
Makes the HTTP request.
|
def p_file_contributor(self, f_term, predicate):
for _, _, contributor in self.graph.triples((f_term, predicate, None)):
self.builder.add_file_contribution(self.doc, six.text_type(contributor))
|
Parse all file contributors and add them to the model.
|
def strftime(self, fmt="%d:%H:%M:%S"):
substitutions = {
"%d": str(self.days),
"%H": "{0:02d}".format(self.dhours),
"%h": str(24*self.days + self.dhours),
"%M": "{0:02d}".format(self.dminutes),
"%S": "{0:02d}".format(self.dseconds),
}
s = fmt
for search, replacement in substitutions.items():
s = s.replace(search, replacement)
return s
|
Primitive string formatter.
The only directives understood are the following:
============ ==========================
Directive meaning
============ ==========================
%d day as integer
%H hour [00-23]
%h hours including days
%M minute as integer [00-59]
%S second as integer [00-59]
============ ==========================
|
def get_trip_counts_per_day(self):
query = "SELECT date, count(*) AS number_of_trips FROM day_trips GROUP BY date"
trip_counts_per_day = pd.read_sql_query(query, self.conn, index_col="date")
max_day = trip_counts_per_day.index.max()
min_day = trip_counts_per_day.index.min()
min_date = datetime.datetime.strptime(min_day, '%Y-%m-%d')
max_date = datetime.datetime.strptime(max_day, '%Y-%m-%d')
num_days = (max_date - min_date).days
dates = [min_date + datetime.timedelta(days=x) for x in range(num_days + 1)]
trip_counts = []
date_strings = []
for date in dates:
date_string = date.strftime("%Y-%m-%d")
date_strings.append(date_string)
try:
value = trip_counts_per_day.loc[date_string, 'number_of_trips']
except KeyError:
value = 0
trip_counts.append(value)
for date_string in trip_counts_per_day.index:
assert date_string in date_strings
data = {"date": dates, "date_str": date_strings, "trip_counts": trip_counts}
return pd.DataFrame(data)
|
Get trip counts per day between the start and end day of the feed.
Returns
-------
trip_counts : pandas.DataFrame
Has columns "date_str" (dtype str) "trip_counts" (dtype int)
|
def package_verif_node(self, package):
verif_node = BNode()
type_triple = (verif_node, RDF.type, self.spdx_namespace.PackageVerificationCode)
self.graph.add(type_triple)
value_triple = (verif_node, self.spdx_namespace.packageVerificationCodeValue, Literal(package.verif_code))
self.graph.add(value_triple)
excl_file_nodes = map(
lambda excl: Literal(excl), package.verif_exc_files)
excl_predicate = self.spdx_namespace.packageVerificationCodeExcludedFile
excl_file_triples = [(verif_node, excl_predicate, xcl_file) for xcl_file in excl_file_nodes]
for trp in excl_file_triples:
self.graph.add(trp)
return verif_node
|
Return a node representing package verification code.
|
def outer_left_join(self, join_streamlet, window_config, join_function):
from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt
join_streamlet_result = JoinStreamlet(JoinBolt.OUTER_LEFT, window_config,
join_function, self, join_streamlet)
self._add_child(join_streamlet_result)
join_streamlet._add_child(join_streamlet_result)
return join_streamlet_result
|
Return a new Streamlet formed by outer left joining join_streamlet with this streamlet
|
def pre_deploy(self):
for service in self.genv.services:
service = service.strip().upper()
funcs = common.service_pre_deployers.get(service)
if funcs:
print('Running pre-deployments for service %s...' % (service,))
for func in funcs:
func()
|
Runs methods services have requested be run before each deployment.
|
def tf_retrieve_indices(self, indices):
states = dict()
for name in sorted(self.states_memory):
states[name] = tf.gather(params=self.states_memory[name], indices=indices)
internals = dict()
for name in sorted(self.internals_memory):
internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)
actions = dict()
for name in sorted(self.actions_memory):
actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)
terminal = tf.gather(params=self.terminal_memory, indices=indices)
reward = tf.gather(params=self.reward_memory, indices=indices)
if self.include_next_states:
assert util.rank(indices) == 1
next_indices = (indices + 1) % self.capacity
next_states = dict()
for name in sorted(self.states_memory):
next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)
next_internals = dict()
for name in sorted(self.internals_memory):
next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
else:
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
|
Fetches experiences for given indices.
Args:
indices: Index tensor
Returns: Batch of experiences
|
def localize_datetime(dt, tz_name='UTC'):
tz_aware_dt = dt
if dt.tzinfo is None:
utc = pytz.timezone('UTC')
aware = utc.localize(dt)
timezone = pytz.timezone(tz_name)
tz_aware_dt = aware.astimezone(timezone)
else:
logger.warn('tzinfo already set')
return tz_aware_dt
|
Provide a timezone-aware object for a given datetime and timezone name
|
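A runnable sketch of the same pytz flow: a naive datetime is interpreted as UTC and converted to the requested zone (the zone name and timestamp are made-up examples).

```python
from datetime import datetime
import pytz

naive = datetime(2021, 6, 1, 12, 0, 0)              # no tzinfo attached
utc_aware = pytz.timezone('UTC').localize(naive)     # interpret the naive value as UTC
local = utc_aware.astimezone(pytz.timezone('Europe/Berlin'))
print(local.isoformat())                             # 2021-06-01T14:00:00+02:00
```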
def get_data(img):
if hasattr(img, '_data_cache') and img._data_cache is None:
img = copy.deepcopy(img)
gc.collect()
return img.get_data()
|
Get the data in the image without having a side effect on the Nifti1Image object
Parameters
----------
img: Nifti1Image
Returns
-------
np.ndarray
|
def _bib_processor(self, retrieved):
items = []
for bib in retrieved.entries:
items.append(bib["content"][0]["value"])
self.url_params = None
return items
|
Return a list of strings formatted as HTML bibliography entries
|
def ensure_dir_exists(func):
"wrap a function that returns a dir, making sure it exists"
@functools.wraps(func)
def make_if_not_present():
dir = func()
if not os.path.isdir(dir):
os.makedirs(dir)
return dir
return make_if_not_present
|
wrap a function that returns a dir, making sure it exists
|
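A small usage sketch of the decorator; `cache_dir` and its path are hypothetical.

```python
import functools
import os

def ensure_dir_exists(func):
    "wrap a function that returns a dir, making sure it exists"
    @functools.wraps(func)
    def make_if_not_present():
        dir = func()
        if not os.path.isdir(dir):
            os.makedirs(dir)
        return dir
    return make_if_not_present

@ensure_dir_exists
def cache_dir():
    # Hypothetical directory-returning function; the directory is created on first call.
    return os.path.expanduser('~/.mytool/cache')

path = cache_dir()   # the directory exists after this call
```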
def _generateMetricSpecString(inferenceElement, metric,
params=None, field=None,
returnLabel=False):
metricSpecArgs = dict(metric=metric,
field=field,
params=params,
inferenceElement=inferenceElement)
metricSpecAsString = "MetricSpec(%s)" % \
', '.join(['%s=%r' % (item[0],item[1])
for item in metricSpecArgs.iteritems()])
if not returnLabel:
return metricSpecAsString
spec = MetricSpec(**metricSpecArgs)
metricLabel = spec.getLabel()
return metricSpecAsString, metricLabel
|
Generates the string representation of a MetricSpec object, and returns
the metric key associated with the metric.
Parameters:
-----------------------------------------------------------------------
inferenceElement:
An InferenceElement value that indicates which part of the inference this
metric is computed on
metric:
The type of the metric being computed (e.g. aae, avg_error)
params:
A dictionary of parameters for the metric. The keys are the parameter names
and the values should be the parameter values (e.g. window=200)
field:
The name of the field for which this metric is being computed
returnLabel:
If True, returns the label of the MetricSpec that was generated
|
def FIR_header(fname_out, h):
M = len(h)
N = 3
f = open(fname_out, 'wt')
f.write('//define a FIR coefficient Array\n\n')
f.write('#include <stdint.h>\n\n')
f.write('#ifndef M_FIR\n')
f.write('#define M_FIR %d\n' % M)
f.write('#endif\n')
f.write('/************************************************************************/\n');
f.write('/* FIR Filter Coefficients */\n');
f.write('float32_t h_FIR[M_FIR] = {')
kk = 0;
for k in range(M):
if (kk < N - 1) and (k < M - 1):
f.write('%15.12f,' % h[k])
kk += 1
elif (kk == N - 1) & (k < M - 1):
f.write('%15.12f,\n' % h[k])
if k < M:
f.write(' ')
kk = 0
else:
f.write('%15.12f' % h[k])
f.write('};\n')
f.write('/************************************************************************/\n')
f.close()
|
Write FIR Filter Header Files
Mark Wickert February 2015
|
def draw(self, size=None, background_threshold=0.01, background_class_id=None, colors=None,
return_foreground_mask=False):
arr = self.get_arr_int(background_threshold=background_threshold, background_class_id=background_class_id)
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
if colors is None:
colors = SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS
ia.do_assert(nb_classes <= len(colors),
"Can't draw all %d classes as it would exceed the maximum number of %d available colors." % (
nb_classes, len(colors),))
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
if return_foreground_mask:
background_class_id = 0 if background_class_id is None else background_class_id
foreground_mask = (arr != background_class_id)
else:
foreground_mask = None
if size is not None:
segmap_drawn = ia.imresize_single_image(segmap_drawn, size, interpolation="nearest")
if foreground_mask is not None:
foreground_mask = ia.imresize_single_image(
foreground_mask.astype(np.uint8), size, interpolation="nearest") > 0
if foreground_mask is not None:
return segmap_drawn, foreground_mask
return segmap_drawn
|
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`imgaug.imgaug.imresize_single_image` for details.
If set to None, no resizing is performed and the size of the segmentation map array is used.
background_threshold : float, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
background_class_id : None or int, optional
See :func:`imgaug.SegmentationMapOnImage.get_arr_int`.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw. If None, then default colors will be used.
return_foreground_mask : bool, optional
Whether to return a mask of the same size as the drawn segmentation map, containing
True at any spatial location that is not the background class and False everywhere else.
Returns
-------
segmap_drawn : (H,W,3) ndarray
Rendered segmentation map (dtype is uint8).
foreground_mask : (H,W) ndarray
Mask indicating the locations of foreground classes (dtype is bool).
This value is only returned if `return_foreground_mask` is True.
|
def prepare_path(path):
if type(path) == list:
return os.path.join(*path)
return path
|
Path join helper method
Join paths if list passed
:type path: str|unicode|list
:rtype: str|unicode
|
def revoke_token(self, token, callback):
yield Task(self.data_store.remove, 'tokens', token=token)
callback()
|
revoke_token removes the access token from the data_store
|
def separate_particles_into_groups(s, region_size=40, bounds=None,
doshift=False):
imtile = s.oshape.translate(-s.pad)
bounding_tile = (imtile if bounds is None else Tile(bounds[0], bounds[1]))
rs = (np.ones(bounding_tile.dim, dtype='int')*region_size if
np.size(region_size) == 1 else np.array(region_size))
n_translate = np.ceil(bounding_tile.shape.astype('float')/rs).astype('int')
particle_groups = []
tile = Tile(left=bounding_tile.l, right=bounding_tile.l + rs)
if doshift == 'rand':
doshift = np.random.choice([True, False])
if doshift:
shift = rs // 2
n_translate += 1
else:
shift = 0
deltas = np.meshgrid(*[np.arange(i) for i in n_translate])
positions = s.obj_get_positions()
if bounds is None:
positions = np.clip(positions, imtile.l+1e-3, imtile.r-1e-3)
groups = list(map(lambda *args: find_particles_in_tile(positions,
tile.translate( np.array(args) * rs - shift)), *[d.ravel()
for d in deltas]))
for i in range(len(groups)-1, -1, -1):
if groups[i].size == 0:
groups.pop(i)
assert _check_groups(s, groups)
return groups
|
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
|
def ping(self):
self._idle()
self._transaction_start()
self._i2c_start()
self._i2c_write_bytes([self._address_byte(False)])
self._i2c_stop()
response = self._transaction_end()
if len(response) != 1:
raise RuntimeError('Expected 1 response byte but received {0} byte(s).'.format(len(response)))
return ((response[0] & 0x01) == 0x00)
|
Attempt to detect if a device at this address is present on the I2C
bus. Will send out the device's address for writing and verify an ACK
is received. Returns true if the ACK is received, and false if not.
|
def append_arguments(klass, sub_parsers, default_epilog, general_arguments):
entry_name = hump_to_underscore(klass.__name__).replace(
'_component',
'')
epilog = default_epilog if default_epilog \
else 'This tool generate by `cliez` ' \
'https://www.github.com/wangwenpei/cliez'
sub_parser = sub_parsers.add_parser(entry_name, help=klass.__doc__,
epilog=epilog)
sub_parser.description = klass.add_arguments.__doc__
if hasattr(klass, 'add_slot_args'):
slot_args = klass.add_slot_args() or []
for v in slot_args:
sub_parser.add_argument(*v[0], **v[1])
sub_parser.description = klass.add_slot_args.__doc__
pass
user_arguments = klass.add_arguments() or []
for v in user_arguments:
sub_parser.add_argument(*v[0], **v[1])
if not klass.exclude_global_option:
for v in general_arguments:
sub_parser.add_argument(*v[0], **v[1])
return sub_parser
|
Add class options to argparser options.
:param cliez.component.Component klass: subclass of Component
:param Namespace sub_parsers:
:param str default_epilog: default_epilog
:param list general_arguments: global options, defined by user
:return: Namespace subparser
|
def format_output(data: pd.DataFrame, source, col_maps=None) -> pd.DataFrame:
if data.empty: return pd.DataFrame()
if source == 'bdp': req_cols = ['ticker', 'field', 'value']
else: req_cols = ['ticker', 'field', 'name', 'value', 'position']
if any(col not in data for col in req_cols): return pd.DataFrame()
if data.dropna(subset=['value']).empty: return pd.DataFrame()
if source == 'bdp':
res = pd.DataFrame(pd.concat([
pd.Series({**{'ticker': t}, **grp.set_index('field').value.to_dict()})
for t, grp in data.groupby('ticker')
], axis=1, sort=False)).transpose().set_index('ticker')
else:
res = pd.DataFrame(pd.concat([
grp.loc[:, ['name', 'value']].set_index('name')
.transpose().reset_index(drop=True).assign(ticker=t)
for (t, _), grp in data.groupby(['ticker', 'position'])
], sort=False)).reset_index(drop=True).set_index('ticker')
res.columns.name = None
if col_maps is None: col_maps = dict()
return res.rename(
columns=lambda vv: col_maps.get(
vv, vv.lower().replace(' ', '_').replace('-', '_')
)
).apply(pd.to_numeric, errors='ignore', downcast='float')
|
Format `pdblp` outputs to column-based results
Args:
data: `pdblp` result
source: `bdp` or `bds`
col_maps: rename columns with these mappings
Returns:
pd.DataFrame
Examples:
>>> format_output(
... data=pd.read_pickle('xbbg/tests/data/sample_bdp.pkl'),
... source='bdp'
... ).reset_index()
ticker name
0 QQQ US Equity INVESCO QQQ TRUST SERIES 1
1 SPY US Equity SPDR S&P 500 ETF TRUST
>>> format_output(
... data=pd.read_pickle('xbbg/tests/data/sample_dvd.pkl'),
... source='bds', col_maps={'Dividend Frequency': 'dvd_freq'}
... ).loc[:, ['ex_date', 'dividend_amount', 'dvd_freq']].reset_index()
ticker ex_date dividend_amount dvd_freq
0 C US Equity 2018-02-02 0.32 Quarter
|
def activateDendrites(self, learn=True):
(numActiveConnected,
numActivePotential) = self.connections.computeActivity(
self.activeCells,
self.connectedPermanence)
activeSegments = (
self.connections.segmentForFlatIdx(i)
for i in xrange(len(numActiveConnected))
if numActiveConnected[i] >= self.activationThreshold
)
matchingSegments = (
self.connections.segmentForFlatIdx(i)
for i in xrange(len(numActivePotential))
if numActivePotential[i] >= self.minThreshold
)
self.activeSegments = sorted(activeSegments,
key=self.connections.segmentPositionSortKey)
self.matchingSegments = sorted(matchingSegments,
key=self.connections.segmentPositionSortKey)
self.numActiveConnectedSynapsesForSegment = numActiveConnected
self.numActivePotentialSynapsesForSegment = numActivePotential
if learn:
for segment in self.activeSegments:
self.lastUsedIterationForSegment[segment.flatIdx] = self.iteration
self.iteration += 1
|
Calculate dendrite segment activity, using the current active cells.
:param learn: (bool) If true, segment activations will be recorded. This
information is used during segment cleanup.
**Pseudocode:**
::
for each distal dendrite segment with activity >= activationThreshold
mark the segment as active
for each distal dendrite segment with unconnected activity >= minThreshold
mark the segment as matching
|
def instruction_to_svg_dict(self, instruction_or_id, copy_result=True):
instruction_id = self.get_instruction_id(instruction_or_id)
if instruction_id in self._cache:
result = self._cache[instruction_id]
else:
result = self._instruction_to_svg_dict(instruction_id)
self._cache[instruction_id] = result
if copy_result:
result = deepcopy(result)
return result
|
Return the SVG dict for the SVGBuilder.
:param instruction_or_id: the instruction or id, see
:meth:`get_instruction_id`
:param bool copy_result: whether to copy the result
:rtype: dict
The result is cached.
|
def filter_list(lst, pattern):
if is_fnmatch_regex(pattern) and not is_regex(pattern):
log.info('Using fnmatch for {0}'.format(pattern))
filst = fnmatch.filter(lst, pattern)
else:
log.info('Using regex match for {0}'.format(pattern))
filst = match_list(lst, pattern)
if filst:
filst.sort()
return filst
|
Filters the lst using pattern.
If pattern starts with '(' it will be considered a re regular expression,
otherwise it will use fnmatch filter.
:param lst: list of strings
:param pattern: string
:return: list of strings
Filtered list of strings
|
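A standalone sketch contrasting the two matching modes; the source's `is_fnmatch_regex`/`is_regex` helpers are approximated here by a simple leading-'(' check, as the docstring describes.

```python
import fnmatch
import re

def filter_list(lst, pattern):
    # Patterns starting with '(' are treated as regular expressions;
    # everything else goes through fnmatch (shell-style globbing).
    if pattern.startswith('('):
        filtered = [s for s in lst if re.match(pattern, s)]
    else:
        filtered = fnmatch.filter(lst, pattern)
    return sorted(filtered)

files = ['scan_01.nii', 'scan_02.nii', 'notes.txt']
print(filter_list(files, 'scan_*.nii'))    # fnmatch: ['scan_01.nii', 'scan_02.nii']
print(filter_list(files, r'(scan_\d+)'))   # regex:   ['scan_01.nii', 'scan_02.nii']
```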
def send_command(self, command):
with self._lock:
try:
self._socket.send(command.encode("utf8"))
result = self.receive()
while result.startswith("S") or result.startswith("NEW"):
_LOGGER.debug("!Got response: %s", result)
result = self.receive()
_LOGGER.debug("Received: %s", result)
return result
except socket.error as error:
_LOGGER.error("Error sending command: %s", error)
self.connect()
return ""
|
Send TCP command to hub and return response.
|
def restart(self):
n = 60
sleep_n = int(self.env.max_restart_wait_minutes/10.*60)
for _ in xrange(n):
self.stop()
if self.dryrun or not self.is_running():
break
print('Waiting for supervisor to stop (%i of %i)...' % (_, n))
time.sleep(sleep_n)
self.start()
for _ in xrange(n):
if self.dryrun or self.is_running():
return
print('Waiting for supervisor to start (%i of %i)...' % (_, n))
time.sleep(sleep_n)
raise Exception('Failed to restart service %s!' % self.name)
|
Supervisor can take a very long time to start and stop,
so wait for it.
|
def list(self, ignore_patterns):
for storage in six.itervalues(self.storages):
if storage.exists(''):
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
|
List all files in all app storages.
|
def __getOptimizedMetricLabel(self):
matchingKeys = matchPatterns([self._optimizeKeyPattern],
self._getMetricLabels())
if len(matchingKeys) == 0:
raise Exception("None of the generated metrics match the specified "
"optimization pattern: %s. Available metrics are %s" % \
(self._optimizeKeyPattern, self._getMetricLabels()))
elif len(matchingKeys) > 1:
raise Exception("The specified optimization pattern '%s' matches more "
"than one metric: %s" % (self._optimizeKeyPattern, matchingKeys))
return matchingKeys[0]
|
Get the label for the metric being optimized. This function also caches
the label in the instance variable self._optimizedMetricLabel
Parameters:
-----------------------------------------------------------------------
metricLabels: A sequence of all the labels being computed for this model
Returns: The label for the metric being optimized over
|
def to_shapely_polygon(self):
import shapely.geometry
return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])
|
Convert this polygon to a Shapely polygon.
Returns
-------
shapely.geometry.Polygon
The Shapely polygon matching this polygon's exterior.
|
def valid(self, time: int = None) -> bool:
if time is None:
epoch = datetime(1970, 1, 1, 0, 0, 0)
now = datetime.utcnow()
time = int((now - epoch).total_seconds())
if isinstance(self.valid_from, int) and time < self.valid_from:
return False
if isinstance(self.valid_to, int) and time > self.valid_to:
return False
return True
|
Is the token valid? This method only checks the timestamps within the
token and compares them against the current time if none is provided.
:param time: The timestamp to validate against
:type time: Union[int, None]
:return: The validity of the token.
:rtype: bool
|
def get_all_checkpoints(rundir="runinfo"):
if(not os.path.isdir(rundir)):
return []
dirs = sorted(os.listdir(rundir))
checkpoints = []
for runid in dirs:
checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
if os.path.isdir(checkpoint):
checkpoints.append(checkpoint)
return checkpoints
|
Finds the checkpoints from all runs in the run directory.
Note that checkpoints are incremental, and this helper will not find
previous checkpoints from earlier than the most recent run. It probably
should be made to do so.
Kwargs:
- rundir(str) : Path to the runinfo directory
Returns:
- a list suitable for the checkpointFiles parameter of DataFlowKernel
constructor
|
def create_body_index(xml_string):
xml = ET.fromstring(xml_string)
body_to_index = {}
for index, body in enumerate(xml.findall("*/Body/Name")):
body_to_index[body.text.strip()] = index
return body_to_index
|
Extract a name-to-index dictionary from the 6DOF settings XML.
|
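For example, a settings document of the following (hypothetical) shape maps each body name to its position:
xml_string = """<Settings><The_6D>
    <Body><Name> Arm </Name></Body>
    <Body><Name> Head </Name></Body>
</The_6D></Settings>"""
create_body_index(xml_string)  # {'Arm': 0, 'Head': 1}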
def dragMouseButtonLeft(self, coord, dest_coord, interval=0.5):
modFlags = 0
self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,
dest_coord=dest_coord)
self._postQueuedEvents(interval=interval)
|
Drag the left mouse button without modifiers pressed.
Parameters: coordinates to click on screen (tuple (x, y))
dest coordinates to drag to (tuple (x, y))
interval to send event of btn down, drag and up
Returns: None
|
def copy_web_file_to_local(file_path, target_path):
    response = urllib.request.urlopen(file_path)
    # urlopen returns bytes, so write the target file in binary mode.
    with open(target_path, 'wb') as f:
        f.write(response.read())
|
Copies a file from its location on the web to a designated
place on the local machine.
Args:
file_path: Complete url of the file to copy, string (e.g. http://fool.com/input.css).
target_path: Path and name of file on the local machine, string. (e.g. /directory/output.css)
Returns:
None.
|
def task_or_dryrun(*args, **kwargs):
    invoked = bool(not args or kwargs)
    task_class = kwargs.pop("task_class", WrappedCallableTask)
    func = None
    # Only unpack the decorated function when used as a bare decorator;
    # when invoked with arguments, the function arrives later via wrapper.
    if not invoked:
        func, args = args[0], ()
    def wrapper(func):
        return task_class(func, *args, **kwargs)
    wrapper.is_task_or_dryrun = True
    wrapper.wrapped = func
    return wrapper if invoked else wrapper(func)
|
Decorator declaring the wrapped function to be a new-style task.
May be invoked as a simple, argument-less decorator (i.e. ``@task``) or
with arguments customizing its behavior (e.g. ``@task(alias='myalias')``).
Please see the :ref:`new-style task <task-decorator>` documentation for
details on how to use this decorator.
.. versionchanged:: 1.2
Added the ``alias``, ``aliases``, ``task_class`` and ``default``
keyword arguments. See :ref:`task-decorator-arguments` for details.
.. versionchanged:: 1.5
Added the ``name`` keyword argument.
.. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.tasks.WrappedCallableTask`
|
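Both invocation styles described above, sketched with hypothetical task bodies (the parameterized form relies on the invoked guard in the code):
@task_or_dryrun
def deploy():
    print('deploying')

@task_or_dryrun(alias='rb')
def rebuild():
    print('rebuilding')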
def save_training_log(self, **kwargs):
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
_result = self.db.TrainLog.insert_one(kwargs)
_log = self._print_dict(kwargs)
logging.info("[Database] train log: " + _log)
|
Saves the training log; a timestamp is added automatically.
Parameters
-----------
kwargs : logging information
Events such as accuracy, loss, step number, etc.
Examples
---------
>>> db.save_training_log(accuracy=0.33, loss=0.98)
|
def is_fully_within_image(self, image):
shape = normalize_shape(image)
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
|
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
|
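A usage sketch, assuming BoundingBox is the enclosing class with x1/y1/x2/y2 coordinates:
bb = BoundingBox(x1=10, y1=10, x2=50, y2=50)
bb.is_fully_within_image((100, 100, 3))  # True
bb.is_fully_within_image((40, 40, 3))    # False: x2 and y2 reach past the 40px image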
def get_object(self, name):
parts = name.split(".")
model_name = parts.pop(0)
return self.models[model_name].get_object(".".join(parts))
|
Retrieve an object by its absolute name.
|
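Illustration of the dotted-name lookup (container is a hypothetical instance): the first component selects a model from self.models and the remainder is delegated to that model's own get_object:
container.get_object('robot.arm.gripper')
# -> container.models['robot'].get_object('arm.gripper')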
def bootstrap(self, force=0):
force = int(force)
if self.has_pip() and not force:
return
r = self.local_renderer
if r.env.bootstrap_method == GET_PIP:
r.sudo('curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | python')
elif r.env.bootstrap_method == EZ_SETUP:
r.run('wget http://peak.telecommunity.com/dist/ez_setup.py -O /tmp/ez_setup.py')
with self.settings(warn_only=True):
r.sudo('python /tmp/ez_setup.py -U setuptools')
r.sudo('easy_install -U pip')
elif r.env.bootstrap_method == PYTHON_PIP:
r.sudo('apt-get install -y python-pip')
else:
raise NotImplementedError('Unknown pip bootstrap method: %s' % r.env.bootstrap_method)
r.sudo('pip {quiet_flag} install --upgrade pip')
r.sudo('pip {quiet_flag} install --upgrade virtualenv')
|
Installs all the packages necessary for managing virtual
environments with pip.
|
def tf_import_demo_experience(self, states, internals, actions, terminal, reward):
return self.demo_memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
|
Imports a single experience to memory.
|
def aov_theta(times, mags, errs, frequency,
binsize=0.05, minbin=9):
period = 1.0/frequency
fold_time = times[0]
phased = phase_magseries(times,
mags,
period,
fold_time,
wrap=False,
sort=True)
phases = phased['phase']
pmags = phased['mags']
bins = nparange(0.0, 1.0, binsize)
ndets = phases.size
binnedphaseinds = npdigitize(phases, bins)
bin_s1_tops = []
bin_s2_tops = []
binndets = []
goodbins = 0
all_xbar = npmedian(pmags)
for x in npunique(binnedphaseinds):
thisbin_inds = binnedphaseinds == x
thisbin_mags = pmags[thisbin_inds]
if thisbin_mags.size > minbin:
thisbin_ndet = thisbin_mags.size
thisbin_xbar = npmedian(thisbin_mags)
thisbin_s1_top = (
thisbin_ndet *
(thisbin_xbar - all_xbar) *
(thisbin_xbar - all_xbar)
)
thisbin_s2_top = npsum((thisbin_mags - all_xbar) *
(thisbin_mags - all_xbar))
bin_s1_tops.append(thisbin_s1_top)
bin_s2_tops.append(thisbin_s2_top)
binndets.append(thisbin_ndet)
goodbins = goodbins + 1
bin_s1_tops = nparray(bin_s1_tops)
bin_s2_tops = nparray(bin_s2_tops)
binndets = nparray(binndets)
s1 = npsum(bin_s1_tops)/(goodbins - 1.0)
s2 = npsum(bin_s2_tops)/(ndets - goodbins)
theta_aov = s1/s2
return theta_aov
|
Calculates the Schwarzenberg-Czerny AoV statistic at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_aov : float
The value of the AoV statistic at the specified `frequency`.
|
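As implemented above, with \bar{x} the median of all phased magnitudes, \bar{x}_i and n_i the median and size of bin i, r the number of bins containing at least minbin points, and N the total number of phased points:
s_1 = \frac{1}{r - 1} \sum_{i=1}^{r} n_i\,(\bar{x}_i - \bar{x})^2,
\qquad
s_2 = \frac{1}{N - r} \sum_{i=1}^{r} \sum_{j \in i} (x_{ij} - \bar{x})^2,
\qquad
\theta_{\mathrm{AoV}} = \frac{s_1}{s_2}
Note that this implementation measures the within-bin scatter s_2 about the overall median rather than about each bin's own median.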
def to_json(self):
if self.subreference is not None:
return {
"source": self.objectId,
"selector": {
"type": "FragmentSelector",
"conformsTo": "http://ontology-dts.org/terms/subreference",
"value": self.subreference
}
}
else:
return {"source": self.objectId}
|
Return a serializable dict suitable for json.dump or jsonify, based on the target
:return: dict
|
def mark_all_read(user):
BackendClass = stored_messages_settings.STORAGE_BACKEND
backend = BackendClass()
backend.inbox_purge(user)
|
Mark all message instances for a user as read.
:param user: user instance for the recipient
|
def get_track_by_id(session, track_id, track_point_limit=None, track_point_offset=None):
tracking_data = {}
if track_point_limit:
tracking_data['track_point_limit'] = track_point_limit
if track_point_offset:
tracking_data['track_point_offset'] = track_point_offset
response = make_get_request(session, 'tracks/{}'.format(track_id),
params_data=tracking_data)
json_data = response.json()
if response.status_code == 200:
return json_data['result']
else:
raise TrackNotFoundException(message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id'])
|
Gets a specific track by its ID.
|
def get_media_timestamp(self, last_timestamp=None):
r = self.local_renderer
    _latest_timestamp = float('-inf')
for path in self.iter_static_paths():
path = r.env.static_root + '/' + path
self.vprint('checking timestamp of path:', path)
if not os.path.isfile(path):
continue
_latest_timestamp = max(_latest_timestamp, get_last_modified_timestamp(path) or _latest_timestamp)
if last_timestamp is not None and _latest_timestamp > last_timestamp:
break
self.vprint('latest_timestamp:', _latest_timestamp)
return _latest_timestamp
|
Retrieves the most recent timestamp of the media in the static root.
If last_timestamp is given, retrieves the first timestamp more recent than this value.
|
def get_var(data, var_name, not_found=None):
try:
for key in str(var_name).split('.'):
try:
data = data[key]
except TypeError:
data = data[int(key)]
except (KeyError, TypeError, ValueError):
return not_found
else:
return data
|
Gets a variable's value from a nested data structure using a dotted path.
|
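Illustrative lookups; integer path components fall back to list indexing via the TypeError branch:
data = {'a': {'b': [10, 20]}}
get_var(data, 'a.b.1')                       # 20
get_var(data, 'a.missing', not_found='n/a')  # 'n/a'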
def start(self):
self.receiver = self.Receiver(
self.read,
self.write,
self.send_lock,
self.senders,
self.frames_received,
callback=self.receive_callback,
fcs_nack=self.fcs_nack,
)
self.receiver.start()
|
Starts HDLC controller's threads.
|
def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
basePath = permWorkDir
filename = "%s_HyperSearchJobID.pkl" % (outputLabel,)
filepath = os.path.join(basePath, filename)
return filepath
|
Returns the filepath where the HyperSearch jobID is stored
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where the HyperSearch jobID is stored
|