| code (string, lengths 59 to 4.4k) | docstring (string, lengths 5 to 7.69k) |
|---|---|
def task_done(self):
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
|
Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
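A minimal usage sketch (assuming the standard queue.Queue API, with a single worker thread):
>>> import queue, threading
>>> q = queue.Queue()
>>> def worker():
...     while True:
...         item = q.get()
...         if item is None:
...             break
...         q.task_done()
>>> threading.Thread(target=worker, daemon=True).start()
>>> for i in range(3):
...     q.put(i)
>>> q.join()        # returns once task_done() has been called for every put()
>>> q.put(None)     # stop the worker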
|
def _load_images_and_labels(self, images, labels=None):
if not isinstance(images, (list, tuple)):
raise ValueError('Expected an iterable (list or tuple) of strings or img-like objects. '
'Got a {}.'.format(type(images)))
if not len(images) > 0:
raise ValueError('Expected an iterable (list or tuple) of strings or img-like objects '
'of size higher than 0. Got {} items.'.format(len(images)))
if labels is not None and len(labels) != len(images):
raise ValueError('Expected the same length for image set ({}) and '
'labels list ({}).'.format(len(images), len(labels)))
first_file = images[0]
if first_file:
first_img = NeuroImage(first_file)
else:
raise ValueError('Error reading image {}.'.format(repr_imgs(first_file)))
for idx, image in enumerate(images):
try:
img = NeuroImage(image)
self.check_compatibility(img, first_img)
except:
log.exception('Error reading image {}.'.format(repr_imgs(image)))
raise
else:
self.items.append(img)
self.set_labels(labels)
|
Read the images, load them into self.items and set the labels.
|
def db_value(self, value):
if not isinstance(value, UUID):
value = UUID(value)
parts = str(value).split("-")
reordered = ''.join([parts[2], parts[1], parts[0], parts[3], parts[4]])
value = binascii.unhexlify(reordered)
return super(OrderedUUIDField, self).db_value(value)
|
Convert UUID to binary blob
|
def __getLogger(cls):
if cls.__logger is None:
cls.__logger = opf_utils.initLogger(cls)
return cls.__logger
|
Get the logger for this object.
:returns: (Logger) A Logger object.
|
def get_compound_mfr(self, compound):
if compound in self.material.compounds:
return self._compound_mfrs[
self.material.get_compound_index(compound)]
else:
return 0.0
|
Determine the mass flow rate of the specified compound in the stream.
:param compound: Formula and phase of a compound, e.g. "Fe2O3[S1]".
:returns: Mass flow rate. [kg/h]
|
def join(self, shape, body_a, body_b=None, name=None, **kwargs):
ba = self.get_body(body_a)
bb = self.get_body(body_b)
shape = shape.lower()
if name is None:
name = '{}^{}^{}'.format(ba.name, shape, bb.name if bb else '')
self._joints[name] = Joint.build(
shape, name, self, body_a=ba, body_b=bb, **kwargs)
return self._joints[name]
|
Create a new joint that connects two bodies together.
Parameters
----------
shape : str
The "shape" of the joint to use for joining together two bodies.
This should name a type of joint, such as "ball" or "piston".
body_a : str or :class:`Body`
The first body to join together with this joint. If a string is
given, it will be used as the name of a body to look up in the
world.
body_b : str or :class:`Body`, optional
If given, identifies the second body to join together with
``body_a``. If not given, ``body_a`` is joined to the world.
name : str, optional
If given, use this name for the created joint. If not given, a name
will be constructed of the form
"{body_a.name}^{shape}^{body_b.name}".
Returns
-------
joint : :class:`Joint`
The joint object that was created.
|
def _get_error(self, stanza):
if stanza:
logger.debug(u"Roster request failed: {0}".format(
stanza.error.condition_name))
else:
logger.debug(u"Roster request failed: timeout")
self._event_queue.put(RosterNotReceivedEvent(self, stanza))
|
Handle failure of the roster request.
|
def add_observer(self, callback):
if callback in self._observers:
raise ValueError('{} is already an observer of {}'
.format(callback, self))
self._observers.append(callback)
|
Add an observer to this event.
Args:
callback: A function or coroutine callback to call when the event
is fired.
Raises:
ValueError: If the callback has already been added.
|
def nupicBindingsPrereleaseInstalled():
try:
nupicDistribution = pkg_resources.get_distribution("nupic.bindings")
if pkg_resources.parse_version(nupicDistribution.version).is_prerelease:
return True
except pkg_resources.DistributionNotFound:
pass
return False
|
Make an attempt to determine if a pre-release version of nupic.bindings is
installed already.
@return: boolean
|
def do_apply(mutation_pk, dict_synonyms, backup):
filename, mutation_id = filename_and_mutation_id_from_pk(int(mutation_pk))
update_line_numbers(filename)
context = Context(
mutation_id=mutation_id,
filename=filename,
dict_synonyms=dict_synonyms,
)
mutate_file(
backup=backup,
context=context,
)
if context.number_of_performed_mutations == 0:
raise RuntimeError('No mutations performed.')
|
Apply a specified mutant to the source code
:param mutation_pk: mutmut cache primary key of the mutant to apply
:type mutation_pk: str
:param dict_synonyms: list of synonym keywords for a python dictionary
:type dict_synonyms: list[str]
:param backup: if :obj:`True` create a backup of the source file
before applying the mutation
:type backup: bool
|
def get_params(self, param=""):
fullcurdir = os.path.realpath(os.path.curdir)
if not param:
for index, (key, value) in enumerate(self.paramsdict.items()):
if isinstance(value, str):
value = value.replace(fullcurdir+"/", "./")
sys.stdout.write("{}{:<4}{:<28}{:<45}\n"\
.format(self._spacer, index, key, value))
else:
try:
if int(param):
return self.paramsdict.values()[int(param)]
except (ValueError, TypeError, NameError, IndexError):
try:
return self.paramsdict[param]
except KeyError:
return 'key not recognized'
|
pretty prints params if called as a function
|
def concatenate_textlcs_for_objectid(lcbasedir,
objectid,
aperture='TF1',
postfix='.gz',
sortby='rjd',
normalize=True,
recursive=True):
LOGINFO('looking for light curves for %s, aperture %s in directory: %s'
% (objectid, aperture, lcbasedir))
if recursive is False:
matching = glob.glob(os.path.join(lcbasedir,
'*%s*%s*%s' % (objectid,
aperture,
postfix)))
else:
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(lcbasedir,
'**',
'*%s*%s*%s' % (objectid,
aperture,
postfix)),
recursive=True)
LOGINFO('found %s files: %s' % (len(matching), repr(matching)))
else:
walker = os.walk(lcbasedir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
'*%s*%s*%s' % (objectid,
aperture,
postfix))
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
LOGINFO(
'found %s in dir: %s' % (repr(foundfiles),
os.path.join(root,sdir))
)
if matching and len(matching) > 0:
clcdict = concatenate_textlcs(matching,
sortby=sortby,
normalize=normalize)
return clcdict
else:
LOGERROR('did not find any light curves for %s and aperture %s' %
(objectid, aperture))
return None
|
This concatenates all text LCs for an objectid with the given aperture.
Does not care about overlaps or duplicates. The light curves must all be
from the same aperture.
The intended use is to concatenate light curves across CCDs or instrument
changes for a single object. These can then be normalized later using
standard astrobase tools to search for variability and/or periodicity.
lcbasedir is the directory to start searching in.
objectid is the object to search for.
aperture is the aperture postfix to use: (TF1 = aperture 1,
TF2 = aperture 2,
TF3 = aperture 3)
sortby is a column to sort the final concatenated light curve by in
ascending order.
If normalize is True, then each light curve's magnitude columns are
normalized to zero, and the whole light curve is then normalized to the
global median magnitude for each magnitude column.
If recursive is True, then the function will search recursively in lcbasedir
for any light curves matching the specified criteria. This may take a while,
especially on network filesystems.
The returned lcdict has an extra column: 'lcn' that tracks which measurement
belongs to which input light curve. This can be used with
lcdict['concatenated'] which relates input light curve index to input light
curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that
contains the total number of concatenated light curves.
|
def get_request(self, request_id, status=False):
if status:
response = self._perform_request(
'/requests/' + request_id + '/status')
else:
response = self._perform_request(
'/requests/%s' % request_id)
return response
|
Retrieves a single request by ID.
:param request_id: The unique ID of the request.
:type request_id: ``str``
:param status: Retrieve the full status of the request.
:type status: ``bool``
|
def Watson(T, Hvap_ref, T_Ref, Tc, exponent=0.38):
Tr = T/Tc
Trefr = T_Ref/Tc
H2 = Hvap_ref*((1-Tr)/(1-Trefr))**exponent
return H2
|
Adjusts the enthalpy of vaporization from a reference temperature to another temperature using the Watson correlation.
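Illustrative usage sketch (approximate property values for water, assumed here for demonstration: Hvap_ref at the normal boiling point, Tc the critical temperature):
>>> Hvap_320 = Watson(T=320.0, Hvap_ref=40650.0, T_Ref=373.15, Tc=647.14)  # J/mol, roughly 4.35e4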
|
def model_uncert(self):
Y = self.photometry_array.T
Y /= np.median(Y, axis=1)[:, None]
C = np.median(Y, axis=0)
nstars, nobs = np.shape(Y)
Z = np.empty((nstars, 4))
qs = self.qs.astype(int)
for s in range(4):
Z[:, s] = np.median((Y / C)[:, qs == s], axis=1)
resid2 = (Y - Z[:, qs] * C)**2
z = Z[:, qs]
trend = z * C[None, :]
lnS = np.log(np.nanmedian(resid2, axis=0))
jitter = np.log(0.1*np.nanmedian(np.abs(np.diff(Y, axis=1))))
cal_ferr = np.sqrt(np.exp(2*(jitter/trend))+z**2*np.exp(lnS)[None, :])
self.modeled_uncert = cal_ferr
self.target_uncert = cal_ferr[0]
|
Estimate the photometric uncertainties on each data point following Equation A.2 of The Paper.
Based on the kepcal package of Dan Foreman-Mackey.
|
def send(self, msg):
slipDriver = sliplib.Driver()
slipData = slipDriver.send(msg)
res = self._serialPort.write(slipData)
return res
|
Encodes data to slip protocol and then sends over serial port
Uses the SlipLib module to convert the message data into SLIP format.
The message is then sent over the serial port opened with the instance
of the Faraday class used when invoking send().
Args:
msg (bytes): Bytes format message to send over serial port.
Returns:
int: Number of bytes transmitted over the serial port.
|
def path_from_keywords(keywords,into='path'):
subdirs = []
def prepare_string(s):
s = str(s)
s = re.sub('[][{},*"'+f"'{os.sep}]",'_',s)
if into=='file':
s = s.replace('_', ' ')
if ' ' in s:
s = s.title()
s = s.replace(' ','')
return s
if isinstance(keywords,set):
keywords_list = sorted(keywords)
for property in keywords_list:
subdirs.append(prepare_string(property))
else:
keywords_list = sorted(keywords.items())
for property,value in keywords_list:
if Bool.valid(value):
subdirs.append(('' if value else ('not_' if into=='path' else 'not'))+prepare_string(property))
elif (Float|Integer).valid(value):
subdirs.append('{}{}'.format(prepare_string(property),prepare_string(value)))
else:
subdirs.append('{}{}{}'.format(prepare_string(property),'_' if into == 'path' else '',prepare_string(value)))
if into == 'path':
out = os.path.join(*subdirs)
else:
out = '_'.join(subdirs)
return out
|
Turn keyword pairs into a path or a filename.
If `into=='path'`, the keywords form a directory hierarchy; otherwise they are joined with underscores into a single filename.
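A small sketch using a set of keywords, which avoids the external Bool/Float/Integer validators entirely (output shown for a POSIX os.sep):
>>> path_from_keywords({'sparse', 'normalized'}, into='path')
'normalized/sparse'
>>> path_from_keywords({'sparse', 'normalized'}, into='file')
'normalized_sparse'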
|
def webhook_handler(*event_types):
event_types_to_register = set()
for event_type in event_types:
event_type = event_type.lower()
if "*" in event_type:
for t in WEBHOOK_EVENT_TYPES:
if fnmatch(t, event_type):
event_types_to_register.add(t)
elif event_type not in WEBHOOK_EVENT_TYPES:
raise ValueError("Unknown webhook event: %r" % (event_type))
else:
event_types_to_register.add(event_type)
def decorator(func):
for event_type in event_types_to_register:
WEBHOOK_SIGNALS[event_type].connect(func)
return func
return decorator
|
Decorator that registers a function as a webhook handler.
Usage examples:
>>> # Hook a single event
>>> @webhook_handler("payment.sale.completed")
>>> def on_payment_received(event):
>>> payment = event.get_resource()
>>> print("Received payment:", payment)
>>> # Multiple events supported
>>> @webhook_handler("billing.subscription.suspended", "billing.subscription.cancelled")
>>> def on_subscription_stop(event):
>>> subscription = event.get_resource()
>>> print("Stopping subscription:", subscription)
>>> # Using a wildcard works as well
>>> @webhook_handler("billing.subscription.*")
>>> def on_subscription_update(event):
>>> subscription = event.get_resource()
>>> print("Updated subscription:", subscription)
|
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
|
Configure environment for DeepMind-style Atari.
|
def GetConsoleTitle() -> str:
arrayType = ctypes.c_wchar * MAX_PATH
values = arrayType()
ctypes.windll.kernel32.GetConsoleTitleW(values, MAX_PATH)
return values.value
|
GetConsoleTitle from Win32.
Return str.
|
def from_rectilinear(cls, x, y, z, formatter=numpy_formatter):
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = np.ma.asarray(z, dtype=np.float64)
if x.ndim != 1:
raise TypeError(
"'x' must be a 1D array but is a {:d}D array".format(x.ndim))
if y.ndim != 1:
raise TypeError(
"'y' must be a 1D array but is a {:d}D array".format(y.ndim))
if z.ndim != 2:
raise TypeError(
"'z' must be a 2D array but is a {:d}D array".format(z.ndim))
if x.size != z.shape[1]:
raise TypeError(
("the length of 'x' must be equal to the number of columns in "
"'z' but the length of 'x' is {:d} and 'z' has {:d} "
"columns").format(x.size, z.shape[1]))
if y.size != z.shape[0]:
raise TypeError(
("the length of 'y' must be equal to the number of rows in "
"'z' but the length of 'y' is {:d} and 'z' has {:d} "
"rows").format(y.size, z.shape[0]))
y, x = np.meshgrid(y, x, indexing='ij')
return cls(x, y, z, formatter)
|
Construct a contour generator from a rectilinear grid.
Parameters
----------
x : array_like
x coordinates of each column of `z`. Must be the same length as
the number of columns in `z`. (len(x) == z.shape[1])
y : array_like
y coordinates of each row of `z`. Must be the same length as the
number of rows in `z`. (len(y) == z.shape[0])
z : array_like
The 2-dimensional rectilinear grid of data to compute contours for.
Masked arrays are supported.
formatter : callable
A conversion function to convert from the internal `Matplotlib`_
contour format to an external format. See :ref:`formatters` for
more information.
Returns
-------
: :class:`QuadContourGenerator`
Initialized contour generator.
|
def create(self, *args, **kwargs):
data = self.get_data('floating_ips/',
type=POST,
params={'droplet_id': self.droplet_id})
if data:
self.ip = data['floating_ip']['ip']
self.region = data['floating_ip']['region']
return self
|
Creates a FloatingIP and assigns it to a Droplet.
Note: Every argument and parameter given to this method will be
assigned to the object.
Args:
droplet_id: int - droplet id
|
def send_message(self, channel, text):
if isinstance(channel, SlackIM) or isinstance(channel, SlackUser):
self._bot.send_im(channel, text)
elif isinstance(channel, SlackRoom):
self._bot.send_message(channel, text)
elif isinstance(channel, basestring):
if channel[0] == '@':
self._bot.send_im(channel[1:], text)
elif channel[0] == '#':
self._bot.send_message(channel[1:], text)
else:
self._bot.send_message(channel, text)
else:
self._bot.send_message(channel, text)
|
Used to send a message to the specified channel.
* channel - can be a channel or user
* text - message to send
|
def enterabs(self, time, priority, action, argument):
event = Event(time, priority, action, argument)
heapq.heappush(self._queue, event)
return event
|
Enter a new event in the queue at an absolute time.
Returns an ID for the event which can be used to remove it,
if necessary.
|
def update_loadbalancer(self, datacenter_id,
loadbalancer_id, **kwargs):
data = {}
for attr, value in kwargs.items():
data[self._underscore_to_camelcase(attr)] = value
response = self._perform_request(
url='/datacenters/%s/loadbalancers/%s' % (datacenter_id,
loadbalancer_id),
method='PATCH',
data=json.dumps(data))
return response
|
Updates a load balancer
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer_id: The unique ID of the load balancer.
:type loadbalancer_id: ``str``
|
def check_completion(self):
terminate = False
term_dict = self.get_termination_stats(get_cos=self.costol is not None)
terminate |= np.all(np.abs(term_dict['delta_vals']) < self.paramtol)
terminate |= (term_dict['delta_err'] < self.errtol)
terminate |= (term_dict['exp_err'] < self.exptol)
terminate |= (term_dict['frac_err'] < self.fractol)
if self.costol is not None:
terminate |= (term_dict['model_cosine'] < self.costol)
return terminate
|
Return a bool indicating whether the algorithm has found a satisfactory minimum.
|
def save_vocab(count=None, name='vocab.txt'):
if count is None:
count = []
pwd = os.getcwd()
vocabulary_size = len(count)
with open(os.path.join(pwd, name), "w") as f:
for i in xrange(vocabulary_size):
f.write("%s %d\n" % (tf.compat.as_text(count[i][0]), count[i][1]))
tl.logging.info("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd))
|
Save the vocabulary to a file so the model can be reloaded.
Parameters
----------
count : a list of tuple and list
count[0] is a list : the number of rare words,
count[1:] are tuples : the number of occurrence of each word,
e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
Examples
---------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> tl.nlp.save_vocab(count, name='vocab_text8.txt')
>>> vocab_text8.txt
UNK 418391
the 1061396
of 593677
and 416629
one 411764
in 372201
a 325873
to 316376
|
def subscribe(self, event, bet_ids):
if not self._subscriptions.get(event):
self._subscriptions[event] = set()
self._subscriptions[event] = self._subscriptions[event].union(bet_ids)
|
Subscribe to event for given bet ids.
|
def ensure_app_data_dir(appname, *args):
from ubelt import util_path
dpath = get_app_data_dir(appname, *args)
util_path.ensuredir(dpath)
return dpath
|
Calls `get_app_data_dir` but ensures the directory exists.
Args:
appname (str): the name of the application
*args: any other subdirectories may be specified
SeeAlso:
get_app_data_dir
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_data_dir('ubelt')
>>> assert exists(dpath)
|
def foex(a, b):
return (np.sum(a > b, dtype=float) / len(a) - 0.5) * 100
|
Returns the factor of exceedance
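A quick sketch of the calculation: the percentage of points in `a` exceeding `b`, minus 50:
>>> import numpy as np
>>> float(foex(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0.5, 2.5, 2.5, 3.0])))
25.0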
|
def set_date_from_event(self, event, issue):
if not event.get('commit_id', None):
issue['actual_date'] = timestring_to_datetime(issue['closed_at'])
return
try:
commit = self.fetcher.fetch_commit(event)
issue['actual_date'] = timestring_to_datetime(
commit['author']['date']
)
except ValueError:
print("WARNING: Can't fetch commit {0}. "
"It is probably referenced from another repo.".
format(event['commit_id']))
issue['actual_date'] = timestring_to_datetime(issue['closed_at'])
|
Set the issue's actual date from the event's commit date, falling back to the issue's closed date.
:param dict event: event data
:param dict issue: issue data
|
def _add_to_conf(self, new_conf):
for section in new_conf:
if section not in self.conf:
self.conf[section] = new_conf[section]
else:
for param in new_conf[section]:
self.conf[section][param] = new_conf[section][param]
|
Add new configuration to self.conf.
Adds configuration parameters in new_conf to self.conf.
If they already existed in conf, overwrite them.
:param new_conf: new configuration, to add
|
def validated_formatter(self, url_format):
valid_parameters = {
"${CLUSTER}": "cluster",
"${ENVIRON}": "environ",
"${TOPOLOGY}": "topology",
"${ROLE}": "role",
"${USER}": "user",
}
dummy_formatted_url = url_format
for key, value in valid_parameters.items():
dummy_formatted_url = dummy_formatted_url.replace(key, value)
if '$' in dummy_formatted_url:
raise Exception("Invalid viz.url.format: %s" % (url_format))
return url_format
|
validate visualization url format
|
def exists(self, pk):
conn = self._get_connection()
key = self._get_key_for_id(pk)
return conn.exists(key)
|
exists - Tests whether a record holding the given primary key exists.
@param pk - Primary key (see getPk method)
Example usage: Waiting for an object to be deleted without fetching the object or running a filter.
This is a very cheap operation.
@return <bool> - True if object with given pk exists, otherwise False
|
def _write_callback(connection_id, data_buffer, data_length_pointer):
try:
self = _connection_refs.get(connection_id)
if not self:
socket = _socket_refs.get(connection_id)
else:
socket = self._socket
if not self and not socket:
return 0
data_length = deref(data_length_pointer)
data = bytes_from_buffer(data_buffer, data_length)
if self and not self._done_handshake:
self._client_hello += data
error = None
try:
sent = socket.send(data)
except (socket_.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedNoNotify
return SecurityConst.errSSLClosedAbort
if sent != data_length:
pointer_set(data_length_pointer, sent)
return SecurityConst.errSSLWouldBlock
return 0
except (KeyboardInterrupt) as e:
self._exception = e
return SecurityConst.errSSLPeerUserCancelled
|
Callback called by Secure Transport to actually write to the socket
:param connection_id:
An integer identifying the connection
:param data_buffer:
A char pointer FFI type containing the data to write
:param data_length_pointer:
A size_t pointer FFI type of the amount of data to write. Will be
overwritten with the amount of data actually written on return.
:return:
An integer status code of the result - 0 for success
|
def GetHeaderGuardCPPVariable(filename):
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
suffix = os.sep
if suffix == '\\':
suffix += '\\'
file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root)
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
|
Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
|
async def run_tasks(self):
tasks = self.get_tasks()
self._gathered_tasks = asyncio.gather(*tasks, loop=self.loop)
try:
await self._gathered_tasks
except CancelledError:
pass
|
Run the tasks attached to the instance
|
def set_item(filename, item):
with atomic_write(os.fsencode(str(filename))) as temp_file:
with open(os.fsencode(str(filename))) as products_file:
products_data = json.load(products_file)
uuid_list = [i for i in filter(
lambda z: z["uuid"] == str(item["uuid"]), products_data)]
if len(uuid_list) == 0:
products_data.append(item)
json.dump(products_data, temp_file)
return True
return None
|
Save entry to JSON file
|
def unread_events(self):
return [conv_event for conv_event in self._events
if conv_event.timestamp > self.latest_read_timestamp]
|
Loaded events which are unread sorted oldest to newest.
Some Hangouts clients don't update the read timestamp for certain event
types, such as membership changes, so this may return more unread
events than these clients will show. There's also a delay between
sending a message and the user's own message being considered read.
(list of :class:`.ConversationEvent`).
|
def p_file_lic_conc(self, f_term, predicate):
try:
for _, _, licenses in self.graph.triples((f_term, predicate, None)):
if (licenses, RDF.type, self.spdx_namespace['ConjunctiveLicenseSet']) in self.graph:
lics = self.handle_conjunctive_list(licenses)
self.builder.set_concluded_license(self.doc, lics)
elif (licenses, RDF.type, self.spdx_namespace['DisjunctiveLicenseSet']) in self.graph:
lics = self.handle_disjunctive_list(licenses)
self.builder.set_concluded_license(self.doc, lics)
else:
try:
lics = self.handle_lics(licenses)
self.builder.set_concluded_license(self.doc, lics)
except SPDXValueError:
self.value_error('FILE_SINGLE_LICS', licenses)
except CardinalityError:
self.more_than_one_error('file {0}'.format(predicate))
|
Set the file's concluded license.
|
def extract(self, other):
if type(other) is float or \
type(other) is numpy.float64 or \
type(other) is numpy.float32:
return self._extract_mass(other)
elif self._is_compound_mass_tuple(other):
return self._extract_compound_mass(other[0], other[1])
elif type(other) is str:
return self._extract_compound(other)
elif type(other) is Material:
return self._extract_material(other)
else:
raise TypeError("Invalid extraction argument.")
|
Extract 'other' from this package, modifying this package and
returning the extracted material as a new package.
:param other: Can be one of the following:
* float: A mass equal to other is extracted from self. Self is
reduced by other and the extracted package is returned as
a new package.
* tuple (compound, mass): The other tuple specifies the mass
of a compound to be extracted. It is extracted from self and
the extracted mass is returned as a new package.
* string: The 'other' string specifies the compound to be
extracted. All of the mass of that compound will be removed
from self and a new package created with it.
* Material: The 'other' material specifies the list of
compounds to extract.
:returns: New MaterialPackage object.
|
def getVector(self, tree, branchName):
if (tree, branchName) in self.__class__.addressDict:
return self.__class__.addressDict[(tree, branchName)]
itsVector = self._getVector(tree, branchName)
self.__class__.addressDict[(tree, branchName)] = itsVector
return itsVector
|
return the ROOT.vector object for the branch.
|
def wrap(text, width=70, **kwargs):
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
|
Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
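Usage sketch (assuming this helper mirrors the standard textwrap.wrap behaviour):
>>> wrap("The quick brown fox jumps over the lazy dog", width=15)
['The quick brown', 'fox jumps over', 'the lazy dog']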
|
def done_chomping(self) -> bool:
return self.tag in self.graph.nodes[self.target_node]
|
Determines if the algorithm is complete by checking if the target node of this analysis has been scored
yet. Because the algorithm removes edges when it gets stuck until it is un-stuck, it is always guaranteed to
finish.
:return: Is the algorithm done running?
|
def _handle_event(self, conv_event):
if not self._is_scrolling:
self.set_focus(conv_event.id_)
else:
self._modified()
|
Handle updating and scrolling when a new event is added.
Automatically scroll down to show the new text if the bottom is
showing. This allows the user to scroll up to read previous messages
while new messages are arriving.
|
def parse_alert(server_handshake_bytes):
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
if record_type != b'\x15':
continue
if len(record_data) != 2:
return None
return (int_from_bytes(record_data[0:1]), int_from_bytes(record_data[1:2]))
return None
|
Parses the handshake for protocol alerts
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
None or an 2-element tuple of integers:
0: 1 (warning) or 2 (fatal)
1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2)
|
def _debug_off():
if _os.path.exists(__debugflag__):
_os.remove(__debugflag__)
__loglevel__ = "ERROR"
_LOGGER.info("debugging turned off")
_set_debug_dict(__loglevel__)
|
turns off debugging by removing hidden tmp file
|
async def _connect(self):
logger.debug("connecting to the stream")
await self.client.setup
if self.session is None:
self.session = self.client._session
kwargs = await self.client.headers.prepare_request(**self.kwargs)
request = self.client.error_handler(self.session.request)
return await request(timeout=0, **kwargs)
|
Connect to the stream
Returns
-------
asyncio.coroutine
The streaming response
|
def encode_observation(ob_space, placeholder):
if isinstance(ob_space, Discrete):
return tf.to_float(tf.one_hot(placeholder, ob_space.n))
elif isinstance(ob_space, Box):
return tf.to_float(placeholder)
elif isinstance(ob_space, MultiDiscrete):
placeholder = tf.cast(placeholder, tf.int32)
one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-1])]
return tf.concat(one_hots, axis=-1)
else:
raise NotImplementedError
|
Encode input in the way that is appropriate to the observation space
Parameters:
----------
ob_space: gym.Space observation space
placeholder: tf.placeholder observation input placeholder
|
def dist_eudex(src, tar, weights='exponential', max_length=8):
return Eudex().dist(src, tar, weights, max_length)
|
Return normalized Hamming distance between Eudex hashes of two terms.
This is a wrapper for :py:meth:`Eudex.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
weights : str, iterable, or generator function
The weights or weights generator function
max_length : int
The number of characters to encode as a eudex hash
Returns
-------
int
The normalized Eudex Hamming distance
Examples
--------
>>> round(dist_eudex('cat', 'hat'), 12)
0.062745098039
>>> round(dist_eudex('Niall', 'Neil'), 12)
0.000980392157
>>> round(dist_eudex('Colin', 'Cuilen'), 12)
0.004901960784
>>> round(dist_eudex('ATCG', 'TAGC'), 12)
0.197549019608
|
def trainTM(sequence, timeSteps, noiseLevel):
currentColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
predictedColumns = np.zeros(tm.numberOfColumns(), dtype="uint32")
ts = 0
for t in range(timeSteps):
tm.reset()
for k in range(4):
v = corruptVector(sequence[k][:], noiseLevel, sparseCols)
tm.compute(set(v[:].nonzero()[0].tolist()), learn=True)
activeColumnsIndices = [tm.columnForCell(i) for i in tm.getActiveCells()]
predictedColumnIndices = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
currentColumns = [1 if i in activeColumnsIndices else 0 for i in range(tm.numberOfColumns())]
acc = accuracy(currentColumns, predictedColumns)
x.append(ts)
y.append(acc)
ts += 1
predictedColumns = [1 if i in predictedColumnIndices else 0 for i in range(tm.numberOfColumns())]
|
Trains the TM with given sequence for a given number of time steps and level of input
corruption
@param sequence (array) array whose rows are the input characters
@param timeSteps (int) number of time steps in which the TM will be presented with sequence
@param noiseLevel (float) amount of noise to be applied on the characters in the sequence
|
def _parse_document_id(elm_tree):
xpath = '//md:content-id/text()'
return [x for x in elm_tree.xpath(xpath, namespaces=COLLECTION_NSMAP)][0]
|
Given the parsed xml to an `ElementTree`,
parse the id from the content.
|
def onehot_like(a, index, value=1):
x = np.zeros_like(a)
x[index] = value
return x
|
Creates an array like a, with all values
set to 0 except one.
Parameters
----------
a : array_like
The returned one-hot array will have the same shape
and dtype as this array
index : int
The index that should be set to `value`
value : single value compatible with a.dtype
The value to set at the given index
Returns
-------
`numpy.ndarray`
One-hot array with the given value at the given
location and zeros everywhere else.
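Usage sketch (exact repr depends on the NumPy version):
>>> import numpy as np
>>> onehot_like(np.array([0.2, 0.5, 0.3]), index=1)
array([0., 1., 0.])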
|
def fill(text, width=70, **kwargs):
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
|
Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
|
def expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
edge_filter = and_edge_predicates(edge_predicates)
for u, v in itt.product(graph, repeat=2):
if graph.has_edge(u, v) or not universe.has_edge(u, v):
continue
rs = defaultdict(list)
for key, data in universe[u][v].items():
if not edge_filter(universe, u, v, key):
continue
rs[data[RELATION]].append((key, data))
if 1 == len(rs):
relation = list(rs)[0]
for key, data in rs[relation]:
graph.add_edge(u, v, key=key, **data)
else:
log.debug('Multiple relationship types found between %s and %s', u, v)
|
Edges between entities in the sub-graph that pass the given filters.
:param universe: The full graph
:param graph: A sub-graph to find the upstream information
:param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool
|
def rand_blend_mask(shape, rand=rand.uniform(-10, 10), **kwargs):
z = rand(shape[0])
noise = snoise2dz((shape[1], shape[2]), z, **kwargs)
return noise
|
random blending masks
|
def refresh_access_token(self,):
logger.debug("REFRESHING TOKEN")
self.token_time = time.time()
credentials = {
'token_time': self.token_time
}
if self.oauth_version == 'oauth1':
self.access_token, self.access_token_secret = self.oauth.get_access_token(self.access_token, self.access_token_secret, params={"oauth_session_handle": self.session_handle})
credentials.update({
'access_token': self.access_token,
'access_token_secret': self.access_token_secret,
'session_handle': self.session_handle,
'token_time': self.token_time
})
else:
headers = self.generate_oauth2_headers()
raw_access = self.oauth.get_raw_access_token(data={"refresh_token": self.refresh_token, 'redirect_uri': self.callback_uri,'grant_type':'refresh_token'}, headers=headers)
credentials.update(self.oauth2_access_parser(raw_access))
return credentials
|
Refresh access token
|
def __set_baudrate(self, baud):
log.info('Changing communication to %s baud', baud)
self.__writeln(UART_SETUP.format(baud=baud))
time.sleep(0.1)
try:
self._port.setBaudrate(baud)
except AttributeError:
self._port.baudrate = baud
|
Set the serial port baud rate, if supported.
|
def template_heron_tools_hcl(cl_args, masters, zookeepers):
heron_tools_hcl_template = "%s/standalone/templates/heron_tools.template.hcl" \
% cl_args["config_path"]
heron_tools_hcl_actual = "%s/standalone/resources/heron_tools.hcl" \
% cl_args["config_path"]
single_master = masters[0]
template_file(heron_tools_hcl_template, heron_tools_hcl_actual,
{
"<zookeeper_host:zookeeper_port>": ",".join(
['%s' % zk if ":" in zk else '%s:2181' % zk for zk in zookeepers]),
"<heron_tracker_executable>": '"%s/heron-tracker"' % config.get_heron_bin_dir(),
"<heron_tools_hostname>": '"%s"' % get_hostname(single_master, cl_args),
"<heron_ui_executable>": '"%s/heron-ui"' % config.get_heron_bin_dir()
})
|
Template the heron tools HCL configuration file.
|
def is_temple_project():
if not os.path.exists(temple.constants.TEMPLE_CONFIG_FILE):
msg = 'No {} file found in repository.'.format(temple.constants.TEMPLE_CONFIG_FILE)
raise temple.exceptions.InvalidTempleProjectError(msg)
|
Raises `InvalidTempleProjectError` if repository is not a temple project
|
def start_api_and_rpc_workers(self):
pool = eventlet.GreenPool()
quark_rpc = self.serve_rpc()
pool.spawn(quark_rpc.wait)
pool.waitall()
|
Initializes eventlet, spawns the workers returned from serve_rpc,
and waits for them to exit.
|
def count_sources(edge_iter: EdgeIterator) -> Counter:
return Counter(u for u, _, _ in edge_iter)
|
Count the source nodes in an edge iterator with keys and data.
:return: A counter of source nodes in the iterable
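Usage sketch with plain 3-tuples standing in for the (source, target, key) edges of an edge iterator:
>>> count_sources([('a', 'b', 0), ('a', 'c', 0), ('b', 'c', 0)])
Counter({'a': 2, 'b': 1})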
|
def strokewidth(self, w=None):
if w is not None:
self._canvas.strokewidth = w
else:
return self._canvas.strokewidth
|
Set the stroke width.
:param w: Stroke width.
:return: If no width was specified then current width is returned.
|
def subseq(self, start, end):
return Fastq(self.id, self.seq[start:end], self.qual[start:end])
|
Return a Fastq object with the same name, containing the bases from start up to (but not including) end.
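Usage sketch (assuming the Fastq constructor takes (id, seq, qual), as in the call above, and exposes them as attributes):
>>> fq = Fastq('read1', 'ACGTACGT', 'IIIIIIII')
>>> sub = fq.subseq(2, 5)
>>> (sub.seq, sub.qual)
('GTA', 'III')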
|
def is_locked(self):
if self.provider.lock_manager is None:
return False
return self.provider.lock_manager.is_url_locked(self.get_ref_url())
|
Return True, if URI is locked.
|
def _review_all(self, launchers):
if self.launch_args is not None:
proceed = self.review_args(self.launch_args,
show_repr=True,
heading='Meta Arguments')
if not proceed: return False
reviewers = [self.review_args,
self.review_command,
self.review_launcher]
for (count, launcher) in enumerate(launchers):
if not all(reviewer(launcher) for reviewer in reviewers):
print("\n == Aborting launch ==")
return False
if len(launchers)!= 1 and count < len(launchers)-1:
skip_remaining = self.input_options(['Y', 'n','quit'],
'\nSkip remaining reviews?', default='y')
if skip_remaining == 'y': break
elif skip_remaining == 'quit': return False
if self.input_options(['y','N'], 'Execute?', default='n') != 'y':
return False
else:
return self._launch_all(launchers)
|
Runs the review process for all the launchers.
|
def contains(self, items, pad=0):
o = ((items >= self.l-pad) & (items < self.r+pad))
if len(o.shape) == 2:
o = o.all(axis=-1)
elif len(o.shape) == 1:
o = o.all()
return o
|
Test whether coordinates are contained within this tile.
Parameters
----------
items : ndarray [3] or [N, 3]
N coordinates to check are within the bounds of the tile
pad : integer or ndarray [3]
anisotropic padding to apply in the contain test
Examples
--------
>>> Tile(5, dim=2).contains([[-1, 0], [2, 3], [2, 6]])
array([False, True, False], dtype=bool)
|
def clean_attribute(attribute):
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
}.get(attribute, attribute)
if attribute[0] == '_':
attribute = attribute[1:]
if attribute in set(['http_equiv']) or attribute.startswith('data_'):
attribute = attribute.replace('_', '-').lower()
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute
|
Normalize attribute names, expanding shorthands and working around
limitations in Python's syntax.
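A few illustrative inputs and their normalized outputs:
>>> clean_attribute('className')
'class'
>>> clean_attribute('data_toggle')
'data-toggle'
>>> clean_attribute('xlink_href')
'xlink:href'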
|
def value(self):
from numpy_sugar.linalg import ddot, sum2diag
if self._cache["value"] is not None:
return self._cache["value"]
scale = exp(self.logscale)
delta = 1 / (1 + exp(-self.logitdelta))
v0 = scale * (1 - delta)
v1 = scale * delta
mu = self.eta / self.tau
n = len(mu)
if self._QS is None:
K = zeros((n, n))
else:
Q0 = self._QS[0][0]
S0 = self._QS[1]
K = dot(ddot(Q0, S0), Q0.T)
A = sum2diag(sum2diag(v0 * K, v1), 1 / self.tau)
m = mu - self.mean()
v = -n * log(2 * pi)
v -= slogdet(A)[1]
v -= dot(m, solve(A, m))
self._cache["value"] = v / 2
return self._cache["value"]
|
Log of the marginal likelihood.
Formally,
.. math::
- \frac{n}{2}\log{2\pi} - \frac{1}{2} \log{\left|
v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right|}
- \frac{1}{2}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)^{\intercal}
\left( v_0 \mathrm K + v_1 \mathrm I +
\tilde{\Sigma} \right)^{-1}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)
Returns
-------
float
:math:`\log{p(\tilde{\boldsymbol\mu})}`
|
def get_data(self, **kwargs):
limit = int(kwargs.get('limit', 288))
end_date = kwargs.get('end_date', False)
if end_date and isinstance(end_date, datetime.datetime):
end_date = self.convert_datetime(end_date)
if self.mac_address is not None:
service_address = 'devices/%s' % self.mac_address
self.api_instance.log('SERVICE ADDRESS: %s' % service_address)
data = dict(limit=limit)
if end_date:
data.update({'endDate': end_date})
self.api_instance.log('DATA:')
self.api_instance.log(data)
return self.api_instance.api_call(service_address, **data)
|
Get the data for a specific device for a specific end date
Keyword Arguments:
limit - max 288
end_date - is Epoch in milliseconds
:return:
|
def delete(ctx):
user, project_name = get_project_or_local(ctx.obj.get('project'))
if not click.confirm("Are you sure you want to delete project `{}/{}`".format(user, project_name)):
click.echo('Exiting without deleting project.')
sys.exit(1)
try:
response = PolyaxonClient().project.delete_project(user, project_name)
local_project = ProjectManager.get_config()
if local_project and (user, project_name) == (local_project.user, local_project.name):
ProjectManager.purge()
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not delete project `{}/{}`.'.format(user, project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.status_code == 204:
Printer.print_success("Project `{}/{}` was deleted successfully".format(user, project_name))
|
Delete project.
Uses [Caching](/references/polyaxon-cli/#caching)
|
def size(self):
try:
return self._stat.st_size
except:
self._stat = self.stat()
return self.size
|
File size in bytes.
|
def get_anonymization_salt(ts):
salt_key = 'stats:salt:{}'.format(ts.date().isoformat())
salt = current_cache.get(salt_key)
if not salt:
salt_bytes = os.urandom(32)
salt = b64encode(salt_bytes).decode('utf-8')
current_cache.set(salt_key, salt, timeout=60 * 60 * 24)
return salt
|
Get the anonymization salt based on the event timestamp's day.
|
def write8(self, register, value):
value = value & 0xFF
self._bus.write_byte_data(self._address, register, value)
self._logger.debug("Wrote 0x%02X to register 0x%02X",
value, register)
|
Write an 8-bit value to the specified register.
|
def get(http, path, root=METADATA_ROOT, recursive=None):
url = urlparse.urljoin(root, path)
url = _helpers._add_query_parameter(url, 'recursive', recursive)
response, content = transport.request(
http, url, headers=METADATA_HEADERS)
if response.status == http_client.OK:
decoded = _helpers._from_bytes(content)
if response['content-type'] == 'application/json':
return json.loads(decoded)
else:
return decoded
else:
raise http_client.HTTPException(
'Failed to retrieve {0} from the Google Compute Engine '
'metadata service. Response:\n{1}'.format(url, response))
|
Fetch a resource from the metadata server.
Args:
http: an object to be used to make HTTP requests.
path: A string indicating the resource to retrieve. For example,
'instance/service-accounts/default'
root: A string indicating the full path to the metadata server root.
recursive: A boolean indicating whether to do a recursive query of
metadata. See
https://cloud.google.com/compute/docs/metadata#aggcontents
Returns:
A dictionary if the metadata server returns JSON, otherwise a string.
Raises:
http_client.HTTPException if an error occurred while
retrieving metadata.
|
def _run_flup(app, config, mode):
if mode == "flup-fcgi":
from flup.server.fcgi import WSGIServer, __version__ as flupver
elif mode == "flup-fcgi-fork":
from flup.server.fcgi_fork import WSGIServer, __version__ as flupver
else:
raise ValueError
_logger.info(
"Running WsgiDAV/{} {}/{}...".format(
__version__, WSGIServer.__module__, flupver
)
)
server = WSGIServer(
app,
bindAddress=(config["host"], config["port"]),
)
try:
server.run()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return
|
Run WsgiDAV using flup.server.fcgi if Flup is installed.
|
def allow(self, roles, methods, with_children=True):
def decorator(view_func):
_methods = [m.upper() for m in methods]
for r, m, v in itertools.product(roles, _methods, [view_func.__name__]):
self.before_acl['allow'].append((r, m, v, with_children))
return view_func
return decorator
|
This is a decorator function.
You can allow roles to access the view func with it.
An example::
@app.route('/website/setting', methods=['GET', 'POST'])
@rbac.allow(['administrator', 'super_user'], ['GET', 'POST'])
def website_setting():
return Response('Setting page.')
:param roles: List, each name of roles. Please note that,
`anonymous` refers to anonymous users.
If you add `anonymous` to the rule,
everyone can access the resource,
unless you deny other roles.
:param methods: List, each name of methods.
methods is valid in ['GET', 'POST', 'PUT', 'DELETE']
:param with_children: Whether allow children of roles as well.
True by default.
|
def instance_for_arguments(self, arguments):
profiles = {**{key: value.instance_for_arguments(arguments)
for key, value
in self.profile_prior_model_dict.items()}, **self.constant_profile_dict}
try:
redshift = self.redshift.instance_for_arguments(arguments)
except AttributeError:
redshift = self.redshift
pixelization = self.pixelization.instance_for_arguments(arguments) \
if isinstance(self.pixelization, pm.PriorModel) \
else self.pixelization
regularization = self.regularization.instance_for_arguments(arguments) \
if isinstance(self.regularization, pm.PriorModel) \
else self.regularization
hyper_galaxy = self.hyper_galaxy.instance_for_arguments(arguments) \
if isinstance(self.hyper_galaxy, pm.PriorModel) \
else self.hyper_galaxy
return galaxy.Galaxy(redshift=redshift, pixelization=pixelization, regularization=regularization,
hyper_galaxy=hyper_galaxy, **profiles)
|
Create an instance of the associated class for a set of arguments
Parameters
----------
arguments: {Prior: value}
Dictionary mapping_matrix priors to attribute analysis_path and value pairs
Returns
-------
An instance of the class
|
def serialize(self, data, format=None):
return self._resource.serialize(data, response=self, format=format)
|
Serializes the data into this response using a serializer.
@param[in] data
The data to be serialized.
@param[in] format
A specific format to serialize in; if provided, no detection is
done. If not provided, the accept header (as well as the URL
extension) is looked at to determine an appropriate serializer.
@returns
A tuple of the serialized text and an instance of the
serializer used.
|
def aggregate(l):
tree = radix.Radix()
for item in l:
try:
tree.add(item)
except (ValueError) as err:
raise Exception("ERROR: invalid IP prefix: {}".format(item))
return aggregate_tree(tree).prefixes()
|
Aggregate a `list` of prefixes.
Keyword arguments:
l -- a python list of prefixes
Example use:
>>> aggregate(["10.0.0.0/8", "10.0.0.0/24"])
['10.0.0.0/8']
|
def courses(self, request, pk=None):
enterprise_customer = self.get_object()
self.check_object_permissions(request, enterprise_customer)
self.ensure_data_exists(
request,
enterprise_customer.catalog,
error_message="No catalog is associated with Enterprise {enterprise_name} from endpoint '{path}'.".format(
enterprise_name=enterprise_customer.name,
path=request.get_full_path()
)
)
catalog_api = CourseCatalogApiClient(request.user, enterprise_customer.site)
courses = catalog_api.get_paginated_catalog_courses(enterprise_customer.catalog, request.GET)
self.ensure_data_exists(
request,
courses,
error_message=(
"Unable to fetch API response for catalog courses for "
"Enterprise {enterprise_name} from endpoint '{path}'.".format(
enterprise_name=enterprise_customer.name,
path=request.get_full_path()
)
)
)
serializer = serializers.EnterpriseCatalogCoursesReadOnlySerializer(courses)
serializer.update_enterprise_courses(enterprise_customer, catalog_id=enterprise_customer.catalog)
return get_paginated_response(serializer.data, request)
|
Retrieve the list of courses contained within the catalog linked to this enterprise.
Only courses with active course runs are returned. A course run is considered active if it is currently
open for enrollment, or will open in the future.
|
def delete_user(self, user_id):
response = self._perform_request(
url='/um/users/%s' % user_id,
method='DELETE')
return response
|
Removes a user.
:param user_id: The unique ID of the user.
:type user_id: ``str``
|
def get_centroid_offsets(lcd, t_ing_egr, oot_buffer_time=0.1, sample_factor=3):
qnum = int(np.unique(lcd['quarter']))
LOGINFO('Getting centroid offsets (qnum: {:d})...'.format(qnum))
arcsec_per_px = 3.98
times = lcd['ctd_dtr']['times']
ctd_resid_x = lcd['ctd_dtr']['ctd_x'] - lcd['ctd_dtr']['fit_ctd_x']
ctd_resid_y = lcd['ctd_dtr']['ctd_y'] - lcd['ctd_dtr']['fit_ctd_y']
cd = {}
for ix,(t_ing,t_egr) in enumerate(t_ing_egr):
in_tra_times = times[(times > t_ing) & (times < t_egr)]
transit_dur = t_egr - t_ing
oot_window_len = sample_factor * transit_dur
oot_before = times[
(times < (t_ing-oot_buffer_time)) &
(times > (t_ing-oot_buffer_time-oot_window_len))
]
oot_after = times[
(times > (t_egr+oot_buffer_time)) &
(times < (t_egr+oot_buffer_time+oot_window_len))
]
oot_times = npconcatenate([oot_before, oot_after])
mask_tra = npin1d(times, in_tra_times)
mask_oot = npin1d(times, oot_times)
ctd_x_in_tra = ctd_resid_x[mask_tra]*arcsec_per_px
ctd_y_in_tra = ctd_resid_y[mask_tra]*arcsec_per_px
ctd_x_oot = ctd_resid_x[mask_oot]*arcsec_per_px
ctd_y_oot = ctd_resid_y[mask_oot]*arcsec_per_px
cd[ix] = {'ctd_x_in_tra':ctd_x_in_tra,
'ctd_y_in_tra':ctd_y_in_tra,
'ctd_x_oot':ctd_x_oot,
'ctd_y_oot':ctd_y_oot,
'npts_in_tra':len(ctd_x_in_tra),
'npts_oot':len(ctd_x_oot),
'in_tra_times':in_tra_times,
'oot_times':oot_times}
LOGINFO('Got centroid offsets (qnum: {:d}).'.format(qnum))
return cd
|
After running `detrend_centroid`, this gets positions of centroids during
transits, and outside of transits.
These positions can then be used in a false positive analysis.
This routine requires knowing the ingress and egress times for every
transit of interest within the quarter this routine is being called for.
There is currently no astrobase routine that automates this for periodic
transits (it must be done in a calling routine).
To get out of transit centroids, this routine takes points outside of the
"buffer" set by `oot_buffer_time`, sampling 3x as many points on either
side of the transit as are in the transit (or however many are specified by
`sample_factor`).
Parameters
----------
lcd : lcdict
An `lcdict` generated by the `read_kepler_fitslc` function. We assume
that the `detrend_centroid` function has been run on this `lcdict`.
t_ing_egr : list of tuples
This is of the form::
[(ingress time of i^th transit, egress time of i^th transit)]
for i the transit number index in this quarter (starts at zero at the
beginning of every quarter). Assumes units of BJD.
oot_buffer_time : float
Number of days away from ingress and egress times to begin sampling "out
of transit" centroid points. The number of out of transit points to take
per transit is 3x the number of points in transit.
sample_factor : float
The multiple of the transit duration used as the length of the out-of-transit window sampled on each side of the transit.
Returns
-------
dict
This is a dictionary keyed by transit number (i.e., the same index as
`t_ing_egr`), where each key contains the following value::
{'ctd_x_in_tra':ctd_x_in_tra,
'ctd_y_in_tra':ctd_y_in_tra,
'ctd_x_oot':ctd_x_oot,
'ctd_y_oot':ctd_y_oot,
'npts_in_tra':len(ctd_x_in_tra),
'npts_oot':len(ctd_x_oot),
'in_tra_times':in_tra_times,
'oot_times':oot_times}
|
def getphraselist(self):
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
|
Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
|
def sample_top(a=None, top_k=10):
if a is None:
a = []
idx = np.argpartition(a, -top_k)[-top_k:]
probs = a[idx]
probs = probs / np.sum(probs)
choice = np.random.choice(idx, p=probs)
return choice
|
Sample from ``top_k`` probabilities.
Parameters
----------
a : list of float
List of probabilities.
top_k : int
Number of candidates to be considered.
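Usage sketch: with top_k=2 only the two most probable indices (3 and 4 here) can ever be drawn:
>>> import numpy as np
>>> idx = sample_top(np.array([0.05, 0.05, 0.1, 0.3, 0.5]), top_k=2)
>>> int(idx) in (3, 4)
True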
|
def start(self):
logger = _get_logger(self.debug)
started = self._session.start()
if started:
ev = self._session.nextEvent()
ev_name = _EVENT_DICT[ev.eventType()]
logger.info('Event Type: {!r}'.format(ev_name))
for msg in ev:
logger.info('Message Received:\n{}'.format(msg))
if ev.eventType() != blpapi.Event.SESSION_STATUS:
raise RuntimeError('Expected a "SESSION_STATUS" event but '
'received a {!r}'.format(ev_name))
ev = self._session.nextEvent()
ev_name = _EVENT_DICT[ev.eventType()]
logger.info('Event Type: {!r}'.format(ev_name))
for msg in ev:
logger.info('Message Received:\n{}'.format(msg))
if ev.eventType() != blpapi.Event.SESSION_STATUS:
raise RuntimeError('Expected a "SESSION_STATUS" event but '
'received a {!r}'.format(ev_name))
else:
ev = self._session.nextEvent(self.timeout)
if ev.eventType() == blpapi.Event.SESSION_STATUS:
for msg in ev:
logger.warning('Message Received:\n{}'.format(msg))
raise ConnectionError('Could not start blpapi.Session')
self._init_services()
return self
|
Start connection and initialize session services
|
def read(cls, proto):
instance = object.__new__(cls)
super(PreviousValueModel, instance).__init__(proto=proto.modelBase)
instance._logger = opf_utils.initLogger(instance)
if len(proto.predictedField):
instance._predictedField = proto.predictedField
else:
instance._predictedField = None
instance._fieldNames = list(proto.fieldNames)
instance._fieldTypes = list(proto.fieldTypes)
instance._predictionSteps = list(proto.predictionSteps)
return instance
|
Deserialize via capnp
:param proto: capnp PreviousValueModelProto message reader
:returns: new instance of PreviousValueModel deserialized from the given
proto
|
def render(self, template, **data):
dct = self.global_data.copy()
dct.update(data)
try:
html = self.env.get_template(template).render(**dct)
except TemplateNotFound:
raise JinjaTemplateNotFound
return html
|
Render data with the template and return the HTML as unicode.
Parameters:
template : str, the template's filename
data : dict, the data to render
|
def reset_annotations(self):
self.annotation_date_set = False
self.annotation_comment_set = False
self.annotation_type_set = False
self.annotation_spdx_id_set = False
|
Resets the builder's state to allow building new annotations.
|
def poll(self):
try:
ret = self._buffer.get(block=False)
if self._producer_callback is not None:
self._producer_callback()
return ret
except Queue.Empty:
Log.debug("%s: Empty in poll()" % str(self))
raise Queue.Empty
|
Poll from the buffer
It is a non-blocking operation, and when the buffer is empty, it raises Queue.Empty exception
|
def get_date(self, filename):
try:
self.document = parse(filename)
return self._get_date()
except DateNotFoundException:
print("Date problem found in {0}".format(filename))
return datetime.datetime.strftime(datetime.datetime.now(),
"%Y-%m-%d")
|
Return the date of the article in file.
|
def search_messages(session, thread_id, query, limit=20,
offset=0, message_context_details=None,
window_above=None, window_below=None):
query = {
'thread_id': thread_id,
'query': query,
'limit': limit,
'offset': offset
}
if message_context_details:
query['message_context_details'] = message_context_details
if window_above:
query['window_above'] = window_above
if window_below:
query['window_below'] = window_below
response = make_get_request(session, 'messages/search', params_data=query)
json_data = response.json()
if response.status_code == 200:
return json_data['result']
else:
raise MessagesNotFoundException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']
)
|
Search for messages
|
def _parse_sid_response(res):
res = json.loads(list(ChunkParser().get_chunks(res))[0])
sid = res[0][1][1]
gsessionid = res[1][1][0]['gsid']
return (sid, gsessionid)
|
Parse response format for request for new channel SID.
Example format (after parsing JS):
[ [0,["c","SID_HERE","",8]],
[1,[{"gsid":"GSESSIONID_HERE"}]]]
Returns (SID, gsessionid) tuple.
|
def count_params(self):
n_params = 0
for _i, p in enumerate(self.all_params):
n = 1
for s in p.get_shape():
try:
s = int(s)
except Exception:
s = 1
if s:
n = n * s
n_params = n_params + n
return n_params
|
Returns the number of parameters in the network.
|
def person_inn():
mask11 = [7, 2, 4, 10, 3, 5, 9, 4, 6, 8]
mask12 = [3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8]
inn = [random.randint(1, 9) for _ in range(12)]
weighted11 = [v * mask11[i] for i, v in enumerate(inn[:-2])]
inn[10] = sum(weighted11) % 11 % 10
weighted12 = [v * mask12[i] for i, v in enumerate(inn[:-1])]
inn[11] = sum(weighted12) % 11 % 10
return "".join(map(str, inn))
|
Return a random taxation ID number for a natural person.
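Usage sketch: the result is always a 12-digit string whose last two digits act as check digits:
>>> inn = person_inn()
>>> len(inn), inn.isdigit()
(12, True)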
|
def log_every_n(level, msg, n, *args):
count = _GetNextLogCountPerToken(_GetFileAndLine())
log_if(level, msg, not (count % n), *args)
|
Log 'msg % args' at level 'level' once per 'n' times.
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: The level at which to log.
msg: The message to be logged.
n: The number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
|
def adsSyncReadByNameEx(port, address, data_name, data_type, return_ctypes=False):
handle = adsSyncReadWriteReqEx2(
port,
address,
ADSIGRP_SYM_HNDBYNAME,
0x0,
PLCTYPE_UDINT,
data_name,
PLCTYPE_STRING,
)
value = adsSyncReadReqEx2(
port, address, ADSIGRP_SYM_VALBYHND, handle, data_type, return_ctypes
)
adsSyncWriteReqEx(port, address, ADSIGRP_SYM_RELEASEHND, 0, handle, PLCTYPE_UDINT)
return value
|
Read data synchronously from an ADS device by data name.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param string data_name: data name
:param Type data_type: type of the data given to the PLC, according to
PLCTYPE constants
:param bool return_ctypes: return ctypes instead of python types if True
(default: False)
:rtype: data_type
:return: value: **value**
|
def full(shape, value, dtype='f8'):
shared = empty(shape, dtype)
shared[:] = value
return shared
|
Create a shared memory array of given shape and type, filled with `value`.
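Usage sketch (assumes the module's shared-memory `empty` helper returns a NumPy-compatible array):
>>> arr = full((2, 3), 7.5)
>>> arr.shape, float(arr[0, 0])
((2, 3), 7.5)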
|
def _is_redundant(self, matrix, cutoff=None):
if cutoff is None:
cutoff = 1.0 - self.feasibility_tol
extra_col = matrix[:, 0] + 1
extra_col[matrix.sum(axis=1) == 0] = 2
corr = np.corrcoef(np.c_[matrix, extra_col])
corr = np.tril(corr, -1)
return (np.abs(corr) > cutoff).any(axis=1)
|
Identify redundant rows in a matrix that can be removed.
|
def _is_root():
import os
import ctypes
try:
return os.geteuid() == 0
except AttributeError:
return ctypes.windll.shell32.IsUserAnAdmin() != 0
return False
|
Checks if the current user is root (or an administrator on Windows).
|