| code (string, lengths 51–2.34k) | docstring (string, lengths 11–171) |
|---|---|
def Screens(self, text, prog, screen, clock):
BaseScreen.__init__(self, self.size, self.background)
xmid = self.size[0]//2
Linesoftext(text, (xmid, 40), xmid=True, surface=self.image,
fontsize=30)
self.buttonlist = []
if prog == 0:
self.buttonlist += [self.nextbutton]
elif prog == 1:
self.buttonlist += [self.nextbutton]
self.buttonlist += [self.backbutton]
elif prog == 2:
self.buttonlist += [self.lastbutton]
self.buttonlist += [self.backbutton]
for i in self.buttonlist:
self.image.blit(*i.blitinfo)
return Menu.update(self, screen, clock)
|
prog = 0 for the first page, 1 for middle pages, 2 for the last page
|
def on_bottom_align_toggled(self, chk):
v = chk.get_active()
self.settings.general.set_int('window-valignment', ALIGN_BOTTOM if v else ALIGN_TOP)
|
set the vertical alignment setting.
|
def date_for_str(date_str):
try:
for date_format in itertools.permutations(['%Y','%m','%d']):
try:
date = datetime.strptime(date_str,''.join(date_format))
raise StopIteration
except ValueError:
pass
return None
except StopIteration:
return date
|
tries to guess date from ambiguous date string
|
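A quick usage sketch with illustrative inputs; the first format permutation that parses wins, so ambiguous strings resolve to whichever order matches first:
date_for_str('20210315')    # -> datetime(2021, 3, 15, 0, 0), parsed as '%Y%m%d'
date_for_str('not a date')  # -> None, every permutation raises ValueError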
def gen403(request, baseURI, reason, project=None):
orgas = None
public_ask = False
if not settings.PIAPI_STANDALONE:
from organizations.models import Organization
if project and project.plugItLimitOrgaJoinable:
orgas = project.plugItOrgaJoinable.order_by('name').all()
else:
orgas = Organization.objects.order_by('name').all()
rorgas = []
for o in orgas:
if str(o.pk) == settings.VISITOR_ORGA_PK:
public_ask = True
else:
rorgas.append(o)
orgas = rorgas
return HttpResponseForbidden(render_to_response('plugIt/403.html', {'context':
{
'reason': reason,
'orgas': orgas,
'public_ask': public_ask,
'ebuio_baseUrl': baseURI,
'ebuio_userMode': request.session.get('plugit-standalone-usermode', 'ano'),
},
'project': project
}, context_instance=RequestContext(request)))
|
Return a 403 error
|
def _filter_results(self, result, anchor):
valid = True
try:
cat_tag = result.find('a', {'rel': 'category tag'}).string
title = anchor.string.lower()
date_tag = result.find('time').string
except (AttributeError, TypeError):
return False
if cat_tag != "Daily Ratings":
valid = False
if not date_in_range(self.date, date_tag, 5):
valid = False
if self.category == 'cable' and 'cable' not in title:
valid = False
elif self.category != 'cable' and 'cable' in title:
valid = False
return valid
|
Filter search results by checking category titles and dates
|
def execute_policy(self, scaling_group, policy):
return self._manager.execute_policy(scaling_group=scaling_group,
policy=policy)
|
Executes the specified policy for the scaling group.
|
def on_exception(self, exception):
logger.error('Exception from stream!', exc_info=True)
self.streaming_exception = exception
|
An exception occurred in the streaming thread
|
def check_matching_coordinates(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
data_arrays = ([a for a in args if isinstance(a, xr.DataArray)]
+ [a for a in kwargs.values() if isinstance(a, xr.DataArray)])
if len(data_arrays) > 1:
first = data_arrays[0]
for other in data_arrays[1:]:
if not first.metpy.coordinates_identical(other):
raise ValueError('Input DataArray arguments must be on same coordinates.')
return func(*args, **kwargs)
return wrapper
|
Decorate a function to make sure all given DataArrays have matching coordinates.
|
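A minimal usage sketch; the decorated function below is hypothetical and simply shows where the check fires:
@check_matching_coordinates
def add_fields(a, b):
    # Raises ValueError before running if two DataArray arguments
    # are not on identical coordinates; otherwise just adds them.
    return a + b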
def _get_axis_label(self, dim):
if u(dim[:-1]).isdecimal():
n = len(self.channel_ids)
return str(self.channel_ids[int(dim[:-1]) % n]) + dim[-1]
else:
return dim
|
Return the channel id from a dimension, if applicable.
|
def _wrapper(self):
try:
res = self.func(*self.args, **self.kw)
except Exception as e:
self.mediator.set_error(e)
else:
self.mediator.set_result(res)
|
Wraps around a few calls which need to be made in the same thread.
|
def convert_from_gps_time(gps_time, gps_week=None):
converted_gps_time = None
gps_timestamp = float(gps_time)
if gps_week is not None:
converted_gps_time = GPS_START + datetime.timedelta(seconds=int(gps_week) *
SECS_IN_WEEK + gps_timestamp)
else:
os.environ['TZ'] = 'right/UTC'
gps_time_as_gps = GPS_START + \
datetime.timedelta(seconds=gps_timestamp)
gps_time_as_tai = gps_time_as_gps + \
datetime.timedelta(seconds=19)
tai_epoch_as_tai = datetime.datetime(1970, 1, 1, 0, 0, 10)
tai_timestamp = (gps_time_as_tai - tai_epoch_as_tai).total_seconds()
converted_gps_time = (
datetime.datetime.utcfromtimestamp(tai_timestamp))
return converted_gps_time
|
Convert GPS time in seconds to standard (UTC) time.
|
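A usage sketch for the week-based branch, assuming the module constants are GPS_START = datetime.datetime(1980, 1, 6) and SECS_IN_WEEK = 604800 (both assumptions, as they are not shown):
convert_from_gps_time(0, gps_week=2086)   # -> datetime.datetime(2019, 12, 29, 0, 0)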
def featureServers(self):
if self.urls == {}:
return {}
featuresUrls = self.urls['urls']['features']
if 'https' in featuresUrls:
res = featuresUrls['https']
elif 'http' in featuresUrls:
res = featuresUrls['http']
else:
return None
services = []
for urlHost in res:
if self.isPortal:
services.append(AGSAdministration(
url='%s/admin' % urlHost,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
else:
services.append(Services(
url='https://%s/%s/ArcGIS/admin' % (urlHost, self.portalId),
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port))
return services
|
gets the hosting feature AGS Server
|
def _update_Pxy_diag(self):
for r in range(1):
Phi_x_half = self.Phi_x**0.5
Phi_x_neghalf = self.Phi_x**-0.5
symm_p = (Phi_x_half * (self.Pxy[r] * Phi_x_neghalf).transpose()).transpose()
(evals, evecs) = scipy.linalg.eigh(symm_p)
self.D[r] = evals
self.Ainv[r] = evecs.transpose() * Phi_x_half
self.A[r] = (Phi_x_neghalf * evecs.transpose()).transpose()
|
Update `D`, `A`, `Ainv` from `Pxy`, `Phi_x`.
|
def _createComboBoxes(self, row):
tree = self.tree
model = self.tree.model()
self._setColumnCountForContents()
for col, _ in enumerate(self._axisNames, self.COL_FIRST_COMBO):
logger.debug("Adding combobox at ({}, {})".format(row, col))
comboBox = QtWidgets.QComboBox()
comboBox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
comboBox.activated.connect(self._comboBoxActivated)
self._comboBoxes.append(comboBox)
tree.setIndexWidget(model.index(row, col), comboBox)
|
Creates a combo box for each of the fullAxisNames
|
def find_root_and_sink_variables(self):
is_root = {name: True for scope in self.scopes for name in scope.variables.keys()}
for operator in self.unordered_operator_iterator():
for variable in operator.outputs:
is_root[variable.onnx_name] = False
is_sink = {name: True for scope in self.scopes for name in scope.variables.keys()}
for operator in self.unordered_operator_iterator():
for variable in operator.inputs:
is_sink[variable.onnx_name] = False
return [variable for scope in self.scopes for name, variable in scope.variables.items()
if is_root[name] or is_sink[name]]
|
Find the root and sink variables of the whole graph
|
def executor_cleanup(self):
try:
executor = self.get_executor(create=None)
except AttributeError:
pass
else:
if executor is not None:
executor.cleanup()
|
Let the executor clean up any cached information.
|
def copy(data, start=0, stop=None, blen=None, storage=None, create='array',
**kwargs):
storage = _util.get_storage(storage)
blen = _util.get_blen_array(data, blen)
if stop is None:
stop = len(data)
else:
stop = min(stop, len(data))
length = stop - start
if length < 0:
raise ValueError('invalid stop/start')
out = None
for i in range(start, stop, blen):
j = min(i+blen, stop)
block = data[i:j]
if out is None:
out = getattr(storage, create)(block, expectedlen=length, **kwargs)
else:
out.append(block)
return out
|
Copy `data` block-wise into a new array.
|
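The heart of the function is the block loop; a minimal standalone sketch of the same pattern with plain NumPy (no storage backend, purely illustrative) would be:
import numpy as np

data = np.arange(10)
blen = 4
out = None
for i in range(0, len(data), blen):
    block = data[i:i + blen]          # at most blen items per block
    out = block.copy() if out is None else np.concatenate([out, block])
# out is now an element-wise copy of data, built block by block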
def parse_field(setting, field_name, default):
if isinstance(setting, dict):
return setting.get(field_name, default)
else:
return setting
|
Extract result from single-value or dict-type setting like fallback_values.
|
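A few illustrative calls with hypothetical setting values:
parse_field({'temperature': 0.0, 'humidity': 50.0}, 'humidity', 10.0)   # -> 50.0
parse_field({'temperature': 0.0}, 'humidity', 10.0)                     # -> 10.0 (falls back to default)
parse_field(25.0, 'humidity', 10.0)                                     # -> 25.0 (a scalar applies to every field)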
def update(self) -> None:
mask = self.mask
weights = self.refweights[mask]
self[~mask] = numpy.nan
self[mask] = weights/numpy.sum(weights)
|
Update subclass of |RelSubweightsMixin| based on `refweights`.
|
def contribute_to_class(self, cls, name, **kwargs):
super(PlaceholderField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, name, PlaceholderFieldDescriptor(self.slot))
if not hasattr(cls, '_meta_placeholder_fields'):
cls._meta_placeholder_fields = {}
cls._meta_placeholder_fields[name] = self
if django.VERSION >= (1, 11):
rel = self.remote_field
else:
rel = self.rel
if rel.related_name is None:
rel.related_name = '{app}_{model}_{slot}_FIXME'.format(
app=cls._meta.app_label,
model=cls._meta.object_name.lower(),
slot=self.slot
)
setattr(rel.to, self.rel.related_name, None)
|
Internal Django method to associate the field with the Model; it assigns the descriptor.
|
def close_all_pages(self):
states_to_be_closed = []
for state_identifier in self.tabs:
states_to_be_closed.append(state_identifier)
for state_identifier in states_to_be_closed:
self.close_page(state_identifier, delete=False)
|
Closes all tabs of the states editor
|
def fetch_entities(self):
query = text(
)
response = self.perform_query(query)
entities = {}
domains = set()
for [entity] in response:
domain = entity.split(".")[0]
domains.add(domain)
entities.setdefault(domain, []).append(entity)
self._domains = list(domains)
self._entities = entities
print("There are {} entities with data".format(len(entities)))
|
Fetch entities for which we have data.
|
def DeleteClientActionRequests(self, requests):
if not requests:
return
to_delete = []
for r in requests:
to_delete.append((r.client_id, r.flow_id, r.request_id))
if len(set(to_delete)) != len(to_delete):
raise ValueError(
"Received multiple copies of the same message to delete.")
self._DeleteClientActionRequest(to_delete)
|
Deletes a list of client messages from the db.
|
def get(self, key, default=miss):
if key not in self._dict:
return default
return self[key]
|
Return the value for given key if it exists.
|
def command_queue_worker(self, command_queue):
while True:
try:
command, data = command_queue.get(timeout=3)
try:
self.process_command(command, data)
except Exception as e:
_logger.exception(e)
self.worker_exceptions.append(e)
break
except Empty:
pass
if self.stopping and (_worker_fast_exit_on_terminate or command_queue.empty()):
break
|
Process commands in command queues.
|
def uniq(seq):
seen = set()
return [x for x in seq if str(x) not in seen and not seen.add(str(x))]
|
Return a copy of seq without duplicates.
|
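Because items are keyed by their str() representation, unhashable values such as lists are handled as well; a quick example:
uniq([[1], [2], [1], 3, 3])   # -> [[1], [2], 3]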
def validate(self, obj, **kwargs):
obj = stringify(obj)
if obj is None:
return False
return self.DATE_RE.match(obj) is not None
|
Check if a thing is a valid date.
|
def serial_lock(self, lock):
mav = self.master.mav
if lock:
flags = mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE
self.locked = True
else:
flags = 0
self.locked = False
mav.serial_control_send(self.serial_settings.port,
flags,
0, 0, 0, [0]*70)
|
lock or unlock the port
|
def collect_appendvars(ap_, cls):
for key, value in cls.__dict__.items():
if key.startswith('appendvars_'):
varname = key[11:]
if varname not in ap_.appendvars:
ap_.appendvars[varname] = []
if value not in ap_.appendvars[varname]:
if not isinstance(value, list):
value = [value]
ap_.appendvars[varname] += value
|
Collect elements for the lists.
|
def _pack_date(self, date):
y, mon, d, h, min_, s = date.timetuple()[:6]
dw1 = 0x0000FFFF & ((y>>6) & 0x0000003F)
dw2 = 0x0000FFFF & ((y & 0x0000003F)<<2 | ((mon>>2) & 0x00000003))
dw3 = 0x0000FFFF & (((mon & 0x0000003)<<6) | ((d & 0x0000001F)<<1) \
| ((h>>4) & 0x00000001))
dw4 = 0x0000FFFF & (((h & 0x0000000F)<<4) | ((min_>>2) & 0x0000000F))
dw5 = 0x0000FFFF & (((min_ & 0x00000003)<<6) | (s & 0x0000003F))
return struct.pack('<5B', dw1, dw2, dw3, dw4, dw5)
|
This method is used to encode dates
|
def main(output):
client = Session().client('cloudresourcemanager', 'v1', 'projects')
results = []
for page in client.execute_paged_query('list', {}):
for project in page.get('projects', []):
if project['lifecycleState'] != 'ACTIVE':
continue
project_info = {
'project_id': project['projectId'],
'name': project['name'],
}
if 'labels' in project:
project_info['tags'] = [
'label:%s:%s' % (k, v) for k, v in project.get('labels', {}).items()]
results.append(project_info)
output.write(
yaml.safe_dump({'projects': results}, default_flow_style=False))
|
Generate a c7n-org gcp projects config file
|
def transpose(attrs, inputs, proto_obj):
new_attrs = translation_utils._fix_attribute_names(attrs,
{'perm' : 'axes'})
return 'transpose', new_attrs, inputs
|
Transpose the input array.
|
def create_issue(self, request, group, form_data, **kwargs):
instance = self.get_option('instance', group.project)
project = (
form_data.get('project') or
self.get_option('default_project', group.project)
)
client = self.get_client(request.user)
title = form_data['title']
description = form_data['description']
link = absolute_uri(group.get_absolute_url(params={'referrer': 'vsts_plugin'}))
try:
created_item = client.create_work_item(
instance=instance,
project=project,
title=title,
comment=markdown(description),
link=link,
)
except Exception as e:
self.raise_error(e, identity=client.auth)
return {
'id': created_item['id'],
'url': created_item['_links']['html']['href'],
'title': title,
}
|
Creates the issue on the remote service and returns an issue ID.
|
def cookies(self) -> Dict[str, str]:
cookies = SimpleCookie()
cookies.load(self.headers.get('Cookie', ''))
return {key: cookie.value for key, cookie in cookies.items()}
|
The parsed cookies attached to this request.
|
def grad_eigh(ans, x, UPLO='L'):
N = x.shape[-1]
w, v = ans
def vjp(g):
wg, vg = g
w_repeated = anp.repeat(w[..., anp.newaxis], N, axis=-1)
off_diag = anp.ones((N, N)) - anp.eye(N)
F = off_diag / (T(w_repeated) - w_repeated + anp.eye(N))
return _dot(v * wg[..., anp.newaxis, :] + _dot(v, F * _dot(T(v), vg)), T(v))
return vjp
|
Gradient for eigenvalues and vectors of a symmetric matrix.
|
def weight_list_to_tuple(data, attr_name):
if len(data['Value']) != len(data['Weight']):
raise ValueError('Number of weights do not correspond to number of '
'attributes in %s' % attr_name)
weight = np.array(data['Weight'])
if fabs(np.sum(weight) - 1.) > 1E-7:
raise ValueError('Weights do not sum to 1.0 in %s' % attr_name)
data_tuple = []
for iloc, value in enumerate(data['Value']):
data_tuple.append((value, weight[iloc]))
return data_tuple
|
Converts a list of values and corresponding weights to a list of (value, weight) tuples
|
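A worked example with hypothetical values; the weights must sum to 1.0 within 1e-7:
weight_list_to_tuple({'Value': [5.0, 6.0], 'Weight': [0.3, 0.7]}, 'magnitude')
# -> [(5.0, 0.3), (6.0, 0.7)]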
def RV_com1(self):
return self.RV * (self.M2 / (self.M1 + self.M2))
|
RVs of star 1 relative to center-of-mass
|
def _create_xml_node(cls):
try:
xml_map = cls._xml_map
except AttributeError:
raise ValueError("This model has no XML definition")
return _create_xml_node(
xml_map.get('name', cls.__name__),
xml_map.get("prefix", None),
xml_map.get("ns", None)
)
|
Create XML node from "_xml_map".
|
def subst_quoted_strings(sql, params):
parts = sql.split('@')
params_dont_match = "number of parameters doesn't match the transformed query"
assert len(parts) == len(params) + 1, params_dont_match
out = []
for i, param in enumerate(params):
out.append(parts[i])
out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
out.append(parts[-1])
return ''.join(out)
|
Reverse operation to mark_quoted_strings - substitutes '@' by params.
|
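An illustrative call, assuming the '@' placeholders were produced by the companion mark_quoted_strings step:
subst_quoted_strings("SELECT * FROM t WHERE name = @ AND city = @", ["Alice", "Oslo"])
# -> "SELECT * FROM t WHERE name = 'Alice' AND city = 'Oslo'"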
def check_path(file_path):
directory = os.path.dirname(file_path)
if directory != '':
if not os.path.exists(directory):
os.makedirs(directory, 0o775)
|
Checks whether the directories for this path exist and creates them if necessary.
|
def value_for_keypath(obj, path):
val = obj
for part in path.split('.'):
match = re.match(list_index_re, part)
if match is not None:
val = _extract(val, match.group(1))
if not isinstance(val, list) and not isinstance(val, tuple):
raise TypeError('expected list/tuple')
index = int(match.group(2))
val = val[index]
else:
val = _extract(val, part)
if val is None:
return None
return val
|
Get value from walking key path with start object obj.
|
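An illustrative call, assuming list_index_re matches segments like 'tags[1]' and _extract performs a plain dict/attribute lookup (both are assumptions, since neither helper is shown):
doc = {'user': {'tags': ['admin', 'ops']}}
value_for_keypath(doc, 'user.tags[1]')   # -> 'ops'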
def del_big_nodes(self, grater_than=215):
G = self._graph
it = G.nodes_iter()
node_paths = []
node_names = []
del_nodes = []
summe = 1
count = 1
for node in it:
l = len(G[node])
if l > grater_than:
del_nodes.append(node)
continue
summe += l
node_names.append(node)
node_paths.append(l)
count += 1
for node in del_nodes:
G.remove_node(node)
if node > 1000000000:
self.valid_user.pop(node)
print("Nodes deleted: {}".format(len(del_nodes)))
|
Delete big nodes with many connections from the graph.
|
def _kpost(url, data):
headers = {"Content-Type": "application/json"}
log.trace("url is: %s, data is: %s", url, data)
ret = http.query(url,
method='POST',
header_dict=headers,
data=salt.utils.json.dumps(data))
if ret.get('error'):
return ret
else:
return salt.utils.json.loads(ret.get('body'))
|
create any object in kubernetes based on URL
|
def parse_datetime(dt_str, format):
t = time.strptime(dt_str, format)
return datetime(t[0], t[1], t[2], t[3], t[4], t[5], 0, pytz.UTC)  # t[6] is the weekday, not microseconds
|
Create a timezone-aware datetime object from a datetime string.
|
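A usage sketch with an illustrative timestamp:
parse_datetime('2021-03-15 12:30:00', '%Y-%m-%d %H:%M:%S')
# -> datetime.datetime(2021, 3, 15, 12, 30, tzinfo=<UTC>)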
def api_run_get(run_id):
data = current_app.config["data"]
run = data.get_run_dao().get(run_id)
records_total = 1 if run is not None else 0
if records_total == 0:
return Response(
render_template(
"api/error.js",
error_code=404,
error_message="Run %s not found." % run_id),
status=404,
mimetype="application/json")
records_filtered = records_total
return Response(render_template("api/runs.js", runs=[run], draw=1,
recordsTotal=records_total,
recordsFiltered=records_filtered,
full_object=True),
mimetype="application/json")
|
Return a single run as a JSON object.
|
def line_up_with_epoch(self):
if self.parent.notes.annot is None:
error_dialog = QErrorMessage()
error_dialog.setWindowTitle('Error moving to epoch')
error_dialog.showMessage('No score file loaded')
error_dialog.exec()
return
new_window_start = self.parent.notes.annot.get_epoch_start(
self.parent.value('window_start'))
self.parent.overview.update_position(new_window_start)
|
Go to the start of the present epoch.
|
def read_hdf5_segmentlist(h5f, path=None, gpstype=LIGOTimeGPS, **kwargs):
dataset = io_hdf5.find_dataset(h5f, path=path)
segtable = Table.read(dataset, format='hdf5', **kwargs)
out = SegmentList()
for row in segtable:
start = LIGOTimeGPS(int(row['start_time']), int(row['start_time_ns']))
end = LIGOTimeGPS(int(row['end_time']), int(row['end_time_ns']))
if gpstype is LIGOTimeGPS:
out.append(Segment(start, end))
else:
out.append(Segment(gpstype(start), gpstype(end)))
return out
|
Read a `SegmentList` object from an HDF5 file or group.
|
def _traverse_unobserved(stream,negative_filter,of):
observed = set()
for line in stream:
name = PacBioReadName(_nameprog.match(line).group(1))
if name.get_molecule() not in negative_filter: of.write(line)
observed.add(name.get_molecule())
return negative_filter|observed
|
Go through a stream, write out any read whose molecule is not in the negative filter, and return the filter updated with the observed molecules
|
def _add_nat(self):
if is_period_dtype(self):
raise TypeError('Cannot add {cls} and {typ}'
.format(cls=type(self).__name__,
typ=type(NaT).__name__))
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
|
Add pd.NaT to self
|
def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar):
pillar_scaling_policies = copy.deepcopy(
__salt__['config.option'](scaling_policies_from_pillar, {})
)
if not scaling_policies and pillar_scaling_policies:
scaling_policies = pillar_scaling_policies
return scaling_policies
|
Helper method for `present`; ensures that scaling_policies are set
|
def ensure_connected(self):
if not self.is_connected():
if not self._auto_connect:
raise DBALConnectionError.connection_closed()
self.connect()
|
Ensures database connection is still open.
|
def RunOnce(self):
from grr_response_server.gui import gui_plugins
if config.CONFIG.Get("AdminUI.django_secret_key", None):
logging.warning(
"The AdminUI.django_secret_key option has been deprecated, "
"please use AdminUI.csrf_secret_key instead.")
|
Import the plugins once only.
|
def read_sheets(archive):
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib
|
Read worksheet titles and ids for a workbook
|
def find_permission(self, name):
return (
self.get_session.query(self.permission_model).filter_by(name=name).first()
)
|
Finds and returns a Permission by name
|
def toy_linear_1d_classification(seed=default_seed):
def sample_class(f):
p = 1. / (1. + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
np.random.seed(seed=seed)
x1 = np.random.normal(-3, 5, 20)
x2 = np.random.normal(3, 5, 20)
X = (np.r_[x1, x2])[:, None]
return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'covariates' : ['X'], 'response': [discrete({'positive': 1, 'negative': -1})],'seed' : seed}
|
Simple classification data in one dimension for illustrating models.
|
def exec_before_request_actions(actions, **kwargs):
groups = ("before", "before_" + flask.request.method.lower())
return execute_actions(actions, limit_groups=groups, **kwargs)
|
Execute actions in the "before" and "before_METHOD" groups
|
def limit(self, limit: int) -> "QuerySet":
queryset = self._clone()
queryset._limit = limit
return queryset
|
Limits QuerySet to given length.
|
def grandparent_path(self):
return os.path.basename(os.path.normpath(os.path.join(self.path, '../..')))  # normpath so basename is the grandparent name, not '..'
|
return grandparent's path string
|
def create(self, client_id, title, unsubscribe_page, confirmed_opt_in,
confirmation_success_page, unsubscribe_setting="AllClientLists"):
body = {
"Title": title,
"UnsubscribePage": unsubscribe_page,
"ConfirmedOptIn": confirmed_opt_in,
"ConfirmationSuccessPage": confirmation_success_page,
"UnsubscribeSetting": unsubscribe_setting}
response = self._post("/lists/%s.json" % client_id, json.dumps(body))
self.list_id = json_to_py(response)
return self.list_id
|
Creates a new list for a client.
|
def _stop_actions(self):
Global.LOGGER.info("stopping actions")
list(map(lambda x: x.stop(), self.actions))
Global.LOGGER.info("actions stopped")
|
Stop all the actions
|
def activate_right(self, token):
watchers.MATCHER.debug(
"Node <%s> activated right with token %r", self, token)
return self._activate_right(token.copy())
|
Make a copy of the received token and call `_activate_right`.
|
def scan(self, string):
return list(self._scanner_to_matches(self.pattern.scanner(string), self.run))
|
Like findall, but also returning matching start and end string locations
|
def filter_delete(self, id):
id = self.__unpack_id(id)
url = '/api/v1/filters/{0}'.format(str(id))
self.__api_request('DELETE', url)
|
Deletes the filter with the given `id`.
|
def _get_caller_supplement(caller, data):
if caller == "mutect":
icaller = tz.get_in(["config", "algorithm", "indelcaller"], data)
if icaller:
caller = "%s/%s" % (caller, icaller)
return caller
|
Some callers like MuTect incorporate a second caller for indels.
|
def _create_optObject(self, **kwargs):
optimizer = kwargs.get('optimizer',
self.config['optimizer']['optimizer'])
if optimizer.upper() == 'MINUIT':
optObject = pyLike.Minuit(self.like.logLike)
elif optimizer.upper() == 'NEWMINUIT':
optObject = pyLike.NewMinuit(self.like.logLike)
else:
optFactory = pyLike.OptimizerFactory_instance()
optObject = optFactory.create(str(optimizer), self.like.logLike)
return optObject
|
Make MINUIT or NewMinuit type optimizer object
|
def load_medium(self, model):
media = self.config.get("medium")
if media is None:
return
definitions = media.get("definitions")
if definitions is None or len(definitions) == 0:
return
path = self.get_path(media, join("data", "experimental", "media"))
for medium_id, medium in iteritems(definitions):
if medium is None:
medium = dict()
filename = medium.get("filename")
if filename is None:
filename = join(path, "{}.csv".format(medium_id))
elif not isabs(filename):
filename = join(path, filename)
tmp = Medium(identifier=medium_id, obj=medium, filename=filename)
tmp.load()
tmp.validate(model)
self.media[medium_id] = tmp
|
Load and validate all media.
|
def _find_topics_with_wrong_rp(topics, zk, default_min_isr):
topics_with_wrong_rf = []
for topic_name, partitions in topics.items():
min_isr = get_min_isr(zk, topic_name) or default_min_isr
replication_factor = len(partitions[0].replicas)
if replication_factor >= min_isr + 1:
continue
topics_with_wrong_rf.append({
'replication_factor': replication_factor,
'min_isr': min_isr,
'topic': topic_name,
})
return topics_with_wrong_rf
|
Returns topics with wrong replication factor.
|
def handle_subscribe(self, request):
ret = self._tree.handle_subscribe(request, request.path[1:])
self._subscription_keys[request.generate_key()] = request
return ret
|
Handle a Subscribe request from outside. Called with lock taken
|
def validate_regexp(ctx, param, value):
if value:
try:
value = re.compile(value)
except ValueError:
raise click.BadParameter('invalid regular expression.')
return value
|
Validate and compile regular expression.
|
def count_terms(geneset, assoc, obo_dag):
term_cnt = Counter()
for gene in (g for g in geneset if g in assoc):
for goid in assoc[gene]:
if goid in obo_dag:
term_cnt[obo_dag[goid].id] += 1
return term_cnt
|
count the number of terms in the study group
|
def __get_ac_row(self, ac: model.AssetClass) -> AssetAllocationViewModel:
view_model = AssetAllocationViewModel()
view_model.depth = ac.depth
view_model.name = ac.name
view_model.set_allocation = ac.allocation
view_model.curr_allocation = ac.curr_alloc
view_model.diff_allocation = ac.alloc_diff
view_model.alloc_diff_perc = ac.alloc_diff_perc
view_model.curr_value = ac.curr_value
view_model.set_value = ac.alloc_value
view_model.diff_value = ac.value_diff
return view_model
|
Formats one Asset Class record
|
def parse_diff_filenames(diff_files):
files = []
for line in diff_files.splitlines():
line = line.strip()
fn = re.findall(r'[^ ]+\s+(.*\.py)', line)
if fn and not line.startswith('?'):
files.append(fn[0])
return files
|
Parse the output of filenames_diff_cmd.
|
def pbar(iter, desc='', **kwargs):
return tqdm(
iter,
desc=('<' + str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + '> ' +
desc),
dynamic_ncols=True,
**kwargs)
|
Wrapper for `tqdm` progress bar.
|
def release_all(self, callback: Callable[[str, Any, Any], None]):
self._all_callbacks.remove(callback)
|
Releases callback from all keys changes
|
def expand_dates(df, columns=[]):
columns = df.columns.intersection(columns)
df2 = df.reindex(columns=set(df.columns).difference(columns))
for column in columns:
df2[column + '_year'] = df[column].apply(lambda x: x.year)
df2[column + '_month'] = df[column].apply(lambda x: x.month)
df2[column + '_day'] = df[column].apply(lambda x: x.day)
return df2
|
generate year, month, day features from specified date features
|
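An illustrative call with a hypothetical frame, showing the intended result:
import pandas as pd
df = pd.DataFrame({'opened': pd.to_datetime(['2020-01-31', '2020-02-29']),
                   'amount': [10, 20]})
expand_dates(df, columns=['opened'])
# -> frame with columns amount, opened_year, opened_month, opened_day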
def merge(self, other):
for key, value in other.items():
if key in self and (isinstance(self[key], Compound)
and isinstance(value, dict)):
self[key].merge(value)
else:
self[key] = value
|
Recursively merge tags from another compound.
|
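A small sketch of the recursive behaviour, assuming Compound is a dict subclass that accepts a dict in its constructor (an assumption, since the class is not shown):
base = Compound({'display': Compound({'name': 'Sword'}), 'count': 1})
base.merge({'display': {'lore': 'Sharp'}, 'count': 2})
# base -> {'display': {'name': 'Sword', 'lore': 'Sharp'}, 'count': 2}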
def check_dir(directory, newly_created_files):
header_parse_failures = []
for root, dirs, files in os.walk(directory):
for f in files:
if f.endswith('.py') and os.path.basename(f) != '__init__.py':
filename = os.path.join(root, f)
try:
check_header(filename, filename in newly_created_files)
except HeaderCheckFailure as e:
header_parse_failures.append(e.message)
return header_parse_failures
|
Returns list of files that fail the check.
|
def hue_sat_to_cmap(hue, sat):
import colorsys
hue = float(hue) / 360.0
sat = float(sat) / 100.0
res = []
for val in range(256):
hsv_val = float(val) / 255.0
r, g, b = colorsys.hsv_to_rgb(hue, sat, hsv_val)
res.append((r, g, b))
return res
|
Make a color map from a hue and saturation value.
|
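A usage sketch; wrapping the list in a matplotlib colormap is an assumption about the caller, not shown here:
from matplotlib.colors import ListedColormap
cmap = ListedColormap(hue_sat_to_cmap(210, 80))   # 256-step ramp at hue 210, saturation 80%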
def StartFileFetch(self, pathspec, request_data=None):
self.state.indexed_pathspecs.append(pathspec)
self.state.request_data_list.append(request_data)
self._TryToStartNextPathspec()
|
The entry point for this flow mixin - Schedules new file transfer.
|
def prune_cached(values):
import os
config_path = os.path.expanduser('~/.config/blockade')
file_path = os.path.join(config_path, 'cache.txt')
if not os.path.isfile(file_path):
return values
cached = [x.strip() for x in open(file_path, 'r').readlines()]
output = list()
for item in values:
hashed = hash_values(item)
if hashed in cached:
continue
output.append(item)
return output
|
Remove the items that have already been cached.
|
def _swap(self, class1, class2, a):
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
|
Evaluate whether moving `a` from class1 to class2 does not increase the total sum of squares
|
def attrs(self):
return dict((k, v) for k, v in iteritems(self.__dict__) if k != "sdk")
|
Returns a dictionary of the archive's attributes.
|
def _from_dict(cls, _dict):
args = {}
if 'contentItems' in _dict:
args['content_items'] = [
ContentItem._from_dict(x) for x in (_dict.get('contentItems'))
]
else:
raise ValueError(
'Required property \'contentItems\' not present in Content JSON'
)
return cls(**args)
|
Initialize a Content object from a json dictionary.
|
def checkGeneTreeMatchesSpeciesTree(speciesTree, geneTree, processID):
def fn(tree, l):
if tree.internal:
fn(tree.left, l)
fn(tree.right, l)
else:
l.append(processID(tree.iD))
l = []
fn(speciesTree, l)
l2 = []
fn(geneTree, l2)
for i in l2:
assert i in l
|
Function to check ids in gene tree all match nodes in species tree
|
def zcr(data):
data = np.mean(data, axis=1)
count = len(data)
countZ = np.sum(np.abs(np.diff(np.sign(data)))) / 2
return (np.float64(countZ) / np.float64(count - 1.0))
|
Computes zero crossing rate of segment
|
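A quick sanity check with a synthetic signal (two identical channels, since the function averages over axis=1 first):
import numpy as np
t = np.arange(8000) / 8000.0
seg = np.stack([np.sin(2 * np.pi * 1000 * t)] * 2, axis=1)   # 1 kHz tone sampled at 8 kHz
zcr(seg)   # ~0.25: roughly 2000 sign changes over 8000 samples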
def _methodInTraceback(self, name, traceback):
foundMethod = False
for frame in self._frames(traceback):
this = frame.f_locals.get('self')
if this is self and frame.f_code.co_name == name:
foundMethod = True
break
return foundMethod
|
Returns boolean whether traceback contains method from this instance
|
def quotes_by_instrument_urls(cls, client, urls):
instruments = ",".join(urls)
params = {"instruments": instruments}
url = "https://api.robinhood.com/marketdata/quotes/"
data = client.get(url, params=params)
results = data["results"]
while "next" in data and data["next"]:
data = client.get(data["next"])
results.extend(data["results"])
return results
|
Fetch and return quote results for the given instrument URLs, following pagination
|
def delete_edge(self, source: str, target: str):
if self.has_edge(source, target):
self.remove_edge(source, target)
|
Removes an edge if it is in the graph.
|
def add_arguments(cls, parser, sys_arg_list=None):
parser.add_argument('--icmp_check_interval',
dest='icmp_check_interval',
required=False, default=2, type=float,
help="ICMPecho interval in seconds, default 2 "
"(only for 'icmpecho' health monitor plugin)")
return ["icmp_check_interval"]
|
Arguments for the ICMPecho health monitor plugin.
|
def get(self, request, *args, **kwargs):
serializer = self.serializer_reader_class
if request.user.is_authenticated():
return Response(serializer(request.user, context=self.get_serializer_context()).data)
else:
return Response({'detail': _('Authentication credentials were not provided')}, status=401)
|
return profile of current user if authenticated otherwise 401
|
def valid_ipv4(ip):
match = _valid_ipv4.match(ip)
if match is None:
return False
octets = match.groups()
if len(octets) != 4:
return False
first = int(octets[0])
if first < 1 or first > 254:
return False
for i in range(1, 4):
octet = int(octets[i])
if octet < 0 or octet > 255:
return False
return True
|
check if ip is a valid ipv4
|
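The module-level _valid_ipv4 pattern is not shown; one plausible definition consistent with the four groups used above (an assumption, not the original) is:
import re
_valid_ipv4 = re.compile(r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$')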
def add_user(cls, username, email):
sanitized = str(cls.__sanitize_username(username))
logger.debug("Adding user to SeAT with username %s" % sanitized)
password = cls.__generate_random_pass()
ret = cls.exec_request('user', 'post', username=sanitized, email=str(email), password=password)
logger.debug(ret)
if cls._response_ok(ret):
logger.info("Added SeAT user with username %s" % sanitized)
return sanitized, password
logger.info("Failed to add SeAT user with username %s" % sanitized)
return None, None
|
Add user to service
|
def setTimeout(self, time):
self.conversation.SetDDETimeout(round(time))
return self.conversation.GetDDETimeout()
|
Set global timeout value, in seconds, for all DDE calls
|
def add_gateway_router(self, router, body=None):
return self.put((self.router_path % router),
body={'router': {'external_gateway_info': body}})
|
Adds an external network gateway to the specified router.
|
def _sanity_check_registered_locations_parent_locations(query_metadata_table):
for location, location_info in query_metadata_table.registered_locations:
if (location != query_metadata_table.root_location and
not query_metadata_table.root_location.is_revisited_at(location)):
if location_info.parent_location is None:
raise AssertionError(u'Found a location that is not the root location of the query '
u'or a revisit of the root, but does not have a parent: '
u'{} {}'.format(location, location_info))
if location_info.parent_location is not None:
query_metadata_table.get_location_info(location_info.parent_location)
|
Assert that all registered locations' parent locations are also registered.
|
def bernard_auth(func):
@wraps(func)
async def wrapper(request: Request):
def get_query_token():
token_key = settings.WEBVIEW_TOKEN_KEY
return request.query.get(token_key, '')
def get_header_token():
header_key = settings.WEBVIEW_HEADER_NAME
return request.headers.get(header_key, '')
try:
token = next(filter(None, [
get_header_token(),
get_query_token(),
]))
except StopIteration:
token = ''
try:
body = await request.json()
except ValueError:
body = None
msg, platform = await manager.message_from_token(token, body)
if not msg:
return json_response({
'status': 'unauthorized',
'message': 'No valid token found',
}, status=401)
return await func(msg, platform)
return wrapper
|
Authenticates the users based on the query-string-provided token
|
def zscan(self, key, cursor=0, match=None, count=None):
args = []
if match is not None:
args += [b'MATCH', match]
if count is not None:
args += [b'COUNT', count]
fut = self.execute(b'ZSCAN', key, cursor, *args)
def _converter(obj):
return (int(obj[0]), pairs_int_or_float(obj[1]))
return wait_convert(fut, _converter)
|
Incrementally iterate sorted sets elements and associated scores.
|
def to_literal(self):
if not self.nodes:
return (self.tag, self.attrib, self.text, [])
else:
return (self.tag, self.attrib, self.text,
list(map(to_literal, self.nodes)))
|
Convert the node into a literal Python object
|
def process_next_message(self, timeout):
message = self.worker_manager.receive(timeout)
if isinstance(message, Acknowledgement):
self.task_manager.task_start(message.task, message.worker)
elif isinstance(message, Result):
self.task_manager.task_done(message.task, message.result)
|
Processes the next message coming from the workers.
|
def _get_struct_glowfilter(self):
obj = _make_object("GlowFilter")
obj.GlowColor = self._get_struct_rgba()
obj.BlurX = unpack_fixed16(self._src)
obj.BlurY = unpack_fixed16(self._src)
obj.Strength = unpack_fixed8(self._src)
bc = BitConsumer(self._src)
obj.InnerGlow = bc.u_get(1)
obj.Knockout = bc.u_get(1)
obj.CompositeSource = bc.u_get(1)
obj.Passes = bc.u_get(5)
return obj
|
Get the values for the GLOWFILTER record.
|