code (strings of 51 to 2.38k chars) | docstring (strings of 4 to 15.2k chars)
def _sanitise(self):
for k in self.__dict__:
if isinstance(self.__dict__[k], np.float32):
self.__dict__[k] = np.float64(self.__dict__[k])
|
Convert attributes of type numpy.float32 to numpy.float64 so that they will print properly.
|
def format(tokens, formatter, outfile=None):
try:
if not outfile:
realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError as err:
if (isinstance(err.args[0], str) and
('unbound method format' in err.args[0] or
'missing 1 required positional argument' in err.args[0])):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
|
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
|
def watch_project(self, path):
try:
return self.client.query('watch-project', os.path.realpath(path))
finally:
self._attempt_set_timeout(self._timeout)
|
Issues the watch-project command to watchman to begin watching the buildroot.
:param string path: the path to the watchman project root/pants build root.
|
def _set_fields(self, json_dict):
for key, value in json_dict.items():
if not key.startswith("_"):
setattr(self, key, value)
|
Set this object's attributes specified in json_dict
|
def header_expand(headers):
collector = []
if isinstance(headers, dict):
headers = headers.items()
elif isinstance(headers, basestring):
return headers
for i, (value, params) in enumerate(headers):
_params = []
for (p_k, p_v) in params.items():
_params.append('%s=%s' % (p_k, p_v))
collector.append(value)
collector.append('; ')
if len(params):
collector.append('; '.join(_params))
if not len(headers) == i+1:
collector.append(', ')
if collector[-1] in (', ', '; '):
del collector[-1]
return ''.join(collector)
|
Returns an HTTP Header value string from a dictionary.
Example expansion::
{'text/x-dvi': {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}, 'text/x-c': {}}
# Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c
(('text/x-dvi', {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}), ('text/x-c', {}))
# Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c
|
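As a hedged illustration of the expansion above, here is a small Python 3 re-implementation of the same joining logic (the original uses the Python 2 basestring check); the function name is hypothetical.
def expand_header(value_params):
    # Hypothetical Python 3 sketch of the same expansion, for illustration only.
    parts = []
    for value, params in value_params.items():
        segment = value
        if params:
            segment += '; ' + '; '.join('%s=%s' % kv for kv in params.items())
        parts.append(segment)
    return ', '.join(parts)

print(expand_header({'text/x-dvi': {'q': '.8'}, 'text/x-c': {}}))
# text/x-dvi; q=.8, text/x-c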
def is_time_variable(varname, var):
satisfied = varname.lower() == 'time'
satisfied |= getattr(var, 'standard_name', '') == 'time'
satisfied |= getattr(var, 'axis', '') == 'T'
satisfied |= units_convertible('seconds since 1900-01-01', getattr(var, 'units', ''))
return satisfied
|
Identifies whether a variable represents time.
|
def lookup(self, profile, setting):
for path in profiles():
cfg = SafeConfigParser()
cfg.read(path)
if profile not in cfg.sections():
continue
if not cfg.has_option(profile, setting):
continue
return cfg.get(profile, setting)
|
Check koji.conf.d files for this profile's setting.
:param setting: ``str`` like "server" (for kojihub) or "weburl"
:returns: ``str``, value for this setting
|
def patch(self, spin, header, *args):
spawn(spin, 'DCC %s' % args[0], header, *args[1:])
|
Spawns DCC TYPE as an event.
|
def _GetGdbThreadMapping(self, position):
if len(gdb.selected_inferior().threads()) == 1:
return {position[1]: 1}
thread_line_regexp = r'\s*\**\s*([0-9]+)\s+[a-zA-Z]+\s+([x0-9a-fA-F]+)\s.*'
output = gdb.execute('info threads', to_string=True)
matches = [re.match(thread_line_regexp, line) for line
in output.split('\n')[1:]]
return {int(match.group(2), 16): int(match.group(1))
for match in matches if match}
|
Gets a mapping from python tid to gdb thread num.
There's no way to get the thread ident from a gdb thread. We only get the
"ID of the thread, as assigned by GDB", which is completely useless for
everything except talking to gdb. So in order to translate between these
two, we have to execute 'info threads' and parse its output. Note that this
may only work on linux, and only when python was compiled to use pthreads.
It may work elsewhere, but we won't guarantee it.
Args:
position: array of pid, tid, framedepth specifying the requested position.
Returns:
A dictionary of the form {python_tid: gdb_threadnum}.
|
def open(self, options):
if self.opened:
return
self.opened = True
log.debug('%s, including location="%s"', self.id, self.location)
result = self.download(options)
log.debug('included:\n%s', result)
return result
|
Open and include the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@return: The referenced schema.
@rtype: L{Schema}
|
def parse_negation_operation(operation: str) -> Tuple[bool, str]:
_operation = operation.strip()
if not _operation:
raise QueryParserException('Operation is not valid: {}'.format(operation))
negation = False
if _operation[0] == '~':
negation = True
_operation = _operation[1:]
return negation, _operation.strip()
|
Parse the negation modifier in an operation.
|
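A hedged usage sketch for parse_negation_operation; the import path below is an assumption, not taken from the source.
from query_parser import parse_negation_operation  # hypothetical module path

negated, op = parse_negation_operation('~eq:5')
assert negated is True and op == 'eq:5'   # a leading '~' sets the negation flag
negated, op = parse_negation_operation('eq:5')
assert negated is False and op == 'eq:5'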
def make_job(job_name, **kwargs):
def wraps(func):
kwargs['process'] = func
job = type(job_name, (Job,), kwargs)
globals()[job_name] = job
return job
return wraps
|
Decorator to create a Job from a function.
Give a job name and add extra fields to the job.
@make_job("ExecuteDecJob",
command=mongoengine.StringField(required=True),
output=mongoengine.StringField(default=None))
def execute(job: Job):
job.log_info('ExecuteJob %s - Executing command...' % job.uuid)
result = subprocess.run(job.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
job.output = result.stdout.decode('utf-8') + " " + result.stderr.decode('utf-8')
|
def _property_set(self, msg):
prop = self._sent_property.get('prop')
if prop and hasattr(self, prop):
setattr(self, prop, self._sent_property.get('val'))
self._sent_property = {}
|
Set command received and acknowledged.
|
def label_clusters(image, min_cluster_size=50, min_thresh=1e-6, max_thresh=1, fully_connected=False):
dim = image.dimension
clust = threshold_image(image, min_thresh, max_thresh)
temp = int(fully_connected)
args = [dim, clust, clust, min_cluster_size, temp]
processed_args = _int_antsProcessArguments(args)
libfn = utils.get_lib_fn('LabelClustersUniquely')
libfn(processed_args)
return clust
|
This will give a unique ID to each connected
component 1 through N of size > min_cluster_size
ANTsR function: `labelClusters`
Arguments
---------
image : ANTsImage
input image e.g. a statistical map
min_cluster_size : integer
throw away clusters smaller than this value
min_thresh : scalar
threshold to a statistical map
max_thresh : scalar
threshold to a statistical map
fully_connected : boolean
boolean sets neighborhood connectivity pattern
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read( ants.get_ants_data('r16') )
>>> timageFully = ants.label_clusters( image, 10, 128, 150, True )
>>> timageFace = ants.label_clusters( image, 10, 128, 150, False )
|
def split_by_fname_file(self, fname:PathOrStr, path:PathOrStr=None)->'ItemLists':
"Split the data by using the names in `fname` for the validation set. `path` will override `self.path`."
path = Path(ifnone(path, self.path))
valid_names = loadtxt_str(path/fname)
return self.split_by_files(valid_names)
|
Split the data by using the names in `fname` for the validation set. `path` will override `self.path`.
|
def update_status(self, status):
assert (status in (HightonConstants.WON, HightonConstants.PENDING, HightonConstants.LOST))
from highton.models import Status
status_obj = Status(name=status)
return self._put_request(
data=status_obj.element_to_string(status_obj.encode()),
endpoint=self.ENDPOINT + '/' + str(self.id) + '/status',
)
|
Updates the status of the deal
:param status: status must be one of ('won', 'pending', 'lost')
:return: successful response or raises an Exception
:rtype:
|
def process_resource(self, req, resp, resource, uri_kwargs=None):
if 'user' in req.context:
return
identifier = self.identify(req, resp, resource, uri_kwargs)
user = self.try_storage(identifier, req, resp, resource, uri_kwargs)
if user is not None:
req.context['user'] = user
elif self.challenge is not None:
req.context.setdefault(
'challenges', list()
).append(self.challenge)
|
Process resource after routing to it.
This is basic falcon middleware handler.
Args:
req (falcon.Request): request object
resp (falcon.Response): response object
resource (object): resource object matched by falcon router
uri_kwargs (dict): additional keyword argument from uri template.
For ``falcon<1.0.0`` this is always ``None``
|
def doit(self, classes=None, recursive=True, **kwargs):
return super().doit(classes, recursive, **kwargs)
|
Write out commutator
Write out the commutator according to its definition
$[\Op{A}, \Op{B}] = \Op{A}\Op{B} - \Op{B}\Op{A}$.
See :meth:`.Expression.doit`.
|
def _my_pdf_formatter(data, format, ordered_alphabets) :
eps = _my_eps_formatter(data, format, ordered_alphabets).decode()
gs = weblogolib.GhostscriptAPI()
return gs.convert('pdf', eps, format.logo_width, format.logo_height)
|
Generate a logo in PDF format.
Modified from weblogo version 3.4 source code.
|
def pop(self, name, default=SENTINEL):
if default is SENTINEL:
return self.__data__.pop(name)
return self.__data__.pop(name, default)
|
Retrieve and remove a value from the backing store, optionally with a default.
|
def spawn_isolated_child(econtext):
mitogen.parent.upgrade_router(econtext)
if FORK_SUPPORTED:
context = econtext.router.fork()
else:
context = econtext.router.local()
LOG.debug('create_fork_child() -> %r', context)
return context
|
For helper functions executed in the fork parent context, arrange for
the context's router to be upgraded as necessary and for a new child to be
prepared.
The actual fork occurs from the 'virginal fork parent', which does not have
any Ansible modules loaded prior to fork, to avoid conflicts resulting from
custom module_utils paths.
|
def tic(self):
if self.step % self.interval == 0:
for exe in self.exes:
for array in exe.arg_arrays:
array.wait_to_read()
for array in exe.aux_arrays:
array.wait_to_read()
self.queue = []
self.activated = True
self.step += 1
|
Start collecting stats for current batch.
Call before calling forward.
|
def _stripe_object_field_to_foreign_key(
cls, field, manipulated_data, current_ids=None, pending_relations=None
):
field_data = None
field_name = field.name
raw_field_data = manipulated_data.get(field_name)
refetch = False
skip = False
if issubclass(field.related_model, StripeModel):
id_ = cls._id_from_data(raw_field_data)
if not raw_field_data:
skip = True
elif id_ == raw_field_data:
refetch = True
else:
pass
if id_ in current_ids:
if pending_relations is not None:
object_id = manipulated_data["id"]
pending_relations.append((object_id, field, id_))
skip = True
if not skip:
field_data, _ = field.related_model._get_or_create_from_stripe_object(
manipulated_data,
field_name,
refetch=refetch,
current_ids=current_ids,
pending_relations=pending_relations,
)
else:
skip = True
return field_data, skip
|
This converts a stripe API field to the dj stripe object it references,
so that foreign keys can be connected up automatically.
:param field:
:type field: models.ForeignKey
:param manipulated_data:
:type manipulated_data: dict
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
:param pending_relations: list of tuples of relations to be attached post-save
:type pending_relations: list
:return:
|
def pandas_dtype(dtype):
if isinstance(dtype, np.ndarray):
return dtype.dtype
elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):
return dtype
result = registry.find(dtype)
if result is not None:
return result
try:
npdtype = np.dtype(dtype)
except Exception:
if not isinstance(dtype, str):
raise TypeError("data type not understood")
raise TypeError("data type '{}' not understood".format(
dtype))
if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O']:
return npdtype
elif npdtype.kind == 'O':
raise TypeError("dtype '{}' not understood".format(dtype))
return npdtype
|
Convert input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object to be converted
Returns
-------
np.dtype or a pandas dtype
Raises
------
TypeError if not a dtype
|
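A hedged usage sketch: pandas re-exports an equivalent helper as pandas.api.types.pandas_dtype, which behaves as described above.
import numpy as np
import pandas as pd

print(pd.api.types.pandas_dtype('int64'))     # int64 (numpy dtype)
print(pd.api.types.pandas_dtype(np.float32))  # float32
print(pd.api.types.pandas_dtype('category'))  # category (pandas extension dtype)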
def echo_via_pager(*args, **kwargs):
try:
restore = 'LESS' not in os.environ
os.environ.setdefault('LESS', '-iXFR')
click.echo_via_pager(*args, **kwargs)
finally:
if restore:
os.environ.pop('LESS', None)
|
Display pager only if it does not fit in one terminal screen.
NOTE: The feature is available only on ``less``-based pager.
|
def _find_feed_language(self):
self.feed_language = (
read_first_available_value(
os.path.join(self.src_dir, 'feed_info.txt'), 'feed_lang') or
read_first_available_value(
os.path.join(self.src_dir, 'agency.txt'), 'agency_lang'))
if not self.feed_language:
raise Exception(
'Cannot find feed language in feed_info.txt and agency.txt')
print('\tfeed language: %s' % self.feed_language)
|
Find the feed language based on the specified feed_info.txt or agency.txt.
|
def _merge_csv_section(sections, pc, csvs):
logger_csvs.info("enter merge_csv_section")
try:
for _name, _section in sections.items():
if "measurementTable" in _section:
sections[_name]["measurementTable"] = _merge_csv_table(_section["measurementTable"], pc, csvs)
if "model" in _section:
sections[_name]["model"] = _merge_csv_model(_section["model"], pc, csvs)
except Exception as e:
print("Error: There was an error merging CSV data into the metadata ")
logger_csvs.error("merge_csv_section: {}".format(e))
logger_csvs.info("exit merge_csv_section")
return sections
|
Add csv data to all paleo data tables
:param dict sections: Metadata
:return dict sections: Metadata
|
def stop_loop(self):
hub.kill(self._querier_thread)
self._querier_thread = None
self._datapath = None
self.logger.info("stopped a querier.")
|
Stop the QUERY thread.
|
def make(cls, **kwargs):
cls_attrs = {f.name: f for f in attr.fields(cls)}
unknown = {k: v for k, v in kwargs.items() if k not in cls_attrs}
if len(unknown) > 0:
_LOGGER.warning(
"Got unknowns for %s: %s - please create an issue!", cls.__name__, unknown
)
missing = [k for k in cls_attrs if k not in kwargs]
data = {k: v for k, v in kwargs.items() if k in cls_attrs}
for m in missing:
default = cls_attrs[m].default
if isinstance(default, attr.Factory):
if not default.takes_self:
data[m] = default.factory()
else:
raise NotImplementedError
else:
_LOGGER.debug("Missing key %s with no default for %s", m, cls.__name__)
data[m] = None
inst = cls(**data)
setattr(inst, "raw", kwargs)
return inst
|
Create a container.
Reports extra keys as well as missing ones.
Thanks to habnabit for the idea!
|
def overlaps(self, other):
if self > other:
smaller, larger = other, self
else:
smaller, larger = self, other
if larger.empty():
return False
if smaller._upper_value == larger._lower_value:
return smaller._upper == smaller.CLOSED and larger._lower == smaller.CLOSED
return larger._lower_value < smaller._upper_value
|
If self and other have any overlapping values, returns True; otherwise returns False.
|
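A minimal sketch of the same overlap test for closed numeric intervals represented as (lo, hi) tuples, rather than the source's interval class.
def intervals_overlap(a, b):
    # Sort so `smaller` starts first; closed intervals overlap when the later
    # interval starts no later than the earlier one ends.
    smaller, larger = sorted([a, b])
    return larger[0] <= smaller[1]

print(intervals_overlap((1, 5), (4, 9)))  # True
print(intervals_overlap((1, 3), (4, 9)))  # False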
def resolveSharedConnections(root: LNode):
for ch in root.children:
resolveSharedConnections(ch)
for ch in root.children:
for p in ch.iterPorts():
portTryReduce(root, p)
|
Walk all ports on all nodes and group subinterface connections
into a single parent interface connection where possible.
|
def _set_body(self, body):
assert isinstance(body, CodeStatement)
if isinstance(body, CodeBlock):
self.body = body
else:
self.body._add(body)
|
Set the main body for this control flow structure.
|
def show(dataset_uri, overlay_name):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
try:
overlay = dataset.get_overlay(overlay_name)
except:
click.secho(
"No such overlay: {}".format(overlay_name),
fg="red",
err=True
)
sys.exit(11)
formatted_json = json.dumps(overlay, indent=2)
colorful_json = pygments.highlight(
formatted_json,
pygments.lexers.JsonLexer(),
pygments.formatters.TerminalFormatter())
click.secho(colorful_json, nl=False)
|
Show the content of a specific overlay.
|
def query(self, transport, protocol, *data):
if not self._query:
raise AttributeError('Command is not queryable')
if self.protocol:
protocol = self.protocol
if self._query.data_type:
data = _dump(self._query.data_type, data)
else:
data = ()
if isinstance(transport, SimulatedTransport):
response = self.simulate_query(data)
else:
response = protocol.query(transport, self._query.header, *data)
response = _load(self._query.response_type, response)
return response[0] if len(response) == 1 else response
|
Generates and sends a query message unit.
:param transport: An object implementing the `.Transport` interface.
It is used by the protocol to send the message and receive the
response.
:param protocol: An object implementing the `.Protocol` interface.
:param data: The program data.
:raises AttributeError: if the command is not queryable.
|
def _find_stages(self):
stages = []
end = last_user_found = None
for part in reversed(self.dfp.structure):
if end is None:
end = part
if part['instruction'] == 'USER' and not last_user_found:
last_user_found = part['content']
if part['instruction'] == 'FROM':
stages.insert(0, {'from_structure': part,
'end_structure': end,
'stage_user': last_user_found})
end = last_user_found = None
return stages
|
Find limits of each Dockerfile stage
|
def giant_text_sqltype(dialect: Dialect) -> str:
if dialect.name == SqlaDialectName.SQLSERVER:
return 'NVARCHAR(MAX)'
elif dialect.name == SqlaDialectName.MYSQL:
return 'LONGTEXT'
else:
raise ValueError("Unknown dialect: {}".format(dialect.name))
|
Returns the SQL column type used to make very large text columns for a
given dialect.
Args:
dialect: a SQLAlchemy :class:`Dialect`
Returns:
the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
and 'NVARCHAR(MAX)' for SQL Server.
|
def write_rst(self,
prefix: str = "",
suffix: str = "",
heading_underline_char: str = "=",
method: AutodocMethod = None,
overwrite: bool = False,
mock: bool = False) -> None:
content = self.rst_content(
prefix=prefix,
suffix=suffix,
heading_underline_char=heading_underline_char,
method=method
)
write_if_allowed(self.target_rst_filename, content,
overwrite=overwrite, mock=mock)
|
Writes the RST file to our destination RST filename, making any
necessary directories.
Args:
prefix: as for :func:`rst_content`
suffix: as for :func:`rst_content`
heading_underline_char: as for :func:`rst_content`
method: as for :func:`rst_content`
overwrite: overwrite the file if it exists already?
mock: pretend to write, but don't
|
def callback_result(self):
if self._state in [PENDING, RUNNING]:
self.x
if self._user_callbacks:
return self._callback_result
else:
return self.x
|
Block the main thread until the future finishes, then return the future's callback_result.
|
def validate_no_duplicate_paths(self, resources):
paths = set()
for item in resources:
file_name = item.get('path')
if file_name in paths:
raise ValueError(
'%s path was specified more than once in the metadata' %
file_name)
paths.add(file_name)
|
ensure that the user has not provided duplicate paths in
a list of resources.
Parameters
==========
resources: one or more resources to validate not duplicated
|
def getRoom(self, _id):
if SockJSRoomHandler._room.has_key(self._gcls() + _id):
return SockJSRoomHandler._room[self._gcls() + _id]
return None
|
Retrieve a room from its id.
|
def transpose(self):
graph = self.graph
transposed = DAG()
for node, edges in graph.items():
transposed.add_node(node)
for node, edges in graph.items():
for edge in edges:
transposed.add_edge(edge, node)
return transposed
|
Builds a new graph with the edges reversed.
Returns:
:class:`stacker.dag.DAG`: The transposed graph.
|
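A hedged sketch of the same edge-reversal idea on a plain adjacency-list dict instead of the DAG class.
def transpose_graph(graph):
    # Start with every node present, then reverse each edge's direction.
    transposed = {node: set() for node in graph}
    for node, edges in graph.items():
        for edge in edges:
            transposed.setdefault(edge, set()).add(node)
    return transposed

print(transpose_graph({'a': {'b', 'c'}, 'b': {'c'}, 'c': set()}))
# {'a': set(), 'b': {'a'}, 'c': {'a', 'b'}}  (set ordering may vary)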
def cv(params, dtrain, num_boost_round=10, nfold=3, metrics=(),
obj=None, feval=None, fpreproc=None, as_pandas=True,
show_progress=None, show_stdv=True, seed=0):
results = []
cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc)
for i in range(num_boost_round):
for fold in cvfolds:
fold.update(i, obj)
res = aggcv([f.eval(i, feval) for f in cvfolds],
show_stdv=show_stdv, show_progress=show_progress,
as_pandas=as_pandas)
results.append(res)
if as_pandas:
try:
import pandas as pd
results = pd.DataFrame(results)
except ImportError:
results = np.array(results)
else:
results = np.array(results)
return results
|
Cross-validation with given parameters.
Parameters
----------
params : dict
Booster params.
dtrain : DMatrix
Data to be trained.
num_boost_round : int
Number of boosting iterations.
nfold : int
Number of folds in CV.
metrics : list of strings
Evaluation metrics to be watched in CV.
obj : function
Custom objective function.
feval : function
Custom evaluation function.
fpreproc : function
Preprocessing function that takes (dtrain, dtest, param) and returns
transformed versions of those.
as_pandas : bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return np.ndarray
show_progress : bool or None, default None
Whether to display the progress. If None, progress will be displayed
when np.ndarray is returned.
show_stdv : bool, default True
Whether to display the standard deviation in progress.
Results are not affected, and always contains std.
seed : int
Seed used to generate the folds (passed to numpy.random.seed).
Returns
-------
evaluation history : list(string)
|
def iter_options(self):
for section in self.sections:
name = str(section)
for key, value in section._get_options():
yield name, key, value
|
Iterates over each configuration section's options, yielding (section name, key, value) tuples.
|
def run_preassembly_duplicate(preassembler, beliefengine, **kwargs):
logger.info('Combining duplicates on %d statements...' %
len(preassembler.stmts))
dump_pkl = kwargs.get('save')
stmts_out = preassembler.combine_duplicates()
beliefengine.set_prior_probs(stmts_out)
logger.info('%d unique statements' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out
|
Run deduplication stage of preassembly on a list of statements.
Parameters
----------
preassembler : indra.preassembler.Preassembler
A Preassembler instance
beliefengine : indra.belief.BeliefEngine
A BeliefEngine instance.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of unique statements.
|
def order_derived_parameters(component):
if len(component.derived_parameters) == 0:
return []
ordering = []
dps = []
for dp in component.derived_parameters:
dps.append(dp.name)
maxcount = 5
count = maxcount
while count > 0 and dps != []:
count = count - 1
for dp1 in dps:
value = component.derived_parameters[dp1].value
found = False
for dp2 in dps:
if dp1 != dp2 and dp2 in value:
found = True
if not found:
ordering.append(dp1)
del dps[dps.index(dp1)]
count = maxcount
break
if count == 0:
raise SimBuildError(("Unable to find ordering for derived "
"parameter in component '{0}'").format(component))
return ordering
|
Finds ordering of derived_parameters.
@param component: Component containing derived parameters.
@type component: lems.model.component.Component
@return: Returns ordered list of derived parameters.
@rtype: list(string)
@raise SimBuildError: Raised when a proper ordering of derived
parameters could not be found.
|
def main():
tar_file = TMPDIR + '/' + BINARY_URL.split('/')[-1]
chksum = TMPDIR + '/' + MD5_URL.split('/')[-1]
if precheck() and os_packages(distro.linux_distribution()):
stdout_message('begin download')
download()
stdout_message('begin valid_checksum')
valid_checksum(tar_file, chksum)
return compile_binary(tar_file)
logger.warning('%s: Pre-run dependency check fail - Exit' % inspect.stack()[0][3])
return False
|
Check dependencies, download files, and verify integrity.
|
def codingthreads(self):
printtime('Extracting CDS features', self.start)
for i in range(self.cpus):
threads = Thread(target=self.codingsequences, args=())
threads.setDaemon(True)
threads.start()
for sample in self.runmetadata.samples:
self.codingqueue.put(sample)
self.codingqueue.join()
self.corethreads()
|
Find CDS features in .gff files to filter out non-coding sequences from the analysis
|
def document(self):
doc = {'mode': self.__mongos_mode}
if self.__tag_sets not in (None, [{}]):
doc['tags'] = self.__tag_sets
if self.__max_staleness != -1:
doc['maxStalenessSeconds'] = self.__max_staleness
return doc
|
Read preference as a document.
|
def _prepare_data_dir(self, data):
logger.debug(__("Preparing data directory for Data with id {}.", data.id))
with transaction.atomic():
temporary_location_string = uuid.uuid4().hex[:10]
data_location = DataLocation.objects.create(subpath=temporary_location_string)
data_location.subpath = str(data_location.id)
data_location.save()
data_location.data.add(data)
output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath)
dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755)
os.mkdir(output_path, mode=dir_mode)
os.chmod(output_path, dir_mode)
return output_path
|
Prepare destination directory where the data will live.
:param data: The :class:`~resolwe.flow.models.Data` object for
which to prepare the private execution directory.
:return: The prepared data directory path.
:rtype: str
|
def orbit(self, x1_px, y1_px, x2_px, y2_px):
px_per_deg = self.vport_radius_px / float(self.orbit_speed)
radians_per_px = 1.0 / px_per_deg * np.pi / 180.0
t2p = self.position - self.target
M = Matrix4x4.rotation_around_origin((x1_px - x2_px) * radians_per_px,
self.ground)
t2p = M * t2p
self.up = M * self.up
right = (self.up ^ t2p).normalized()
M = Matrix4x4.rotation_around_origin((y1_px - y2_px) * radians_per_px,
right)
t2p = M * t2p
self.up = M * self.up
self.position = self.target + t2p
|
Causes the camera to "orbit" around the target point.
This is also called "tumbling" in some software packages.
|
def toggle(self):
for device in self:
if isinstance(device, (OutputDevice, CompositeOutputDevice)):
device.toggle()
|
Toggle all the output devices. For each device, if it's on, turn it
off; if it's off, turn it on.
|
def set_data(self, pos=None, color=None, width=None, connect=None):
if pos is not None:
self._bounds = None
self._pos = pos
self._changed['pos'] = True
if color is not None:
self._color = color
self._changed['color'] = True
if width is not None:
self._width = width
self._changed['width'] = True
if connect is not None:
self._connect = connect
self._changed['connect'] = True
self.update()
|
Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
width:
The width of the line in px. Line widths < 1 px will be rounded up
to 1 px when using the 'gl' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* int numpy arrays specify the exact set of segment pairs to
connect.
* bool numpy arrays specify which _adjacent_ pairs to connect.
|
def c2r(self):
return matrix(a=self.tt.__complex_op('M'), n=_np.concatenate(
(self.n, [2])), m=_np.concatenate((self.m, [2])))
|
Get real matrix from complex one suitable for solving complex linear system with real solver.
For matrix :math:`M(i_1,j_1,\\ldots,i_d,j_d) = \\Re M + i\\Im M` returns (d+1)-dimensional matrix
:math:`\\tilde{M}(i_1,j_1,\\ldots,i_d,j_d,i_{d+1},j_{d+1})` of form
:math:`\\begin{bmatrix}\\Re M & -\\Im M \\\\ \\Im M & \\Re M \\end{bmatrix}`. This function
is useful for solving complex linear system :math:`\\mathcal{A}X = B` with real solver by
transforming it into
.. math::
\\begin{bmatrix}\\Re\\mathcal{A} & -\\Im\\mathcal{A} \\\\
\\Im\\mathcal{A} & \\Re\\mathcal{A} \\end{bmatrix}
\\begin{bmatrix}\\Re X \\\\ \\Im X\\end{bmatrix} =
\\begin{bmatrix}\\Re B \\\\ \\Im B\\end{bmatrix}.
|
def contains_duplicates(values: Iterable[Any]) -> bool:
for v in Counter(values).values():
if v > 1:
return True
return False
|
Does the iterable contain any duplicate values?
|
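A hedged illustration of the same Counter-based duplicate check on sample data; the wrapper name is hypothetical.
from collections import Counter

def has_duplicates(values):
    # Any count greater than one means the iterable contains a duplicate.
    return any(count > 1 for count in Counter(values).values())

print(has_duplicates([1, 2, 2, 3]))  # True
print(has_duplicates('abc'))         # False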
def flatten_check(out:Tensor, targ:Tensor) -> Tensor:
"Check that `out` and `targ` have the same number of elements and flatten them."
out,targ = out.contiguous().view(-1),targ.contiguous().view(-1)
assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}."
return out,targ
|
Check that `out` and `targ` have the same number of elements and flatten them.
|
def secgroup_info(call=None, kwargs=None):
if call != 'function':
raise SaltCloudSystemExit(
'The secgroup_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
secgroup_id = kwargs.get('secgroup_id', None)
if secgroup_id:
if name:
log.warning(
'Both the \'secgroup_id\' and \'name\' arguments were provided. '
'\'secgroup_id\' will take precedence.'
)
elif name:
secgroup_id = get_secgroup_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
'The secgroup_info function requires either a name or a secgroup_id '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
info = {}
response = server.one.secgroup.info(auth, int(secgroup_id))[1]
tree = _get_xml(response)
info[tree.find('NAME').text] = _xml_to_dict(tree)
return info
|
Retrieves information for the given security group. Either a name or a
secgroup_id must be supplied.
.. versionadded:: 2016.3.0
name
The name of the security group for which to gather information. Can be
used instead of ``secgroup_id``.
secgroup_id
The ID of the security group for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f secgroup_info opennebula name=my-secgroup
salt-cloud --function secgroup_info opennebula secgroup_id=5
|
def resize(att_mat, max_length=None):
for i, att in enumerate(att_mat):
if att.ndim == 3:
att = np.expand_dims(att, axis=0)
if max_length is not None:
att = att[:, :, :max_length, :max_length]
row_sums = np.sum(att, axis=2)
att /= row_sums[:, :, np.newaxis]
att_mat[i] = att
return att_mat
|
Normalize attention matrices and reshape as necessary.
|
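A hedged sketch of the normalization step in isolation, on a random attention tensor shaped (layers, heads, query_len, key_len); the axis choice here is an assumption for illustration, not the source's exact indexing.
import numpy as np

att = np.random.rand(2, 4, 6, 6)
att /= att.sum(axis=-1, keepdims=True)     # each query row now sums to 1
print(np.allclose(att.sum(axis=-1), 1.0))  # True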
def clean(self, elements):
cleanelements = []
for i in xrange(len(elements)):
if isempty(elements[i]):
return []
next = elements[i]
if isinstance(elements[i], (list, tuple)):
next = self.clean(elements[i])
if next:
cleanelements.append(elements[i])
return cleanelements
|
Removes empty or incomplete answers.
|
def ENUM(self, _cursor_type):
_decl = _cursor_type.get_declaration()
name = self.get_unique_name(_decl)
if self.is_registered(name):
obj = self.get_registered(name)
else:
log.warning('Was in ENUM but had to parse record declaration ')
obj = self.parse_cursor(_decl)
return obj
|
Handles ENUM typedef.
|
def query_by_assignment(self, assignment_id, end_time=None, start_time=None):
path = {}
data = {}
params = {}
path["assignment_id"] = assignment_id
if start_time is not None:
params["start_time"] = start_time
if end_time is not None:
params["end_time"] = end_time
self.logger.debug("GET /api/v1/audit/grade_change/assignments/{assignment_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/audit/grade_change/assignments/{assignment_id}".format(**path), data=data, params=params, all_pages=True)
|
Query by assignment.
List grade change events for a given assignment.
|
def construct_sls_str(self, node):
obj = self.construct_scalar(node)
if six.PY2:
obj = obj.encode('utf-8')
return SLSString(obj)
|
Build the SLSString.
|
def ignore_nan_inf(kde_method):
def new_kde_method(events_x, events_y, xout=None, yout=None,
*args, **kwargs):
bad_in = get_bad_vals(events_x, events_y)
if xout is None:
density = np.zeros_like(events_x, dtype=float)
bad_out = bad_in
xo = yo = None
else:
density = np.zeros_like(xout, dtype=float)
bad_out = get_bad_vals(xout, yout)
xo = xout[~bad_out]
yo = yout[~bad_out]
ev_x = events_x[~bad_in]
ev_y = events_y[~bad_in]
density[~bad_out] = kde_method(ev_x, ev_y,
xo, yo,
*args, **kwargs)
density[bad_out] = np.nan
return density
doc_add = "\n Notes\n" +\
" -----\n" +\
" This is a wrapped version that ignores nan and inf values."
new_kde_method.__doc__ = kde_method.__doc__ + doc_add
return new_kde_method
|
Ignores nans and infs from the input data
Invalid positions in the resulting density are set to nan.
|
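A hedged sketch of the masking idea in isolation: compute the density only for finite (x, y) pairs and mark invalid positions as nan; the multiplication stands in for the wrapped kde_method.
import numpy as np

x = np.array([1.0, np.nan, 3.0, np.inf])
y = np.array([2.0, 2.0, np.nan, 4.0])
bad = ~(np.isfinite(x) & np.isfinite(y))   # positions with nan or inf in either input
density = np.full_like(x, np.nan)
density[~bad] = x[~bad] * y[~bad]          # stand-in for kde_method(ev_x, ev_y, ...)
print(density)  # [ 2. nan nan nan]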
def emit(self, record):
if self.triggerLevelNo is not None and record.levelno>=self.triggerLevelNo:
self.triggered = True
logging.handlers.BufferingHandler.emit(self,record)
|
Emit record after checking if message triggers later sending of e-mail.
|
def export_to_json(self):
return {
hostname: sorted(self._encode_key(key) for key in pins)
for hostname, pins in self._storage.items()
}
|
Return a JSON dictionary which contains all the pins stored in this
store.
|
def cd(path_to):
if path_to == '-':
if not cd.previous:
raise PathError('No previous directory to return to')
return cd(cd.previous)
if not hasattr(path_to, 'cd'):
path_to = makepath(path_to)
try:
previous = os.getcwd()
except OSError as e:
if 'No such file or directory' in str(e):
return False
raise
if path_to.isdir():
os.chdir(path_to)
elif path_to.isfile():
os.chdir(path_to.parent)
elif not os.path.exists(path_to):
return False
else:
raise PathError('Cannot cd to %s' % path_to)
cd.previous = previous
return True
|
cd to the given path
If the path is a file, then cd to its parent directory
Remember current directory before the cd
so that we can cd back there with cd('-')
|
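A related pattern, shown as a hedged sketch rather than the source API: a context manager that restores the previous directory automatically instead of relying on cd('-').
import os
from contextlib import contextmanager

@contextmanager
def pushd(path):
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)  # always return to where we started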
def get_inline_expression(self, text):
text = text.strip()
if not text.startswith(self.inline_tags[0]) or not text.endswith(self.inline_tags[1]):
return
return text[2:-2]
|
Extract an inline expression from the given text.
|
def clean_zeros(a, b, M):
M2 = M[a > 0, :][:, b > 0].copy()
a2 = a[a > 0]
b2 = b[b > 0]
return a2, b2, M2
|
Remove all components with zero weights in a and b.
|
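A hedged usage sketch with NumPy arrays showing which rows and columns of M survive.
import numpy as np

a = np.array([0.5, 0.0, 0.5])   # row weights; row 1 is dropped
b = np.array([0.0, 1.0])        # column weights; column 0 is dropped
M = np.arange(6.0).reshape(3, 2)
a2, b2, M2 = a[a > 0], b[b > 0], M[a > 0, :][:, b > 0]
print(M2)  # rows 0 and 2, column 1 -> [[1.], [5.]]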
def findLeftBrace(self, block, column):
block, column = self.findBracketBackward(block, column, '{')
try:
block, column = self.tryParenthesisBeforeBrace(block, column)
except ValueError:
pass
return self._blockIndent(block)
|
Search for a corresponding '{' and return its indentation.
If not found, return None.
|
def peek_string(self, lpBaseAddress, fUnicode = False, dwMaxSize = 0x1000):
if not lpBaseAddress or dwMaxSize == 0:
if fUnicode:
return u''
return ''
if not dwMaxSize:
dwMaxSize = 0x1000
szString = self.peek(lpBaseAddress, dwMaxSize)
if fUnicode:
szString = compat.unicode(szString, 'U16', 'replace')
szString = szString[ : szString.find(u'\0') ]
else:
szString = szString[ : szString.find('\0') ]
return szString
|
Tries to read an ASCII or Unicode string
from the address space of the process.
@see: L{read_string}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type fUnicode: bool
@param fUnicode: C{True} is the string is expected to be Unicode,
C{False} if it's expected to be ANSI.
@type dwMaxSize: int
@param dwMaxSize: Maximum allowed string length to read, in bytes.
@rtype: str, compat.unicode
@return: String read from the process memory space.
It B{doesn't} include the terminating null character.
Returns an empty string on failure.
|
def get_library_name():
from os.path import split, abspath
__lib_name = split(split(abspath(sys.modules[__name__].__file__))[0])[1]
assert __lib_name in ["sframe", "turicreate"]
return __lib_name
|
Returns either sframe or turicreate depending on which library
this file is bundled with.
|
def save(self, obj, id_code):
filestream = open('{0}/{1}'.format(self.data_path, id_code), 'w+')
pickle.dump(obj, filestream)
filestream.close()
|
Save an object, and use id_code in the filename
obj - any object
id_code - unique identifier
|
def _get_body(self):
" Return the Container object for the current CLI. "
new_hash = self.pymux.arrangement.invalidation_hash()
app = get_app()
if app in self._bodies_for_app:
existing_hash, container = self._bodies_for_app[app]
if existing_hash == new_hash:
return container
new_layout = self._build_layout()
self._bodies_for_app[app] = (new_hash, new_layout)
return new_layout
|
Return the Container object for the current CLI.
|
def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
if args is None:
args = ()
if kwargs is None:
kwargs = {}
start = monotonic()
try:
return task.apply(*args, **kwargs)
finally:
delta = monotonic() - start
logger.info("%s finished in %i seconds." % (task.name, delta))
|
Logs the time spent while running the task.
|
def GetDefault(self, fd=None, default=None):
if callable(self.default):
return self.default(fd)
if self.default is not None:
if isinstance(self.default, rdfvalue.RDFValue):
default = self.default.Copy()
default.attribute_instance = self
return self(default)
else:
return self(self.default)
if isinstance(default, rdfvalue.RDFValue):
default = default.Copy()
default.attribute_instance = self
return default
|
Returns a default attribute if it is not set.
|
def _exception_free_callback(self, callback, *args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception:
self._logger.exception("An exception occurred while calling a hook! ",exc_info=True)
return None
|
A wrapper that removes all exceptions raised from hooks.
|
def to_dict(self, xml):
children = list(xml)
if not children:
return xml.text
else:
out = {}
for node in list(xml):
if node.tag in out:
if not isinstance(out[node.tag], list):
out[node.tag] = [out[node.tag]]
out[node.tag].append(self.to_dict(node))
else:
out[node.tag] = self.to_dict(node)
return out
|
Convert an XML structure to a dict recursively; repeated key
entries are returned in list containers.
|
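A hedged, free-standing version of the same recursion using xml.etree.ElementTree; the function name is hypothetical.
import xml.etree.ElementTree as ET

def xml_to_dict(node):
    children = list(node)
    if not children:
        return node.text
    out = {}
    for child in children:
        value = xml_to_dict(child)
        if child.tag in out:
            if not isinstance(out[child.tag], list):
                out[child.tag] = [out[child.tag]]
            out[child.tag].append(value)
        else:
            out[child.tag] = value
    return out

print(xml_to_dict(ET.fromstring('<r><a>1</a><a>2</a><b>3</b></r>')))
# {'a': ['1', '2'], 'b': '3'}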
def copy_shell(self):
cls = self.__class__
new_i = cls()
new_i.uuid = self.uuid
for prop in cls.properties:
if hasattr(self, prop):
if prop in ['members', 'unknown_members']:
setattr(new_i, prop, [])
else:
setattr(new_i, prop, getattr(self, prop))
return new_i
|
Copy the group properties EXCEPT the members.
Members need to be filled in manually afterwards.
:return: Itemgroup object
:rtype: alignak.objects.itemgroup.Itemgroup
|
def timed_call(self, ms, callback, *args, **kwargs):
return self.loop.timed_call(ms, callback, *args, **kwargs)
|
Invoke a callable on the main event loop thread at a
specified time in the future.
Parameters
----------
ms : int
The time to delay, in milliseconds, before executing the
callable.
callback : callable
The callable object to execute at some point in the future.
*args, **kwargs
Any additional positional and keyword arguments to pass to
the callback.
|
def doUpdate(self, timeout=1):
namespace = Fritz.getServiceType("doUpdate")
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "X_AVM-DE_DoUpdate", timeout=timeout)
return results["NewUpgradeAvailable"], results["NewX_AVM-DE_UpdateState"]
|
Do a software update of the Fritz Box if available.
:param float timeout: the timeout to wait for the action to be executed
:return: whether an update was available and the update state (bool, str)
:rtype: tuple(bool, str)
|
def get_version(path="src/devpy/__init__.py"):
init_content = open(path, "rt").read()
pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"
return re.search(pattern, init_content, re.M).group(1)
|
Return the version with a regex instead of importing the package.
|
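A hedged sketch of the same regex applied to an in-memory string instead of a file.
import re

init_content = '__version__ = "1.2.3"\n'
pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"
print(re.search(pattern, init_content, re.M).group(1))  # 1.2.3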
def qd2apex(self, qlat, qlon, height):
alat, alon = self._qd2apex(qlat, qlon, height)
return np.float64(alat), np.float64(alon)
|
Converts quasi-dipole to modified apex coordinates.
Parameters
==========
qlat : array_like
Quasi-dipole latitude
qlon : array_like
Quasi-dipole longitude
height : array_like
Altitude in km
Returns
=======
alat : ndarray or float
Modified apex latitude
alon : ndarray or float
Modified apex longitude
Raises
======
ApexHeightError
if apex height < reference height
|
def db_for_read(self, model, **hints):
if model._meta.app_label in self._apps:
return getattr(model, '_db_alias', model._meta.app_label)
return None
|
If the app has its own database, use it for reads
|
def _fill_vao(self):
with self.vao:
self.vbos = []
for loc, verts in enumerate(self.arrays):
vbo = VBO(verts)
self.vbos.append(vbo)
self.vao.assign_vertex_attrib_location(vbo, loc)
|
Put array locations in the VAO for the shader, in the same order as the arrays given to Mesh.
|
def configure_urls(apps, index_view=None, prefixes=None):
prefixes = prefixes or {}
urlpatterns = patterns('')
if index_view:
from django.views.generic.base import RedirectView
urlpatterns += patterns('',
url(r'^$', RedirectView.as_view(pattern_name=index_view, permanent=False)),
)
for app_name in apps:
app_module = importlib.import_module(app_name)
if module_has_submodule(app_module, 'urls'):
module = importlib.import_module("%s.urls" % app_name)
if not hasattr(module, 'urlpatterns'):
continue
app_prefix = prefixes.get(app_name, app_name.replace("_","-"))
urlpatterns += patterns(
'',
url(
r'^%s/' % app_prefix if app_prefix else '',
include("%s.urls" % app_name),
),
)
return urlpatterns
|
Configure urls from a list of apps.
|
def get_proficiency_search_session(self, proxy):
if not self.supports_proficiency_search():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ProficiencySearchSession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session
|
Gets the ``OsidSession`` associated with the proficiency search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencySearchSession``
:rtype: ``osid.learning.ProficiencySearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_search()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_proficiency_search()`` is ``true``.*
|
def popenCLIExecutable(command, **kwargs):
cliExecutable = command[0]
ma = re_slicerSubPath.search(cliExecutable)
if ma:
wrapper = os.path.join(cliExecutable[:ma.start()], 'Slicer')
if sys.platform.startswith('win'):
wrapper += '.exe'
if os.path.exists(wrapper):
command = [wrapper, '--launcher-no-splash', '--launch'] + command
return subprocess.Popen(command, **kwargs)
|
Wrapper around subprocess.Popen constructor that tries to
detect Slicer CLI modules and launches them through the Slicer
launcher in order to prevent potential DLL dependency issues.
Any kwargs are passed on to subprocess.Popen().
If you ever try to use this function to run a CLI, you might want to
take a look at
https://github.com/hmeine/MeVisLab-CLI/blob/master/Modules/Macros/CTK_CLI/CLIModuleBackend.py
(in particular, the CLIExecution class.)
Ideally, more of that code would be extracted and moved here, but
I have not gotten around to doing that yet.
|
def _integer_to_interval(arg, unit='s'):
op = ops.IntervalFromInteger(arg, unit)
return op.to_expr()
|
Convert an integer to an interval with the same inner type.
Parameters
----------
unit : {'Y', 'M', 'W', 'D', 'h', 'm', s', 'ms', 'us', 'ns'}
Returns
-------
interval : interval value expression
|
def _plot(self):
for serie in self.series[::-1 if self.stack_from_top else 1]:
self.bar(serie)
for serie in self.secondary_series[::-1 if self.stack_from_top else 1]:
self.bar(serie, True)
|
Draw bars for series and secondary series
|
def stopDtmfAcknowledge():
a = TpPd(pd=0x3)
b = MessageType(mesType=0x32)
packet = a / b
return packet
|
STOP DTMF ACKNOWLEDGE Section 9.3.30
|
def create_model(schema, collection, class_name=None):
if not class_name:
class_name = camelize(str(collection.name))
model_class = type(class_name,
(Model,),
dict(schema=schema, _collection_factory=staticmethod(lambda: collection)))
model_class.__module__ = _module_name_from_previous_frame(1)
return model_class
|
Main entry point to creating a new mongothon model. Both
schema and Pymongo collection objects must be provided.
Returns a new class which can be used as a model class.
The class name of the model class by default is inferred
from the provided collection (converted to camel case).
Optionally, a class_name argument can be provided to
override this.
|
def is_applicable_python_file(rel_path: str) -> bool:
return (rel_path.endswith('.py') and
not any(re.search(pat, rel_path) for pat in IGNORED_FILE_PATTERNS))
|
Determines if a file should be included in incremental coverage analysis.
Args:
rel_path: The repo-relative file path being considered.
Returns:
Whether to include the file.
|
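A hedged sketch with a hypothetical IGNORED_FILE_PATTERNS list; the real pattern list lives in the source module and is not shown here.
import re

IGNORED_FILE_PATTERNS = [r'_pb2\.py$', r'^docs/']  # hypothetical values

def is_applicable(rel_path):
    return (rel_path.endswith('.py') and
            not any(re.search(pat, rel_path) for pat in IGNORED_FILE_PATTERNS))

print(is_applicable('pkg/module.py'))      # True
print(is_applicable('pkg/module_pb2.py'))  # False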
def add_general_optgroup(parser):
g = parser.add_argument_group("General Options")
g.add_argument("-q", "--quiet", dest="silent",
action="store_true", default=False)
g.add_argument("-v", "--verbose", nargs=0, action=_opt_cb_verbose)
g.add_argument("-o", "--output", dest="output", default=None)
g.add_argument("-j", "--json", dest="json",
action="store_true", default=False)
g.add_argument("--show-ignored", action="store_true", default=False)
g.add_argument("--show-unchanged", action="store_true", default=False)
g.add_argument("--ignore", action=_opt_cb_ignore,
help="comma-separated list of ignores")
|
option group for general-use features of all javatool CLIs
|
def RegisterMountPoint(cls, mount_point, path_spec):
if mount_point in cls._mount_points:
raise KeyError('Mount point: {0:s} already set.'.format(mount_point))
cls._mount_points[mount_point] = path_spec
|
Registers a path specification mount point.
Args:
mount_point (str): mount point identifier.
path_spec (PathSpec): path specification of the mount point.
Raises:
KeyError: if the corresponding mount point is already set.
|
def objref(obj):
ref = _objrefs.get(obj)
if ref is None:
clsname = obj.__class__.__name__.split('.')[-1]
seqno = _lastids.setdefault(clsname, 1)
ref = '{}-{}'.format(clsname, seqno)
_objrefs[obj] = ref
_lastids[clsname] += 1
return ref
|
Return a string that uniquely and compactly identifies an object.
|
def _get_norms_of_rows(data_frame, method):
if method == 'vector':
norm_vector = np.linalg.norm(data_frame.values, axis=1)
elif method == 'last':
norm_vector = data_frame.iloc[:, -1].values
elif method == 'mean':
norm_vector = np.mean(data_frame.values, axis=1)
elif method == 'first':
norm_vector = data_frame.iloc[:, 0].values
else:
raise ValueError("no normalization method '{0}'".format(method))
return norm_vector
|
return a column vector containing the norm of each row
|
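A hedged illustration of the 'vector' and 'mean' variants on a small DataFrame.
import numpy as np
import pandas as pd

df = pd.DataFrame([[3.0, 4.0], [6.0, 8.0]])
print(np.linalg.norm(df.values, axis=1))  # [ 5. 10.]  ('vector' method)
print(np.mean(df.values, axis=1))         # [3.5 7. ]  ('mean' method)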
def date_sorted_sources(*sources):
sorted_stream = heapq.merge(*(_decorate_source(s) for s in sources))
for _, message in sorted_stream:
yield message
|
Takes an iterable of sources, generating namestrings and
piping their output into date_sort.
|
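A hedged sketch of the underlying heapq.merge pattern with plain (date, message) tuples standing in for the decorated sources.
import heapq

source_a = [(1, 'a1'), (3, 'a3')]
source_b = [(2, 'b2'), (4, 'b4')]
merged = [msg for _, msg in heapq.merge(source_a, source_b)]
print(merged)  # ['a1', 'b2', 'a3', 'b4']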
def edit(self,
billing_email=None,
company=None,
email=None,
location=None,
name=None):
json = None
data = {'billing_email': billing_email, 'company': company,
'email': email, 'location': location, 'name': name}
self._remove_none(data)
if data:
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_(json)
return True
return False
|
Edit this organization.
:param str billing_email: (optional) Billing email address (private)
:param str company: (optional)
:param str email: (optional) Public email address
:param str location: (optional)
:param str name: (optional)
:returns: bool
|
def compression_details(self):
event_type = self.findtext("event_type")
if event_type != "compression":
raise AttributeError(
'PREMIS events of type "{}" have no compression'
" details".format(event_type)
)
parsed_compression_event_detail = self.parsed_event_detail
compression_program = _get_event_detail_attr(
"program", parsed_compression_event_detail
)
compression_algorithm = _get_event_detail_attr(
"algorithm", parsed_compression_event_detail
)
compression_program_version = _get_event_detail_attr(
"version", parsed_compression_event_detail
)
archive_tool = {"7z": "7-Zip"}.get(compression_program, compression_program)
return compression_algorithm, compression_program_version, archive_tool
|
Return as a 3-tuple, this PREMIS compression event's program,
version, and algorithm used to perform the compression.
|
def remove_redis_keyword(self, keyword):
redisvr.srem(CMS_CFG['redis_kw'] + self.userinfo.user_name, keyword)
return json.dump({}, self)
|
Remove the keyword from redis.
|
def open(self, target_uri, **kwargs):
target = urlsplit(target_uri, scheme=self.default_opener)
opener = self.get_opener(target.scheme)
query = opener.conform_query(target.query)
target = opener.get_target(
target.scheme,
target.path,
target.fragment,
target.username,
target.password,
target.hostname,
target.port,
query,
**kwargs
)
target.opener_path = target_uri
return target
|
Open target uri.
:param target_uri: Uri to open
:type target_uri: string
:returns: Target object
|