| repository_name (string, lengths 7-55) | func_path_in_repository (string, lengths 4-223) | func_name (string, lengths 1-134) | whole_func_string (string, lengths 75-104k) | language (string, 1 class: python) | func_code_string (string, lengths 75-104k) | func_code_tokens (list, lengths 19-28.4k) | func_documentation_string (string, lengths 1-46.9k) | func_documentation_tokens (list, lengths 1-1.97k) | split_name (string, 1 class: train) | func_code_url (string, lengths 87-315) |
|---|---|---|---|---|---|---|---|---|---|---|
| RI-imaging/nrefocus | nrefocus/_autofocus.py | autofocus | def autofocus(field, nm, res, ival, roi=None, metric="average gradient", padding=True, ret_d=False, ret_grad=False, num_cpus=1): ... | python | (identical to whole_func_string) | ["def", "autofocus", "(", ...] | Numerical autofocusing of a field using the Helmholtz equation. Parameters: field (1d or 2d ndarray): Electric field is BG-Corrected, i.e. field = EX/BEx; nm (float): Refractive index of medium; res (float): Size of wavelength in pixels; ival (tuple of floats): ... | ["Numerical", "autofocusing", ...] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L19-L83 |
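A minimal usage sketch for the `autofocus` record above, based only on the signature and parameter descriptions in the row. It assumes the `nrefocus` package is installed and re-exports the function at package level, and the input field here is a placeholder array.

```python
import numpy as np
import nrefocus  # assumption: the package exposes autofocus() at the top level

# Placeholder background-corrected field (field = EX/BEx per the docstring).
field = np.ones((128, 128), dtype=complex)

# nm: refractive index of the medium, res: wavelength in pixels,
# ival: (min, max) interval to search, in pixels.
result = nrefocus.autofocus(field, nm=1.333, res=8.0, ival=(-50.0, 50.0))
```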
| RI-imaging/nrefocus | nrefocus/_autofocus.py | autofocus_stack | def autofocus_stack(fieldstack, nm, res, ival, roi=None, metric="average gradient", padding=True, same_dist=False, ret_ds=False, ret_grads=False, num_cpus=_cpu_count, copy=True): ... | python | (identical to whole_func_string) | ["def", "autofocus_stack", "(", ...] | Numerical autofocusing of a stack using the Helmholtz equation. Parameters: fieldstack (2d or 3d ndarray): Electric field is BG-Corrected, i.e. Field = EX/BEx; nm (float): Refractive index of medium; res (float): Size of wavelength in pixels; ival (tuple of float... | ["Numerical", "autofocusing", ...] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L86-L175 |
| RI-imaging/nrefocus | nrefocus/_autofocus.py | minimize_metric | def minimize_metric(field, metric_func, nm, res, ival, roi=None, coarse_acc=1, fine_acc=.005, return_gradient=True, padding=True): ... | python | (identical to whole_func_string) | ["def", "minimize_metric", "(", ...] | Find the focus by minimizing the `metric` of an image. Parameters: field (2d array): electric field; metric_func (callable): some metric to be minimized; ival (tuple of floats): (minimum, maximum) of interval to search in pixels; nm (float): RI of medium; re... | ["Find", "the", ...] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L178-L297 |
| anteater/anteater | anteater/src/project_scan.py | prepare_project | def prepare_project(project, project_dir, binaries, ips, urls): ... | python | (identical to whole_func_string) | ["def", "prepare_project", "(", ...] | Generates blacklists / whitelists | ["Generates", "blacklists", ...] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L41-L89 |
| anteater/anteater | anteater/src/project_scan.py | scan_file | def scan_file(project, project_dir, binaries, ips, urls, file_audit_list, file_audit_project_list, flag_list, ignore_list, hashlist, file_ignore, ignore_directories, url_ignore, ip_ignore, apikey): ... | python | (identical to whole_func_string) | ["def", "scan_file", "(", ...] | Main scan tasks begin | ["Main", "scan", ...] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L92-L183 |
| anteater/anteater | anteater/src/project_scan.py | scan_ipaddr | def scan_ipaddr(ipaddr, line, project, split_path, apikey): ... | python | (identical to whole_func_string) | ["def", "scan_ipaddr", "(", ...] | If an IP Address is found, scan it | ["If", "an", ...] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L252-L276 |
| anteater/anteater | anteater/src/project_scan.py | scan_url | def scan_url(url, line, project, split_path, apikey): ... | python | (identical to whole_func_string) | ["def", "scan_url", "(", ...] | If URL is found, scan it | ["If", "URL", ...] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L279-L327 |
| ttinies/sc2gameMapRepo | sc2maptool/index.py | getIndex | def getIndex(folderPath=None): ... | python | (identical to whole_func_string) | ["def", "getIndex", "(", ...] | parse the 'Maps' subfolder directory divining criteria for valid maps | ["parse", "the", ...] | train | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/index.py#L16-L34 |
| lablup/backend.ai-common | src/ai/backend/common/types.py | _stringify_number | def _stringify_number(v): ... | python | (identical to whole_func_string) | ["def", "_stringify_number", "(", ...] | Stringify a number, preventing unwanted scientific notations. | ["Stringify", "a", ...] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/types.py#L692-L709 |
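A self-contained re-implementation of the `_stringify_number` logic visible in the row above; the fallback for non-float types is cut off in the dump, so the final `str(v)` branch is an assumption.

```python
import math
from decimal import Decimal

def stringify_number(v):
    """Stringify a number, avoiding scientific notation in the output."""
    if isinstance(v, (float, Decimal)):
        if math.isinf(v) and v > 0:
            return 'Infinity'
        elif math.isinf(v) and v < 0:
            return '-Infinity'
        return '{:f}'.format(v)      # fixed-point, never the 1e+16 style
    return str(v)                    # assumption: other types pass through

print(stringify_number(1e16))                 # 10000000000000000.000000
print(stringify_number(Decimal('Infinity')))  # Infinity
```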
| lablup/backend.ai-common | src/ai/backend/common/types.py | ImageRef.resolve_alias | async def resolve_alias(cls, alias_key: str, etcd: etcd.AsyncEtcd): ... | python | (identical to whole_func_string) | ["async", "def", "resolve_alias", ...] | Resolve the tag using etcd so that the current instance indicates a concrete, latest image. Note that alias resolving does not take the registry component into account. | ["Resolve", "the", ...] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/types.py#L249-L269 |
| BlueBrain/nat | nat/ontoServ.py | getOntoCategory | def getOntoCategory(curie, alwaysFetch=False): ... | python | (identical to whole_func_string) | ["def", "getOntoCategory", "(", ...] | Accessing web-based ontology service is too long, so we cache the information in a pickle file and query the services only if the info has not already been cached. | ["Accessing", "web", ...] | train | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/ontoServ.py#L16-L60 |
| anteater/anteater | anteater/main.py | _init_logging | def _init_logging(anteater_log): ... | python | (identical to whole_func_string) | ["def", "_init_logging", "(", ...] | Setup root logger for package | ["Setup", "root", ...] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L43-L66 |
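The `_init_logging` row shows a conventional timestamped console-logging setup; here is a runnable sketch of the same pattern. The file handler is an assumption (the row truncates before showing what `anteater_log` is used for).

```python
import logging

def init_logging(log_path):
    log = logging.getLogger("anteater")        # package-level logger
    log.setLevel(logging.DEBUG)

    formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                  '%(levelname)s - %(message)s')

    console = logging.StreamHandler()
    console.setFormatter(formatter)
    console.setLevel(logging.INFO)
    log.addHandler(console)

    # Assumption: the original also writes to the file named in its config.
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.DEBUG)
    log.addHandler(file_handler)
    return log
```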
| anteater/anteater | anteater/main.py | check_dir | def check_dir(): ... | python | (identical to whole_func_string) | ["def", "check_dir", "(", ...] | Creates a directory for scan reports | ["Creates", "a", ...] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L69-L76 |
| anteater/anteater | anteater/main.py | main | def main(): ... | python | (identical to whole_func_string) | ["def", "main", "(", ...] | Main function, mostly for passing arguments | ["Main", "function", ...] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L79-L90 |
| hammerlab/stanity | stanity/fit.py | fit | def fit(model_code, *args, **kwargs): ... | python | (identical to whole_func_string) | ["def", "fit", "(", ...] | Fit a Stan model. Caches the compiled model. *args and **kwargs are passed to the pystan.stan function. Arguments you most likely want to pass: data, init, iter, chains. Unlike pystan.stan, if the n_jobs kwarg is not specified, it defaults to -1. Parameters: mo... | ["Fit", "a", ...] | train | https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/fit.py#L6-L39 |
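A usage sketch for `stanity.fit` based on the docstring in the row above; it assumes the `stanity` and `pystan` (2.x) packages are installed, and the model code and data are placeholders.

```python
import stanity  # assumes stanity (and its pystan dependency) is installed

model_code = """
data { int<lower=0> N; real y[N]; }
parameters { real mu; }
model { y ~ normal(mu, 1); }
"""

# Extra keyword arguments are passed straight through to pystan.stan;
# n_jobs defaults to -1 (all cores) per the docstring.
fit = stanity.fit(model_code,
                  data={'N': 3, 'y': [0.1, -0.2, 0.3]},
                  iter=2000, chains=4)
print(fit)
```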
| rainwoodman/kdcount | kdcount/__init__.py | KDNode.enumiter | def enumiter(self, other, rmax, bunch=100000): ... | python | (identical to whole_func_string) | ["def", "enumiter", "(", ...] | cross correlate with other, for all pairs closer than rmax, iterate. for r, i, j in A.enumiter(...): ... where r is the distance, i and j are the original input array index of the data. This uses a thread to convert from KDNode.enum. | ["cross", "correlate", ...] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L13-L28 |
| rainwoodman/kdcount | kdcount/__init__.py | KDNode.enum | def enum(self, other, rmax, process=None, bunch=100000, **kwargs): ... | python | (identical to whole_func_string) | ["def", "enum", "(", ...] | cross correlate with other, for all pairs closer than rmax, iterate. >>> def process(r, i, j, **kwargs): >>> ... >>> A.enum(... process, **kwargs): >>> ... where r is the distance, i and j are the original input array index of t... | ["cross", "correlate", ...] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L30-L59 |
| rainwoodman/kdcount | kdcount/__init__.py | KDNode.count | def count(self, other, r, attrs=None, info={}): ... | python | (identical to whole_func_string) | ["def", "count", "(", ...] | Gray & Moore based fast dual tree counting. r is the edge of bins: -inf or r[i-1] < count[i] <= r[i]. attrs: None or tuple; if tuple, attrs = (attr_self, attr_other). Returns: count; count, weight if attrs is not None | ["Gray", "&", ...] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L61-L77 |
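The `KDNode.count` docstring above defines the bin convention `-inf or r[i-1] < count[i] <= r[i]`. The following brute-force NumPy sketch computes the same binned pair counts on small inputs; it is a reference for the convention, not the library's dual-tree algorithm.

```python
import numpy as np

def brute_force_paircount(pos_a, pos_b, r):
    """count[i] holds pairs whose separation d satisfies r[i-1] < d <= r[i],
    with the first bin open at -inf, matching the docstring's convention."""
    r = np.asarray(r, dtype='f8')
    d = np.linalg.norm(pos_a[:, None, :] - pos_b[None, :, :], axis=-1)
    counts = np.empty(len(r), dtype='intp')
    for i, edge in enumerate(r):
        lower = -np.inf if i == 0 else r[i - 1]
        counts[i] = np.count_nonzero((d > lower) & (d <= edge))
    return counts

pos_a = np.random.uniform(size=(100, 3))
pos_b = np.random.uniform(size=(100, 3))
print(brute_force_paircount(pos_a, pos_b, r=[0.1, 0.2, 0.5]))
```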
| rainwoodman/kdcount | kdcount/__init__.py | KDNode.fof | def fof(self, linkinglength, out=None, method='splay'): ... | python | (identical to whole_func_string) | ["def", "fof", "(", ...] | Friend-of-Friend clustering with linking length. Returns: the label | ["Friend", "-", ...] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L79-L86 |
| rainwoodman/kdcount | kdcount/__init__.py | KDNode.integrate | def integrate(self, min, max, attr=None, info={}): ... | python | (identical to whole_func_string) | ["def", "integrate", "(", ...] | Calculate the total number of points between [min, max). If attr is given, also calculate the sum of the weight. This is a M log(N) operation, where M is the number of min/max queries and N is number of points. | ["Calculate", "the", ...] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L88-L110 |
| rainwoodman/kdcount | kdcount/__init__.py | KDNode.make_forest | def make_forest(self, chunksize): ... | python | (identical to whole_func_string) | ["def", "make_forest", "(", ...] | Divide a tree branch to a forest, each subtree of size at most chunksize | ["Divide", "a", ...] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L112-L129 |
| Numigi/gitoo | src/cli.py | _install_one | def _install_one(repo_url, branch, destination, commit='', patches=None, exclude_modules=None, include_modules=None, base=False, work_directory=''): ... | python | (identical to whole_func_string) | ["def", "_install_one", "(", ...] | Install a third party odoo add-on. :param string repo_url: url of the repo that contains the patch. :param string branch: name of the branch to checkout. :param string destination: the folder where the add-on should end up at. :param string commit: Optional commit rev to checkout to. If mentioned, that ... | ["Install", "a", ...] | train | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/cli.py#L43-L66 |
| Numigi/gitoo | src/cli.py | _install_all | def _install_all(destination='', conf_file=''): ... | python | (identical to whole_func_string) | ["def", "_install_all", "(", ...] | Use the conf file to list all the third party Odoo add-ons that will be installed and the patches that should be applied. :param string destination: the folder where add-ons should end up at. Default: pwd/3rd. :param string conf_file: path to a conf file that describe the add-... | ["Use", "the", ...] | train | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/cli.py#L69-L96 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | find_lt | def find_lt(a, x): ... | python | (identical to whole_func_string) | ["def", "find_lt", "(", ...] | Find rightmost value less than x | ["Find", "rightmost", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L36-L41 |
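`find_lt` is the standard `bisect` recipe for the rightmost value strictly less than x; a self-contained copy with a quick check:

```python
import bisect

def find_lt(a, x):
    """Return the rightmost value in sorted list a that is strictly less than x."""
    i = bisect.bisect_left(a, x)
    if i:
        return a[i - 1]
    raise ValueError("no value below %r" % (x,))

print(find_lt([1, 4, 9, 16], 10))   # -> 9
```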
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | parse | def parse(isatab_ref): ... | python | (identical to whole_func_string) | ["def", "parse", "(", ...] | Entry point to parse an ISA-Tab directory. isatab_ref can point to a directory of ISA-Tab data, in which case we search for the investigator file, or be a reference to the high level investigation file. | ["Entry", "point", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L51-L68 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._parse_region | def _parse_region(self, rec, line_iter): ... | python | (identical to whole_func_string) | ["def", "_parse_region", "(", ...] | Parse a section of an ISA-Tab, assigning information to a supplied record. | ["Parse", "a", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L109-L129 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._line_iter | def _line_iter(self, in_handle): ... | python | (identical to whole_func_string) | ["def", "_line_iter", "(", ...] | Read tab delimited file, handling ISA-Tab special case headers. | ["Read", "tab", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L131-L140 |
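The `_line_iter` row reads ISA-Tab investigation files as tab-delimited CSV and treats single-column, all-uppercase lines as section headers (see `_parse_region` above). A simplified, self-contained sketch of that convention; the sample text and the `isupper()` test are illustrative assumptions.

```python
import csv
import io

def iter_isatab_lines(in_handle):
    """Yield non-empty rows of a tab-delimited ISA-Tab file."""
    for line in csv.reader(in_handle, dialect="excel-tab"):
        if line and line[0]:
            yield line

def split_sections(rows):
    """Group rows under their all-uppercase, single-column section headers."""
    sections, current = {}, None
    for row in rows:
        if len(row) == 1 and row[0].isupper():
            current = row[0]
            sections[current] = []
        elif current is not None:
            sections[current].append(row)
    return sections

sample = "ONTOLOGY SOURCE REFERENCE\nTerm Source Name\tOBI\n"
print(split_sections(iter_isatab_lines(io.StringIO(sample))))
```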
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._parse_keyvals | def _parse_keyvals(self, line_iter): ... | python | (identical to whole_func_string) | ["def", "_parse_keyvals", "(", ...] | Generate dictionary from key/value pairs. | ["Generate", "dictionary", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L142-L162 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser.parse | def parse(self, rec): ... | python | (identical to whole_func_string) | ["def", "parse", "(", ...] | Retrieve row data from files associated with the ISATabRecord. | ["Retrieve", "row", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L193-L216 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._parse_study | def _parse_study(self, fname, node_types): ... | python | (identical to whole_func_string) | ["def", "_parse_study", "(", ...] | Parse study or assay row oriented file around the supplied base node. | ["Parse", "study", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L292-L335 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._finalize_metadata | def _finalize_metadata(self, node): ... | python | (identical to whole_func_string) | ["def", "_finalize_metadata", "(", ...] | Convert node metadata back into a standard dictionary and list. | ["Convert", "node", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L337-L347 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._line_by_type | def _line_by_type(self, line, header, hgroups, htypes, out, want_type, collapse_quals_fn=None): ... | python | (identical to whole_func_string) | ["def", "_line_by_type", "(", ...] | Parse out key value pairs for line information based on a group of values. | ["Parse", "out", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L357-L369 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._collapse_attributes | def _collapse_attributes(self, line, header, indexes): ... | python | (identical to whole_func_string) | ["def", "_collapse_attributes", "(", ...] | Combine attributes in multiple columns into single named tuple. | ["Combine", "attributes", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L371-L381 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._clean_header | def _clean_header(self, header): ... | python | (identical to whole_func_string) | ["def", "_clean_header", "(", ...] | Remove ISA-Tab specific information from Header[real name] headers. | ["Remove", "ISA", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L383-L395 |
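The visible part of `_clean_header` reduces a `Header[real name]` column name to the bracketed name; here it is as a standalone helper. The handling of names that start with digits, mentioned in the row's comment, is truncated in the source and omitted.

```python
def clean_header(header):
    """Turn 'Characteristics[organism part]' into 'organism part'."""
    if header.find("[") >= 0:
        header = header.replace("]", "").split("[")[-1]
    return header

print(clean_header("Characteristics[organism part]"))   # organism part
print(clean_header("Sample Name"))                      # unchanged
```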
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._characterize_header | def _characterize_header(self, header, hgroups): ... | python | (identical to whole_func_string) | ["def", "_characterize_header", "(", ...] | Characterize header groups into different data types. | ["Characterize", "header", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L397-L408 |
| ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._collapse_header | def _collapse_header(self, header): ... | python | (identical to whole_func_string) | ["def", "_collapse_header", "(", ...] | Combine header columns into related groups. | ["Combine", "header", ...] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L410-L419 |
| ttinies/sc2gameMapRepo | sc2maptool/cli.py | main | def main(): # mini/unit test ... | python | (identical to whole_func_string) | ["def", "main", "(", ...] | PURPOSE: command-line interface for map information | ["PURPOSE", ":", ...] | train | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/cli.py#L56-L90 |
| MacHu-GWU/dataIO-project | dataIO/pk.py | is_pickle_file | def is_pickle_file(abspath): ... | python | (identical to whole_func_string) | ["def", "is_pickle_file", "(", ...] | Parse file extension. *.pickle: uncompressed, utf-8 encode pickle file; *.gz: compressed, utf-8 encode pickle file | ["Parse", "file", ...] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L43-L62 |
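A re-implementation of the extension dispatch described in the `is_pickle_file` row: `.pickle`/`.pk`/`.p` mean a plain pickle, `.gz` a compressed one. The error raised for any other extension is an assumption, since that branch is cut off in the dump.

```python
import os

def is_pickle_file(abspath):
    """Return True for *.pickle/*.pk/*.p, False for *.gz (compressed pickle)."""
    fname, ext = os.path.splitext(abspath.lower())
    if ext in (".pickle", ".pk", ".p"):
        return True
    elif ext == ".gz":
        return False
    raise ValueError("unsupported extension: %s" % ext)  # assumed behavior

print(is_pickle_file("data/model.PICKLE"))  # True
print(is_pickle_file("data/model.gz"))      # False
```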
| MacHu-GWU/dataIO-project | dataIO/pk.py | load | def load(abspath, default=None, enable_verbose=True): ... | python | (identical to whole_func_string) | ["def", "load", "(", ...] | Load Pickle from file. If file are not exists, returns ``default``. :param abspath: file path. use absolute path as much as you can. extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle). :type abspath: string :param default: default ``dict()``, if ``abspath`` not exists, return the ... | ["Load", "Pickle", ...] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L72-L126 |
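A plain-pickle sketch of the fallback behavior documented in the `load` row above (return `default`, which defaults to an empty dict, when the file does not exist); gzip support and the verbose printing are left out.

```python
import os
import pickle

def load(abspath, default=None):
    """Return the unpickled contents of abspath, or default if it is missing."""
    if default is None:
        default = dict()
    if not os.path.exists(abspath):
        return default
    with open(abspath, "rb") as f:
        return pickle.load(f)

print(load("does-not-exist.pickle"))   # -> {}
```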
| MacHu-GWU/dataIO-project | dataIO/pk.py | dump | def dump(data, abspath, pk_protocol=py23.pk_protocol, overwrite=False, enable_verbose=True): ... | python | (identical to whole_func_string) | ["def", "dump", "(", ...] | Dump picklable object to file. Provides multiple choice to customize the behavior. :param data: picklable python object. :type data: dict or list :param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle) :type abspath: string :param pk_p... | ["Dump", "picklable", ...] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L129-L204 |
| MacHu-GWU/dataIO-project | dataIO/pk.py | safe_dump | def safe_dump(data, abspath, pk_protocol=py23.pk_protocol, enable_verbose=True): ... | python | (identical to whole_func_string) | ["def", "safe_dump", "(", ...] | A stable version of :func:`dump`, this method will silently overwrite existing file. There's a issue with :func:`dump`: If your program is interrupted while writing, you got an incomplete file, and you also lose the original file. So this method write pickle to a temporary file first, then rename... | ["A", "stable", ...] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L207-L229 |
| MacHu-GWU/dataIO-project | dataIO/pk.py | obj2bytes | def obj2bytes(obj, pk_protocol=py23.pk_protocol): ... | python | (identical to whole_func_string) | ["def", "obj2bytes", "(", ...] | Convert arbitrary pickable Python Object to bytes. (Chinese docstring: convert a picklable Python object into a bytestr.) | ["Convert", "arbitrary", ...] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L232-L239 |
| MacHu-GWU/dataIO-project | dataIO/pk.py | obj2str | def obj2str(obj, pk_protocol=py23.pk_protocol): ... | python | (identical to whole_func_string) | ["def", "obj2str", "(", ...] | Convert arbitrary object to base64 encoded string. (Chinese docstring: convert a picklable Python object into a utf-8 encoded, pure-ASCII string.) | ["Convert", "arbitrary", ...] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L252-L260 |
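`obj2bytes` and `obj2str` are thin wrappers over `pickle.dumps` and URL-safe base64. A self-contained round trip; the inverse helper is written here only to show the encoding is reversible and is not taken from the library.

```python
import base64
import pickle

def obj2str(obj, pk_protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle obj and return it as a pure-ASCII, URL-safe base64 string."""
    return base64.urlsafe_b64encode(
        pickle.dumps(obj, protocol=pk_protocol)).decode("utf-8")

def str2obj(s):
    """Assumed inverse: decode the base64 text and unpickle it."""
    return pickle.loads(base64.urlsafe_b64decode(s.encode("utf-8")))

token = obj2str({"a": [1, 2, 3]})
print(token)             # ASCII-only, safe to embed in URLs or JSON
print(str2obj(token))    # {'a': [1, 2, 3]}
```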
| rainwoodman/kdcount | kdcount/utils.py | bincount | def bincount(dig, weight, minlength): ... | python | (identical to whole_func_string) | ["def", "bincount", "(", ...] | bincount supporting scalar and vector weight | ["bincount", "supporting", ...] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/utils.py#L27-L32 |
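A self-contained copy of the `bincount` helper above, which lets a scalar weight act as a constant per-item weight, with a quick check:

```python
import numpy

def bincount(dig, weight, minlength):
    """numpy.bincount supporting both scalar and per-item vector weights."""
    if numpy.isscalar(weight):
        return numpy.bincount(dig, minlength=minlength) * weight
    return numpy.bincount(dig, weights=weight, minlength=minlength)

dig = numpy.array([0, 1, 1, 2])
print(bincount(dig, 0.5, 4))                              # [0.5 1.  0.5 0. ]
print(bincount(dig, numpy.array([1., 2., 3., 4.]), 4))    # [1. 5. 4. 0.]
```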
| timothycrosley/concentration | concentration/run.py | reset_network | def reset_network(message): ... | python | (identical to whole_func_string) | ["def", "reset_network", "(", ...] | Resets the users network to make changes take effect | ["Resets", "the", ...] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L15-L22 |
| timothycrosley/concentration | concentration/run.py | improve | def improve(): ... | python | (identical to whole_func_string) | ["def", "improve", "(", ...] | Disables access to websites that are defined as 'distractors' | ["Disables", "access", ...] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L26-L38 |
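The `improve` row blocks distracting sites by writing a sentinel-delimited block of host entries. A generic sketch of that pattern against a throwaway file; the sentinel strings, the site list, and the exact entry format are placeholders, not the package's settings.

```python
START_TOKEN = "## start-concentration-block"   # placeholder sentinels
END_TOKEN = "## end-concentration-block"
DISTRACTORS = ["example.com", "news.example.org"]

def block_sites(hosts_path):
    """Append a START/END delimited block of 127.0.0.1 entries, once."""
    with open(hosts_path, "r+") as hosts_file:
        contents = hosts_file.read()
        if START_TOKEN not in contents and END_TOKEN not in contents:
            hosts_file.write("\n" + START_TOKEN + "\n")
            for site in DISTRACTORS:
                hosts_file.write("127.0.0.1 %s\n" % site)       # assumed format
                hosts_file.write("127.0.0.1 www.%s\n" % site)
            hosts_file.write(END_TOKEN + "\n")

open("hosts_demo.txt", "a").close()   # use a scratch file, not /etc/hosts
block_sites("hosts_demo.txt")
```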
timothycrosley/concentration | concentration/run.py | lose | def lose():
"""Enables access to websites that are defined as 'distractors'"""
changed = False
with open(settings.HOSTS_FILE, "r") as hosts_file:
new_file = []
in_block = False
for line in hosts_file:
if in_block:
if line.strip() == settings.END_TOKEN:
... | python | def lose():
"""Enables access to websites that are defined as 'distractors'"""
changed = False
with open(settings.HOSTS_FILE, "r") as hosts_file:
new_file = []
in_block = False
for line in hosts_file:
if in_block:
if line.strip() == settings.END_TOKEN:
... | [
"def",
"lose",
"(",
")",
":",
"changed",
"=",
"False",
"with",
"open",
"(",
"settings",
".",
"HOSTS_FILE",
",",
"\"r\"",
")",
"as",
"hosts_file",
":",
"new_file",
"=",
"[",
"]",
"in_block",
"=",
"False",
"for",
"line",
"in",
"hosts_file",
":",
"if",
... | Enables access to websites that are defined as 'distractors' | [
"Enables",
"access",
"to",
"websites",
"that",
"are",
"defined",
"as",
"distractors"
] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L42-L61 |
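The matching `lose()` body is also truncated; a hedged sketch of the inverse operation simply drops every line between the two tokens. The constants are the same assumptions used in the `improve()` sketch above, repeated here so the snippet stands on its own.

```python
HOSTS_FILE = "/etc/hosts"
START_TOKEN = "## START DISTRACTORS ##"
END_TOKEN = "## END DISTRACTORS ##"

def lose():
    """Rewrite the hosts file without the delimited distractor block."""
    kept, in_block, changed = [], False, False
    with open(HOSTS_FILE, "r") as hosts_file:
        for line in hosts_file:
            if in_block:
                changed = True
                if line.strip() == END_TOKEN:
                    in_block = False
                continue
            if line.strip() == START_TOKEN:
                in_block = True
                changed = True
                continue
            kept.append(line)
    if changed:
        with open(HOSTS_FILE, "w") as hosts_file:
            hosts_file.writelines(kept)
```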
timothycrosley/concentration | concentration/run.py | take_break | def take_break(minutes: hug.types.number=5):
"""Enables temporarily breaking concentration"""
print("")
print("######################################### ARE YOU SURE? #####################################")
try:
for remaining in range(60, -1, -1):
sys.stdout.write("\r")
s... | python | def take_break(minutes: hug.types.number=5):
"""Enables temporarily breaking concentration"""
print("")
print("######################################### ARE YOU SURE? #####################################")
try:
for remaining in range(60, -1, -1):
sys.stdout.write("\r")
s... | [
"def",
"take_break",
"(",
"minutes",
":",
"hug",
".",
"types",
".",
"number",
"=",
"5",
")",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"######################################### ARE YOU SURE? #####################################\"",
")",
"try",
":",
"for",
"r... | Enables temporarily breaking concentration | [
"Enables",
"temporarily",
"breaking",
"concentration"
] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L65-L97 |
managedbyq/mbq.metrics | mbq/metrics/contrib/django/middleware/connection_stats.py | ConnectionStatsMiddleware.local_port_range | def local_port_range(self):
"""Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound.
"""
if self._local_port_range is None:
with ope... | python | def local_port_range(self):
"""Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound.
"""
if self._local_port_range is None:
with ope... | [
"def",
"local_port_range",
"(",
"self",
")",
":",
"if",
"self",
".",
"_local_port_range",
"is",
"None",
":",
"with",
"open",
"(",
"'/proc/sys/net/ipv4/ip_local_port_range'",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"_local_port_range",
"=",
"tuple",
"(",
... | Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound. | [
"Tuple",
"of",
"(",
"low_port",
"high_port",
")",
"reflecting",
"the",
"local",
"port",
"range",
"assigned",
"to",
"outbound",
"connections",
".",
"We",
"use",
"this",
"as",
"part",
"of",
"a",
"heuristic",
"to",
"determine",
"whether",
"a",
"connection",
"is... | train | https://github.com/managedbyq/mbq.metrics/blob/22ce48dbf132f9ddd4adf86d25df6f58e3d7a520/mbq/metrics/contrib/django/middleware/connection_stats.py#L44-L52 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | _si | def _si(number):
"""Format a number using base-2 SI prefixes"""
prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
while number > 1024:
number /= 1024.0
prefixes.pop(0)
return '%0.2f%s' % (number, prefixes.pop(0)) | python | def _si(number):
"""Format a number using base-2 SI prefixes"""
prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
while number > 1024:
number /= 1024.0
prefixes.pop(0)
return '%0.2f%s' % (number, prefixes.pop(0)) | [
"def",
"_si",
"(",
"number",
")",
":",
"prefixes",
"=",
"[",
"''",
",",
"'K'",
",",
"'M'",
",",
"'G'",
",",
"'T'",
",",
"'P'",
",",
"'E'",
",",
"'Z'",
",",
"'Y'",
"]",
"while",
"number",
">",
"1024",
":",
"number",
"/=",
"1024.0",
"prefixes",
"... | Format a number using base-2 SI prefixes | [
"Format",
"a",
"number",
"using",
"base",
"-",
"2",
"SI",
"prefixes"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L72-L78 |
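`_si` above is short enough to copy verbatim; the only additions here are the print calls, which confirm that one prefix is consumed per division by 1024.

```python
def _si(number):
    """Format a number using base-2 SI prefixes."""
    prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    while number > 1024:
        number /= 1024.0
        prefixes.pop(0)
    return '%0.2f%s' % (number, prefixes.pop(0))

print(_si(512))            # 512.00
print(_si(123456789))      # 117.74M
print(_si(3 * 1024 ** 3))  # 3.00G
```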
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | _get_url | def _get_url(url):
"""Retrieve requested URL"""
try:
data = HTTP_SESSION.get(url, stream=True)
data.raise_for_status()
except requests.exceptions.RequestException as exc:
raise FetcherException(exc)
return data | python | def _get_url(url):
"""Retrieve requested URL"""
try:
data = HTTP_SESSION.get(url, stream=True)
data.raise_for_status()
except requests.exceptions.RequestException as exc:
raise FetcherException(exc)
return data | [
"def",
"_get_url",
"(",
"url",
")",
":",
"try",
":",
"data",
"=",
"HTTP_SESSION",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"data",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
... | Retrieve requested URL | [
"Retrieve",
"requested",
"URL"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L81-L89 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | _extract_file | def _extract_file(zip_fp, info, path):
"""Extract files while explicitly setting the proper permissions"""
zip_fp.extract(info.filename, path=path)
out_path = os.path.join(path, info.filename)
perm = info.external_attr >> 16
perm |= stat.S_IREAD # make sure we're not accidentally setting this to 0... | python | def _extract_file(zip_fp, info, path):
"""Extract files while explicitly setting the proper permissions"""
zip_fp.extract(info.filename, path=path)
out_path = os.path.join(path, info.filename)
perm = info.external_attr >> 16
perm |= stat.S_IREAD # make sure we're not accidentally setting this to 0... | [
"def",
"_extract_file",
"(",
"zip_fp",
",",
"info",
",",
"path",
")",
":",
"zip_fp",
".",
"extract",
"(",
"info",
".",
"filename",
",",
"path",
"=",
"path",
")",
"out_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"info",
".",
"filen... | Extract files while explicitly setting the proper permissions | [
"Extract",
"files",
"while",
"explicitly",
"setting",
"the",
"proper",
"permissions"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L110-L117 |
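`_extract_file` is truncated just after the permission bits are assembled; the sketch below finishes the thought with an `os.chmod` call, which is an assumption about the missing tail rather than the repository's exact code.

```python
import os
import stat

def extract_file(zip_fp, info, path):
    """Extract one zipfile.ZipInfo member, then restore its Unix mode."""
    zip_fp.extract(info.filename, path=path)
    out_path = os.path.join(path, info.filename)
    perm = info.external_attr >> 16  # upper 16 bits carry the Unix mode (0 for zips made on Windows)
    perm |= stat.S_IREAD             # never leave an unreadable file behind
    os.chmod(out_path, perm)         # assumed final step; the original body is cut off above
```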
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildFlags.build_string | def build_string(self):
"""
Taskcluster denotes builds in one of two formats - i.e. linux64-asan or linux64-asan-opt
The latter is generated. If it fails, the caller should try the former.
"""
return (('-ccov' if self.coverage else '') +
('-fuzzing' if self.fuzzin... | python | def build_string(self):
"""
Taskcluster denotes builds in one of two formats - i.e. linux64-asan or linux64-asan-opt
The latter is generated. If it fails, the caller should try the former.
"""
return (('-ccov' if self.coverage else '') +
('-fuzzing' if self.fuzzin... | [
"def",
"build_string",
"(",
"self",
")",
":",
"return",
"(",
"(",
"'-ccov'",
"if",
"self",
".",
"coverage",
"else",
"''",
")",
"+",
"(",
"'-fuzzing'",
"if",
"self",
".",
"fuzzing",
"else",
"''",
")",
"+",
"(",
"'-asan'",
"if",
"self",
".",
"asan",
... | Taskcluster denotes builds in one of two formats - i.e. linux64-asan or linux64-asan-opt
The latter is generated. If it fails, the caller should try the former. | [
"Taskcluster",
"denotes",
"builds",
"in",
"one",
"of",
"two",
"formats",
"-",
"i",
".",
"e",
".",
"linux64",
"-",
"asan",
"or",
"linux64",
"-",
"asan",
"-",
"opt",
"The",
"latter",
"is",
"generated",
".",
"If",
"it",
"fails",
"the",
"caller",
"should",... | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L129-L138 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Platform.auto_name_prefix | def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIA... | python | def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIA... | [
"def",
"auto_name_prefix",
"(",
"self",
")",
":",
"# if the platform is not native, auto_name would clobber native downloads.",
"# make a prefix to avoid this",
"native_system",
"=",
"std_platform",
".",
"system",
"(",
")",
"native_machine",
"=",
"self",
".",
"CPU_ALIASES",
"... | Generate platform prefix for cross-platform downloads. | [
"Generate",
"platform",
"prefix",
"for",
"cross",
"-",
"platform",
"downloads",
"."
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L171-L186 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildTask.iterall | def iterall(cls, build, branch, flags, platform=None):
"""Generator for all possible BuildTasks with these parameters"""
# Prepare build type
if platform is None:
platform = Platform()
target_platform = platform.gecko_platform
is_namespace = False
if cls.RE_D... | python | def iterall(cls, build, branch, flags, platform=None):
"""Generator for all possible BuildTasks with these parameters"""
# Prepare build type
if platform is None:
platform = Platform()
target_platform = platform.gecko_platform
is_namespace = False
if cls.RE_D... | [
"def",
"iterall",
"(",
"cls",
",",
"build",
",",
"branch",
",",
"flags",
",",
"platform",
"=",
"None",
")",
":",
"# Prepare build type",
"if",
"platform",
"is",
"None",
":",
"platform",
"=",
"Platform",
"(",
")",
"target_platform",
"=",
"platform",
".",
... | Generator for all possible BuildTasks with these parameters | [
"Generator",
"for",
"all",
"possible",
"BuildTasks",
"with",
"these",
"parameters"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L220-L265 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildTask._pushdate_urls | def _pushdate_urls(cls, pushdate, branch, target_platform):
"""Multiple entries exist per push date. Iterate over all until a working entry is found"""
url_base = cls.URL_BASE + '/namespaces/gecko.v2.mozilla-' + branch + '.pushdate.' + pushdate
try:
base = HTTP_SESSION.post(url_base... | python | def _pushdate_urls(cls, pushdate, branch, target_platform):
"""Multiple entries exist per push date. Iterate over all until a working entry is found"""
url_base = cls.URL_BASE + '/namespaces/gecko.v2.mozilla-' + branch + '.pushdate.' + pushdate
try:
base = HTTP_SESSION.post(url_base... | [
"def",
"_pushdate_urls",
"(",
"cls",
",",
"pushdate",
",",
"branch",
",",
"target_platform",
")",
":",
"url_base",
"=",
"cls",
".",
"URL_BASE",
"+",
"'/namespaces/gecko.v2.mozilla-'",
"+",
"branch",
"+",
"'.pushdate.'",
"+",
"pushdate",
"try",
":",
"base",
"="... | Multiple entries exist per push date. Iterate over all until a working entry is found | [
"Multiple",
"entries",
"exist",
"per",
"push",
"date",
".",
"Iterate",
"over",
"all",
"until",
"a",
"working",
"entry",
"is",
"found"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L273-L286 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildTask._revision_url | def _revision_url(cls, rev, branch, target_platform):
"""Retrieve the URL for revision based builds"""
namespace = 'gecko.v2.mozilla-' + branch + '.revision.' + rev
product = 'mobile' if 'android' in target_platform else 'firefox'
return cls.URL_BASE + '/task/' + namespace + '.' + produc... | python | def _revision_url(cls, rev, branch, target_platform):
"""Retrieve the URL for revision based builds"""
namespace = 'gecko.v2.mozilla-' + branch + '.revision.' + rev
product = 'mobile' if 'android' in target_platform else 'firefox'
return cls.URL_BASE + '/task/' + namespace + '.' + produc... | [
"def",
"_revision_url",
"(",
"cls",
",",
"rev",
",",
"branch",
",",
"target_platform",
")",
":",
"namespace",
"=",
"'gecko.v2.mozilla-'",
"+",
"branch",
"+",
"'.revision.'",
"+",
"rev",
"product",
"=",
"'mobile'",
"if",
"'android'",
"in",
"target_platform",
"e... | Retrieve the URL for revision based builds | [
"Retrieve",
"the",
"URL",
"for",
"revision",
"based",
"builds"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L289-L293 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.iterall | def iterall(cls, target, branch, build, flags, platform=None):
"""Return an iterable for all available builds matching a particular build type"""
flags = BuildFlags(*flags)
for task in BuildTask.iterall(build, branch, flags, platform):
yield cls(target, branch, task, flags, platform) | python | def iterall(cls, target, branch, build, flags, platform=None):
"""Return an iterable for all available builds matching a particular build type"""
flags = BuildFlags(*flags)
for task in BuildTask.iterall(build, branch, flags, platform):
yield cls(target, branch, task, flags, platform) | [
"def",
"iterall",
"(",
"cls",
",",
"target",
",",
"branch",
",",
"build",
",",
"flags",
",",
"platform",
"=",
"None",
")",
":",
"flags",
"=",
"BuildFlags",
"(",
"*",
"flags",
")",
"for",
"task",
"in",
"BuildTask",
".",
"iterall",
"(",
"build",
",",
... | Return an iterable for all available builds matching a particular build type | [
"Return",
"an",
"iterable",
"for",
"all",
"available",
"builds",
"matching",
"a",
"particular",
"build",
"type"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L386-L390 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._artifacts | def _artifacts(self):
"""Retrieve the artifacts json object"""
if '_artifacts' not in self._memo:
json = _get_url(self._artifacts_url).json()
self._memo['_artifacts'] = json['artifacts']
return self._memo['_artifacts'] | python | def _artifacts(self):
"""Retrieve the artifacts json object"""
if '_artifacts' not in self._memo:
json = _get_url(self._artifacts_url).json()
self._memo['_artifacts'] = json['artifacts']
return self._memo['_artifacts'] | [
"def",
"_artifacts",
"(",
"self",
")",
":",
"if",
"'_artifacts'",
"not",
"in",
"self",
".",
"_memo",
":",
"json",
"=",
"_get_url",
"(",
"self",
".",
"_artifacts_url",
")",
".",
"json",
"(",
")",
"self",
".",
"_memo",
"[",
"'_artifacts'",
"]",
"=",
"j... | Retrieve the artifacts json object | [
"Retrieve",
"the",
"artifacts",
"json",
"object"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L393-L398 |
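`_artifacts` here, and `build_info` and `moz_info` in the following rows, all share one memoisation pattern: look the property name up in a `_memo` dict, fetch on a miss, then serve the cached value. A minimal stand-alone illustration, with a fake fetch in place of the real Taskcluster request:

```python
class Memoized:
    def __init__(self):
        self._memo = {}

    @property
    def artifacts(self):
        if 'artifacts' not in self._memo:
            self._memo['artifacts'] = self._fetch_artifacts()
        return self._memo['artifacts']

    def _fetch_artifacts(self):
        print("fetching once")
        return [{"name": "build.tar.bz2"}, {"name": "build.json"}]

m = Memoized()
m.artifacts   # prints "fetching once" and caches the list
m.artifacts   # second access is served from _memo, no new fetch
```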
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._artifact_base | def _artifact_base(self):
"""
Build the artifact basename
Builds are base.tar.bz2, info is base.json, shell is base.jsshell.zip...
"""
if '_artifact_base' not in self._memo:
for artifact in self._artifacts:
if self.re_target.search(artifact['name']) is... | python | def _artifact_base(self):
"""
Build the artifact basename
Builds are base.tar.bz2, info is base.json, shell is base.jsshell.zip...
"""
if '_artifact_base' not in self._memo:
for artifact in self._artifacts:
if self.re_target.search(artifact['name']) is... | [
"def",
"_artifact_base",
"(",
"self",
")",
":",
"if",
"'_artifact_base'",
"not",
"in",
"self",
".",
"_memo",
":",
"for",
"artifact",
"in",
"self",
".",
"_artifacts",
":",
"if",
"self",
".",
"re_target",
".",
"search",
"(",
"artifact",
"[",
"'name'",
"]",... | Build the artifact basename
Builds are base.tar.bz2, info is base.json, shell is base.jsshell.zip... | [
"Build",
"the",
"artifact",
"basename",
"Builds",
"are",
"base",
".",
"tar",
".",
"bz2",
"info",
"is",
"base",
".",
"json",
"shell",
"is",
"base",
".",
"jsshell",
".",
"zip",
"..."
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L401-L414 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.build_info | def build_info(self):
"""Return the build's info"""
if 'build_info' not in self._memo:
self._memo['build_info'] = _get_url(self.artifact_url('json')).json()
return self._memo['build_info'] | python | def build_info(self):
"""Return the build's info"""
if 'build_info' not in self._memo:
self._memo['build_info'] = _get_url(self.artifact_url('json')).json()
return self._memo['build_info'] | [
"def",
"build_info",
"(",
"self",
")",
":",
"if",
"'build_info'",
"not",
"in",
"self",
".",
"_memo",
":",
"self",
".",
"_memo",
"[",
"'build_info'",
"]",
"=",
"_get_url",
"(",
"self",
".",
"artifact_url",
"(",
"'json'",
")",
")",
".",
"json",
"(",
")... | Return the build's info | [
"Return",
"the",
"build",
"s",
"info"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L432-L436 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.moz_info | def moz_info(self):
"""Return the build's mozinfo"""
if 'moz_info' not in self._memo:
self._memo['moz_info'] = _get_url(self.artifact_url('mozinfo.json')).json()
return self._memo['moz_info'] | python | def moz_info(self):
"""Return the build's mozinfo"""
if 'moz_info' not in self._memo:
self._memo['moz_info'] = _get_url(self.artifact_url('mozinfo.json')).json()
return self._memo['moz_info'] | [
"def",
"moz_info",
"(",
"self",
")",
":",
"if",
"'moz_info'",
"not",
"in",
"self",
".",
"_memo",
":",
"self",
".",
"_memo",
"[",
"'moz_info'",
"]",
"=",
"_get_url",
"(",
"self",
".",
"artifact_url",
"(",
"'mozinfo.json'",
")",
")",
".",
"json",
"(",
... | Return the build's mozinfo | [
"Return",
"the",
"build",
"s",
"mozinfo"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L444-L448 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_build | def extract_build(self, path='.', tests=None, full_symbols=False):
"""
Download and extract the build and requested extra artifacts
@type path:
@param path:
@type tests:
@param tests:
@type full_symbols:
@param full_symbols:
"""
if self.... | python | def extract_build(self, path='.', tests=None, full_symbols=False):
"""
Download and extract the build and requested extra artifacts
@type path:
@param path:
@type tests:
@param tests:
@type full_symbols:
@param full_symbols:
"""
if self.... | [
"def",
"extract_build",
"(",
"self",
",",
"path",
"=",
"'.'",
",",
"tests",
"=",
"None",
",",
"full_symbols",
"=",
"False",
")",
":",
"if",
"self",
".",
"_target",
"==",
"'js'",
":",
"self",
".",
"extract_zip",
"(",
"'jsshell.zip'",
",",
"path",
"=",
... | Download and extract the build and requested extra artifacts
@type path:
@param path:
@type tests:
@param tests:
@type full_symbols:
@param full_symbols: | [
"Download",
"and",
"extract",
"the",
"build",
"and",
"requested",
"extra",
"artifacts"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L485-L569 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._layout_for_domfuzz | def _layout_for_domfuzz(self, path):
"""
Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path
"""
old_dir = os.getcwd()
os.chdir(os.path.join(path))
try:
os.mkdir('dist')
... | python | def _layout_for_domfuzz(self, path):
"""
Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path
"""
old_dir = os.getcwd()
os.chdir(os.path.join(path))
try:
os.mkdir('dist')
... | [
"def",
"_layout_for_domfuzz",
"(",
"self",
",",
"path",
")",
":",
"old_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
")",
")",
"try",
":",
"os",
".",
"mkdir",
"(",
"'dist'",
")",
... | Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path | [
"Update",
"directory",
"to",
"work",
"with",
"DOMFuzz"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L571-L596 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._write_fuzzmanagerconf | def _write_fuzzmanagerconf(self, path):
"""
Write fuzzmanager config file for selected build
@type path: basestring
@param path: A string representation of the fuzzmanager config path
"""
output = configparser.RawConfigParser()
output.add_section('Main')
... | python | def _write_fuzzmanagerconf(self, path):
"""
Write fuzzmanager config file for selected build
@type path: basestring
@param path: A string representation of the fuzzmanager config path
"""
output = configparser.RawConfigParser()
output.add_section('Main')
... | [
"def",
"_write_fuzzmanagerconf",
"(",
"self",
",",
"path",
")",
":",
"output",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"output",
".",
"add_section",
"(",
"'Main'",
")",
"output",
".",
"set",
"(",
"'Main'",
",",
"'platform'",
",",
"self",
"."... | Write fuzzmanager config file for selected build
@type path: basestring
@param path: A string representation of the fuzzmanager config path | [
"Write",
"fuzzmanager",
"config",
"file",
"for",
"selected",
"build"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L598-L635 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_zip | def extract_zip(self, suffix, path='.'):
"""
Download and extract a zip artifact
@type suffix:
@param suffix:
@type path:
@param path:
"""
zip_fd, zip_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip')
os.close(zip_fd)
try:
... | python | def extract_zip(self, suffix, path='.'):
"""
Download and extract a zip artifact
@type suffix:
@param suffix:
@type path:
@param path:
"""
zip_fd, zip_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip')
os.close(zip_fd)
try:
... | [
"def",
"extract_zip",
"(",
"self",
",",
"suffix",
",",
"path",
"=",
"'.'",
")",
":",
"zip_fd",
",",
"zip_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.zip'",
")",
"os",
".",
"close",
"(",
"zip_fd",
"... | Download and extract a zip artifact
@type suffix:
@param suffix:
@type path:
@param path: | [
"Download",
"and",
"extract",
"a",
"zip",
"artifact"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L637-L656 |
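`extract_zip` shows the start of a download-to-temp-file pattern; the hedged sketch below fills in the likely shape of the rest. The `download` callable stands in for the module's `_download_url` helper, and the cleanup step in `finally` is an assumption.

```python
import os
import tempfile
import zipfile

def extract_zip_from(url, path='.', download=None):
    """Download a zip artifact into a temp file, extract it, then clean up."""
    zip_fd, zip_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip')
    os.close(zip_fd)                      # only the path is needed from here on
    try:
        download(url, zip_fn)             # stand-in for _download_url
        with zipfile.ZipFile(zip_fn) as zip_fp:
            for info in zip_fp.infolist():
                zip_fp.extract(info, path=path)
    finally:
        os.unlink(zip_fn)                 # assumed cleanup of the temp archive
```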
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_tar | def extract_tar(self, suffix, path='.'):
"""
Extract builds with .tar.(*) extension
When unpacking a build archive, only extract the firefox directory
@type suffix:
@param suffix:
@type path:
@param path:
"""
mode = suffix.split('.')[-1]
... | python | def extract_tar(self, suffix, path='.'):
"""
Extract builds with .tar.(*) extension
When unpacking a build archive, only extract the firefox directory
@type suffix:
@param suffix:
@type path:
@param path:
"""
mode = suffix.split('.')[-1]
... | [
"def",
"extract_tar",
"(",
"self",
",",
"suffix",
",",
"path",
"=",
"'.'",
")",
":",
"mode",
"=",
"suffix",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"tar_fd",
",",
"tar_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch... | Extract builds with .tar.(*) extension
When unpacking a build archive, only extract the firefox directory
@type suffix:
@param suffix:
@type path:
@param path: | [
"Extract",
"builds",
"with",
".",
"tar",
".",
"(",
"*",
")",
"extension",
"When",
"unpacking",
"a",
"build",
"archive",
"only",
"extract",
"the",
"firefox",
"directory"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L658-L686 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.download_apk | def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
apk_fd, apk_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
sh... | python | def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
apk_fd, apk_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
sh... | [
"def",
"download_apk",
"(",
"self",
",",
"path",
"=",
"'.'",
")",
":",
"apk_fd",
",",
"apk_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.apk'",
")",
"os",
".",
"close",
"(",
"apk_fd",
")",
"try",
":"... | Download Android .apk
@type path:
@param path: | [
"Download",
"Android",
".",
"apk"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L688-L701 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_dmg | def extract_dmg(self, path='.'):
"""
Extract builds with .dmg extension
Will only work if `hdiutil` is available.
@type path:
@param path:
"""
dmg_fd, dmg_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.dmg')
os.close(dmg_fd)
out_tmp = tempfi... | python | def extract_dmg(self, path='.'):
"""
Extract builds with .dmg extension
Will only work if `hdiutil` is available.
@type path:
@param path:
"""
dmg_fd, dmg_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.dmg')
os.close(dmg_fd)
out_tmp = tempfi... | [
"def",
"extract_dmg",
"(",
"self",
",",
"path",
"=",
"'.'",
")",
":",
"dmg_fd",
",",
"dmg_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.dmg'",
")",
"os",
".",
"close",
"(",
"dmg_fd",
")",
"out_tmp",
... | Extract builds with .dmg extension
Will only work if `hdiutil` is available.
@type path:
@param path: | [
"Extract",
"builds",
"with",
".",
"dmg",
"extension"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L703-L731 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.from_args | def from_args(cls, args=None, skip_dir_check=False):
"""
Construct a Fetcher from given command line arguments.
@type args: list(str)
@param args: Command line arguments (optional). Default is to use args from sys.argv
@type skip_dir_check: bool
@param skip_dir_check: B... | python | def from_args(cls, args=None, skip_dir_check=False):
"""
Construct a Fetcher from given command line arguments.
@type args: list(str)
@param args: Command line arguments (optional). Default is to use args from sys.argv
@type skip_dir_check: bool
@param skip_dir_check: B... | [
"def",
"from_args",
"(",
"cls",
",",
"args",
"=",
"None",
",",
"skip_dir_check",
"=",
"False",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"set_defaults",
"(",
"target",
"=",
"'firefox'",
",",
"build",
"=",
"'late... | Construct a Fetcher from given command line arguments.
@type args: list(str)
@param args: Command line arguments (optional). Default is to use args from sys.argv
@type skip_dir_check: bool
@param skip_dir_check: Boolean identifying whether to check for existing build directory
... | [
"Construct",
"a",
"Fetcher",
"from",
"given",
"command",
"line",
"arguments",
"."
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L734-L849 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.main | def main(cls):
"""
fuzzfetch main entry point
Run with --help for usage
"""
log_level = logging.INFO
log_fmt = '[%(asctime)s] %(message)s'
if bool(os.getenv('DEBUG')):
log_level = logging.DEBUG
log_fmt = '%(levelname).1s %(name)s [%(asctim... | python | def main(cls):
"""
fuzzfetch main entry point
Run with --help for usage
"""
log_level = logging.INFO
log_fmt = '[%(asctime)s] %(message)s'
if bool(os.getenv('DEBUG')):
log_level = logging.DEBUG
log_fmt = '%(levelname).1s %(name)s [%(asctim... | [
"def",
"main",
"(",
"cls",
")",
":",
"log_level",
"=",
"logging",
".",
"INFO",
"log_fmt",
"=",
"'[%(asctime)s] %(message)s'",
"if",
"bool",
"(",
"os",
".",
"getenv",
"(",
"'DEBUG'",
")",
")",
":",
"log_level",
"=",
"logging",
".",
"DEBUG",
"log_fmt",
"="... | fuzzfetch main entry point
Run with --help for usage | [
"fuzzfetch",
"main",
"entry",
"point"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L852-L888 |
ClericPy/torequests | torequests/crawlers.py | CommonRequests.init_original_response | def init_original_response(self):
"""Get the original response for comparing, confirm ``is_cookie_necessary``"""
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
self.encoding)
r1 = self.req.request(
retry=self... | python | def init_original_response(self):
"""Get the original response for comparing, confirm ``is_cookie_necessary``"""
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
self.encoding)
r1 = self.req.request(
retry=self... | [
"def",
"init_original_response",
"(",
"self",
")",
":",
"if",
"'json'",
"in",
"self",
".",
"request",
":",
"self",
".",
"request",
"[",
"'data'",
"]",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"request",
".",
"pop",
"(",
"'json'",
")",
")",
".",
... | Get the original response for comparing, confirm ``is_cookie_necessary`` | [
"Get",
"the",
"original",
"response",
"for",
"comparing",
"confirm",
"is_cookie_necessary"
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L75-L87 |
ClericPy/torequests | torequests/crawlers.py | CleanRequest.init_original_response | def init_original_response(self):
"""Get the original response for comparing, confirm is_cookie_necessary"""
no_cookie_resp = None
self.is_cookie_necessary = True
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
se... | python | def init_original_response(self):
"""Get the original response for comparing, confirm is_cookie_necessary"""
no_cookie_resp = None
self.is_cookie_necessary = True
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
se... | [
"def",
"init_original_response",
"(",
"self",
")",
":",
"no_cookie_resp",
"=",
"None",
"self",
".",
"is_cookie_necessary",
"=",
"True",
"if",
"'json'",
"in",
"self",
".",
"request",
":",
"self",
".",
"request",
"[",
"'data'",
"]",
"=",
"json",
".",
"dumps"... | Get the original response for comparing, confirm is_cookie_necessary | [
"Get",
"the",
"original",
"response",
"for",
"comparing",
"confirm",
"is_cookie_necessary"
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L145-L171 |
ClericPy/torequests | torequests/crawlers.py | CleanRequest.sort_url_qsl | def sort_url_qsl(cls, raw_url, **kwargs):
"""Do nothing but sort the params of url.
raw_url: the raw url to be sorted;
kwargs: (optional) same kwargs for ``sorted``.
"""
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
return cls._join_ur... | python | def sort_url_qsl(cls, raw_url, **kwargs):
"""Do nothing but sort the params of url.
raw_url: the raw url to be sorted;
kwargs: (optional) same kwargs for ``sorted``.
"""
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
return cls._join_ur... | [
"def",
"sort_url_qsl",
"(",
"cls",
",",
"raw_url",
",",
"*",
"*",
"kwargs",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"raw_url",
")",
"qsl",
"=",
"parse_qsl",
"(",
"parsed_url",
".",
"query",
")",
"return",
"cls",
".",
"_join_url",
"(",
"parsed_url",... | Do nothing but sort the params of url.
raw_url: the raw url to be sorted;
kwargs: (optional) same kwargs for ``sorted``. | [
"Do",
"nothing",
"but",
"sort",
"the",
"params",
"of",
"url",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L174-L182 |
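`sort_url_qsl` above relies on a private `_join_url` helper that is not shown; an equivalent sketch built only on the standard library gives the same observable behaviour of sorting just the query string.

```python
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

def sort_url_qsl(raw_url, **kwargs):
    """Return raw_url with its query parameters sorted, nothing else changed."""
    parsed = urlparse(raw_url)
    qsl = sorted(parse_qsl(parsed.query), **kwargs)
    return urlunparse(parsed._replace(query=urlencode(qsl)))

print(sort_url_qsl("http://example.test/p?b=2&a=1&c=3"))
# http://example.test/p?a=1&b=2&c=3
```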
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_url | def clean_url(self):
"""Only clean the url params and return self."""
raw_url = self.request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
for qs in qsl:
new_url = self._join_url(parsed_url,
[i for i in qs... | python | def clean_url(self):
"""Only clean the url params and return self."""
raw_url = self.request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
for qs in qsl:
new_url = self._join_url(parsed_url,
[i for i in qs... | [
"def",
"clean_url",
"(",
"self",
")",
":",
"raw_url",
"=",
"self",
".",
"request",
"[",
"'url'",
"]",
"parsed_url",
"=",
"urlparse",
"(",
"raw_url",
")",
"qsl",
"=",
"parse_qsl",
"(",
"parsed_url",
".",
"query",
")",
"for",
"qs",
"in",
"qsl",
":",
"n... | Only clean the url params and return self. | [
"Only",
"clean",
"the",
"url",
"params",
"and",
"return",
"self",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L201-L212 |
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_post_data | def clean_post_data(self):
"""Only clean the post-data and return self.
Including form-data / bytes-data / json-data."""
data = self.request.get('data')
if not (data and self.request['method'] == 'post'):
return self
# case of total_data
new_request ... | python | def clean_post_data(self):
"""Only clean the post-data and return self.
Including form-data / bytes-data / json-data."""
data = self.request.get('data')
if not (data and self.request['method'] == 'post'):
return self
# case of total_data
new_request ... | [
"def",
"clean_post_data",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"request",
".",
"get",
"(",
"'data'",
")",
"if",
"not",
"(",
"data",
"and",
"self",
".",
"request",
"[",
"'method'",
"]",
"==",
"'post'",
")",
":",
"return",
"self",
"# case o... | Only clean the post-data and return self.
Including form-data / bytes-data / json-data. | [
"Only",
"clean",
"the",
"post",
"-",
"data",
"and",
"return",
"self",
".",
"Including",
"form",
"-",
"data",
"/",
"bytes",
"-",
"data",
"/",
"json",
"-",
"data",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L214-L248 |
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_cookie | def clean_cookie(self):
"""Only clean the cookie from headers and return self."""
if not self.is_cookie_necessary:
return self
headers = self.request.get('headers', {})
cookies = SimpleCookie(headers['Cookie'])
for k, v in cookies.items():
new_cookie = '; ... | python | def clean_cookie(self):
"""Only clean the cookie from headers and return self."""
if not self.is_cookie_necessary:
return self
headers = self.request.get('headers', {})
cookies = SimpleCookie(headers['Cookie'])
for k, v in cookies.items():
new_cookie = '; ... | [
"def",
"clean_cookie",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_cookie_necessary",
":",
"return",
"self",
"headers",
"=",
"self",
".",
"request",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
"cookies",
"=",
"SimpleCookie",
"(",
"headers"... | Only clean the cookie from headers and return self. | [
"Only",
"clean",
"the",
"cookie",
"from",
"headers",
"and",
"return",
"self",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L250-L262 |
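The visible part of `clean_cookie` parses the `Cookie` header with `SimpleCookie` and appears to drop each cookie name in turn, joining the remaining pairs back into a header value; the rebuild step on its own looks roughly like this (the header value here is made up for illustration).

```python
from http.cookies import SimpleCookie

header = "session=abc123; theme=dark; tracker=xyz"
cookies = SimpleCookie(header)
for drop in cookies:
    reduced = '; '.join('%s=%s' % (k, v.value)
                        for k, v in cookies.items() if k != drop)
    print('without %-8s -> %s' % (drop, reduced))
```

The cleaner presumably replays the request with each reduced header and compares the response against the original one to decide which cookies are actually required, in line with the `init_original_response` rows earlier.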
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_headers | def clean_headers(self):
"""Only clean the headers (cookie include) and return self."""
if not isinstance(self.request.get('headers'), dict):
return self
headers = self.request['headers']
if 'Cookie' in headers:
self.clean_cookie()
for key in headers:
... | python | def clean_headers(self):
"""Only clean the headers (cookie include) and return self."""
if not isinstance(self.request.get('headers'), dict):
return self
headers = self.request['headers']
if 'Cookie' in headers:
self.clean_cookie()
for key in headers:
... | [
"def",
"clean_headers",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"request",
".",
"get",
"(",
"'headers'",
")",
",",
"dict",
")",
":",
"return",
"self",
"headers",
"=",
"self",
".",
"request",
"[",
"'headers'",
"]",
"if",
"'... | Only clean the headers (cookie include) and return self. | [
"Only",
"clean",
"the",
"headers",
"(",
"cookie",
"include",
")",
"and",
"return",
"self",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L264-L280 |
ClericPy/torequests | torequests/crawlers.py | CleanRequest.reset_new_request | def reset_new_request(self):
"""Remove the non-sense args from the self.ignore, return self.new_request"""
raw_url = self.new_request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
new_url = self._join_url(
parsed_url, [i for i in qsl if i not... | python | def reset_new_request(self):
"""Remove the non-sense args from the self.ignore, return self.new_request"""
raw_url = self.new_request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
new_url = self._join_url(
parsed_url, [i for i in qsl if i not... | [
"def",
"reset_new_request",
"(",
"self",
")",
":",
"raw_url",
"=",
"self",
".",
"new_request",
"[",
"'url'",
"]",
"parsed_url",
"=",
"urlparse",
"(",
"raw_url",
")",
"qsl",
"=",
"parse_qsl",
"(",
"parsed_url",
".",
"query",
")",
"new_url",
"=",
"self",
"... | Remove the non-sense args from the self.ignore, return self.new_request | [
"Remove",
"the",
"non",
"-",
"sense",
"args",
"from",
"the",
"self",
".",
"ignore",
"return",
"self",
".",
"new_request"
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L282-L323 |
ClericPy/torequests | torequests/crawlers.py | CleanRequest.result | def result(self):
"""Whole task, clean_all + reset_new_request, return self.new_request."""
if not self.tasks:
self.clean_all()
tasks_length = len(self.tasks)
self.logger_function(
'%s tasks of request, will cost at least %s seconds.' %
(tasks_length,
... | python | def result(self):
"""Whole task, clean_all + reset_new_request, return self.new_request."""
if not self.tasks:
self.clean_all()
tasks_length = len(self.tasks)
self.logger_function(
'%s tasks of request, will cost at least %s seconds.' %
(tasks_length,
... | [
"def",
"result",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"tasks",
":",
"self",
".",
"clean_all",
"(",
")",
"tasks_length",
"=",
"len",
"(",
"self",
".",
"tasks",
")",
"self",
".",
"logger_function",
"(",
"'%s tasks of request, will cost at least %s ... | Whole task, clean_all + reset_new_request, return self.new_request. | [
"Whole",
"task",
"clean_all",
"+",
"reset_new_request",
"return",
"self",
".",
"new_request",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L329-L344 |
ClericPy/torequests | torequests/crawlers.py | Seed.as_json | def as_json(self, ensure_ascii=False):
"""Property return key-value json-string from __slots__."""
return json.dumps(self.as_dict, ensure_ascii=ensure_ascii) | python | def as_json(self, ensure_ascii=False):
"""Property return key-value json-string from __slots__."""
return json.dumps(self.as_dict, ensure_ascii=ensure_ascii) | [
"def",
"as_json",
"(",
"self",
",",
"ensure_ascii",
"=",
"False",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"as_dict",
",",
"ensure_ascii",
"=",
"ensure_ascii",
")"
] | Property return key-value json-string from __slots__. | [
"Property",
"return",
"key",
"-",
"value",
"json",
"-",
"string",
"from",
"__slots__",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L382-L384 |
vsoch/helpme | helpme/client/__init__.py | main | def main():
'''the main entry point for the HelpMe Command line application. Currently,
the user can request help or set config values for a particular helper.
'''
# Customize parser
parser = get_parser()
subparsers = get_subparsers(parser)
def help(return_code=0):
'''print hel... | python | def main():
'''the main entry point for the HelpMe Command line application. Currently,
the user can request help or set config values for a particular helper.
'''
# Customize parser
parser = get_parser()
subparsers = get_subparsers(parser)
def help(return_code=0):
'''print hel... | [
"def",
"main",
"(",
")",
":",
"# Customize parser",
"parser",
"=",
"get_parser",
"(",
")",
"subparsers",
"=",
"get_subparsers",
"(",
"parser",
")",
"def",
"help",
"(",
"return_code",
"=",
"0",
")",
":",
"'''print help, including the software version and active clien... | the main entry point for the HelpMe Command line application. Currently,
the user can request help or set config values for a particular helper. | [
"the",
"main",
"entry",
"point",
"for",
"the",
"HelpMe",
"Command",
"line",
"application",
".",
"Currently",
"the",
"user",
"can",
"request",
"help",
"or",
"set",
"config",
"values",
"for",
"a",
"particular",
"helper",
"."
] | train | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/client/__init__.py#L95-L153 |
mjirik/io3d | io3d/cachefile.py | CacheFile.get_or_save_default | def get_or_save_default(self, key, default_value):
"""
Get value stored in cache file or store there default value.
:param key:
:param default_value:
:return:
"""
val = self.get_or_none(key)
if val is None:
self.update(key, default_value)
... | python | def get_or_save_default(self, key, default_value):
"""
Get value stored in cache file or store there default value.
:param key:
:param default_value:
:return:
"""
val = self.get_or_none(key)
if val is None:
self.update(key, default_value)
... | [
"def",
"get_or_save_default",
"(",
"self",
",",
"key",
",",
"default_value",
")",
":",
"val",
"=",
"self",
".",
"get_or_none",
"(",
"key",
")",
"if",
"val",
"is",
"None",
":",
"self",
".",
"update",
"(",
"key",
",",
"default_value",
")",
"val",
"=",
... | Get value stored in cache file or store there default value.
:param key:
:param default_value:
:return: | [
"Get",
"value",
"stored",
"in",
"cache",
"file",
"or",
"store",
"there",
"default",
"value",
".",
":",
"param",
"key",
":",
":",
"param",
"default_value",
":",
":",
"return",
":"
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/cachefile.py#L46-L57 |
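`get_or_save_default` above is a plain get-or-set; a minimal in-memory stand-in makes the control flow obvious. The real class persists to a cache file on disk, so the dict backing here is purely illustrative.

```python
class CacheStandIn:
    def __init__(self):
        self._store = {}

    def get_or_none(self, key):
        return self._store.get(key)

    def update(self, key, value):
        self._store[key] = value

    def get_or_save_default(self, key, default_value):
        val = self.get_or_none(key)
        if val is None:
            self.update(key, default_value)
            val = default_value
        return val

cache = CacheStandIn()
print(cache.get_or_save_default("threads", 4))  # stores and returns 4
print(cache.get_or_save_default("threads", 8))  # returns the previously stored 4
```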
mjirik/io3d | io3d/datawriter.py | saveOverlayToDicomCopy | def saveOverlayToDicomCopy(input_dcmfilelist, output_dicom_dir, overlays,
crinfo, orig_shape):
""" Save overlay to dicom. """
from . import datawriter as dwriter
# import qmisc
if not os.path.exists(output_dicom_dir):
os.makedirs(output_dicom_dir)
import imtools.... | python | def saveOverlayToDicomCopy(input_dcmfilelist, output_dicom_dir, overlays,
crinfo, orig_shape):
""" Save overlay to dicom. """
from . import datawriter as dwriter
# import qmisc
if not os.path.exists(output_dicom_dir):
os.makedirs(output_dicom_dir)
import imtools.... | [
"def",
"saveOverlayToDicomCopy",
"(",
"input_dcmfilelist",
",",
"output_dicom_dir",
",",
"overlays",
",",
"crinfo",
",",
"orig_shape",
")",
":",
"from",
".",
"import",
"datawriter",
"as",
"dwriter",
"# import qmisc",
"if",
"not",
"os",
".",
"path",
".",
"exists"... | Save overlay to dicom. | [
"Save",
"overlay",
"to",
"dicom",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L527-L542 |
mjirik/io3d | io3d/datawriter.py | DataWriter.__get_segmentation_path | def __get_segmentation_path(self, path):
""" Create path with "_segmentation" suffix and keep extension.
:param path:
:return:
"""
startpath, ext = os.path.splitext(path)
segmentation_path = startpath + "_segmentation" + ext
return segmentation_path | python | def __get_segmentation_path(self, path):
""" Create path with "_segmentation" suffix and keep extension.
:param path:
:return:
"""
startpath, ext = os.path.splitext(path)
segmentation_path = startpath + "_segmentation" + ext
return segmentation_path | [
"def",
"__get_segmentation_path",
"(",
"self",
",",
"path",
")",
":",
"startpath",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"segmentation_path",
"=",
"startpath",
"+",
"\"_segmentation\"",
"+",
"ext",
"return",
"segmentation_path"... | Create path with "_segmentation" suffix and keep extension.
:param path:
:return: | [
"Create",
"path",
"with",
"_segmentation",
"suffix",
"and",
"keep",
"extension",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L48-L56 |
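`__get_segmentation_path` is fully visible; two quick calls show both the intended behaviour and the usual `splitext` caveat with double extensions.

```python
import os

def get_segmentation_path(path):
    """Insert a "_segmentation" suffix before the file extension."""
    startpath, ext = os.path.splitext(path)
    return startpath + "_segmentation" + ext

print(get_segmentation_path("scan/volume.mhd"))  # scan/volume_segmentation.mhd
print(get_segmentation_path("volume.nii.gz"))    # volume.nii_segmentation.gz (only .gz counts as the extension)
```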
mjirik/io3d | io3d/datawriter.py | DataWriter.Write3DData | def Write3DData(self, data3d, path, filetype='auto', metadata=None, progress_callback=None, sfin=True):
"""
:param data3d: input ndarray data
:param path: output path, to specify slice number advanced formatting options (like {:06d}) can be used
Check function filename_format() for more ... | python | def Write3DData(self, data3d, path, filetype='auto', metadata=None, progress_callback=None, sfin=True):
"""
:param data3d: input ndarray data
:param path: output path, to specify slice number advanced formatting options (like {:06d}) can be used
Check function filename_format() for more ... | [
"def",
"Write3DData",
"(",
"self",
",",
"data3d",
",",
"path",
",",
"filetype",
"=",
"'auto'",
",",
"metadata",
"=",
"None",
",",
"progress_callback",
"=",
"None",
",",
"sfin",
"=",
"True",
")",
":",
"self",
".",
"orig_path",
"=",
"path",
"path",
"=",
... | :param data3d: input ndarray data
:param path: output path, to specify slice number advanced formatting options (like {:06d}) can be used
Check function filename_format() for more details.
:param metadata: {'voxelsize_mm': [1, 1, 1]}
:param filetype: dcm, vtk, rawiv, image_stack
... | [
":",
"param",
"data3d",
":",
"input",
"ndarray",
"data",
":",
"param",
"path",
":",
"output",
"path",
"to",
"specify",
"slice",
"number",
"advanced",
"formatting",
"options",
"(",
"like",
"{",
":",
"06d",
"}",
")",
"can",
"be",
"used",
"Check",
"function... | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L59-L137 |
mjirik/io3d | io3d/datawriter.py | DataWriter._fix_sitk_bug | def _fix_sitk_bug(self, path, metadata):
"""
There is a bug in simple ITK for Z axis in 3D images. This is a fix
:param path:
:param metadata:
:return:
"""
ds = dicom.read_file(path)
ds.SpacingBetweenSlices = str(metadata["voxelsize_mm"][0])[:16]
d... | python | def _fix_sitk_bug(self, path, metadata):
"""
There is a bug in simple ITK for Z axis in 3D images. This is a fix
:param path:
:param metadata:
:return:
"""
ds = dicom.read_file(path)
ds.SpacingBetweenSlices = str(metadata["voxelsize_mm"][0])[:16]
d... | [
"def",
"_fix_sitk_bug",
"(",
"self",
",",
"path",
",",
"metadata",
")",
":",
"ds",
"=",
"dicom",
".",
"read_file",
"(",
"path",
")",
"ds",
".",
"SpacingBetweenSlices",
"=",
"str",
"(",
"metadata",
"[",
"\"voxelsize_mm\"",
"]",
"[",
"0",
"]",
")",
"[",
... | There is a bug in simple ITK for Z axis in 3D images. This is a fix
:param path:
:param metadata:
:return: | [
"There",
"is",
"a",
"bug",
"in",
"simple",
"ITK",
"for",
"Z",
"axis",
"in",
"3D",
"images",
".",
"This",
"is",
"a",
"fix",
":",
"param",
"path",
":",
":",
"param",
"metadata",
":",
":",
"return",
":"
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L150-L159 |
mjirik/io3d | io3d/datawriter.py | DataWriter.DataCopyWithOverlay | def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
"""
Function make 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory
"""
dcmlist = dcm... | python | def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
"""
Function make 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory
"""
dcmlist = dcm... | [
"def",
"DataCopyWithOverlay",
"(",
"self",
",",
"dcmfilelist",
",",
"out_dir",
",",
"overlays",
")",
":",
"dcmlist",
"=",
"dcmfilelist",
"# data3d = []",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dcmlist",
")",
")",
":",
"onefile",
"=",
"dcmlist",
"[",
... | Function make 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory | [
"Function",
"make",
"3D",
"data",
"from",
"dicom",
"file",
"slices"
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L208-L237 |
mjirik/io3d | io3d/datawriter.py | DataWriter.add_overlay_to_slice_file | def add_overlay_to_slice_file(
self,
filename,
overlay,
i_overlay,
filename_out=None
):
""" Function adds overlay to existing file.
"""
if filename_out is None:
filename_out = filename
filename = op.expanduser(filename)
data... | python | def add_overlay_to_slice_file(
self,
filename,
overlay,
i_overlay,
filename_out=None
):
""" Function adds overlay to existing file.
"""
if filename_out is None:
filename_out = filename
filename = op.expanduser(filename)
data... | [
"def",
"add_overlay_to_slice_file",
"(",
"self",
",",
"filename",
",",
"overlay",
",",
"i_overlay",
",",
"filename_out",
"=",
"None",
")",
":",
"if",
"filename_out",
"is",
"None",
":",
"filename_out",
"=",
"filename",
"filename",
"=",
"op",
".",
"expanduser",
... | Function adds overlay to existing file. | [
"Function",
"adds",
"overlay",
"to",
"existing",
"file",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L240-L254 |
mjirik/io3d | io3d/deprecation.py | deprecated | def deprecated(instructions):
"""
Flags a method as deprecated.
:param instructions: A human-friendly string of instructions, such as: 'Please migrate to add_proxy() ASAP.'
:return: DeprecatedWarning
"""
def decorator(func):
"""This is a decorator which can be used to mark functions as ... | python | def deprecated(instructions):
"""
Flags a method as deprecated.
:param instructions: A human-friendly string of instructions, such as: 'Please migrate to add_proxy() ASAP.'
:return: DeprecatedWarning
"""
def decorator(func):
"""This is a decorator which can be used to mark functions as ... | [
"def",
"deprecated",
"(",
"instructions",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"\"\"\"This is a decorator which can be used to mark functions as deprecated.\n\n It will result in a warning being emitted when the function is used.\n \"\"\"",
"@",
"functools",... | Flags a method as deprecated.
:param instructions: A human-friendly string of instructions, such as: 'Please migrate to add_proxy() ASAP.'
:return: DeprecatedWarning | [
"Flags",
"a",
"method",
"as",
"deprecated",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/deprecation.py#L15-L38 |
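The inner functions of the `deprecated` decorator are truncated above; a conventional reconstruction of that pattern is sketched below. The real module's docstring mentions a custom `DeprecatedWarning`, which is simplified here to the built-in `DeprecationWarning`, so treat the details as assumptions.

```python
import functools
import warnings

def deprecated(instructions):
    """Flag a function as deprecated with human-friendly migration instructions."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            message = 'Call to deprecated function {}. {}'.format(func.__name__, instructions)
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator

@deprecated('Please migrate to add_proxy() ASAP.')
def old_api():
    return 42
```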
SetBased/py-stratum | pystratum/style/PyStratumStyle.py | PyStratumStyle.log_verbose | def log_verbose(self, message):
"""
Logs a message only when logging level is verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERBOSE:
self.writeln(message) | python | def log_verbose(self, message):
"""
Logs a message only when logging level is verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERBOSE:
self.writeln(message) | [
"def",
"log_verbose",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"get_verbosity",
"(",
")",
">=",
"Output",
".",
"VERBOSITY_VERBOSE",
":",
"self",
".",
"writeln",
"(",
"message",
")"
] | Logs a message only when logging level is verbose.
:param str|list[str] message: The message. | [
"Logs",
"a",
"message",
"only",
"when",
"logging",
"level",
"is",
"verbose",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/style/PyStratumStyle.py#L50-L57 |
SetBased/py-stratum | pystratum/style/PyStratumStyle.py | PyStratumStyle.log_very_verbose | def log_very_verbose(self, message):
"""
Logs a message only when logging level is very verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERY_VERBOSE:
self.writeln(message) | python | def log_very_verbose(self, message):
"""
Logs a message only when logging level is very verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERY_VERBOSE:
self.writeln(message) | [
"def",
"log_very_verbose",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"get_verbosity",
"(",
")",
">=",
"Output",
".",
"VERBOSITY_VERY_VERBOSE",
":",
"self",
".",
"writeln",
"(",
"message",
")"
] | Logs a message only when logging level is very verbose.
:param str|list[str] message: The message. | [
"Logs",
"a",
"message",
"only",
"when",
"logging",
"level",
"is",
"very",
"verbose",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/style/PyStratumStyle.py#L60-L67 |
PlaidWeb/Pushl | pushl/feeds.py | get_feed | async def get_feed(config, url):
""" Get a feed
Arguments:
config -- the configuration
url -- The URL of the feed
retval -- a tuple of feed,previous_version,changed
"""
LOGGER.debug("++WAIT: cache get feed %s", url)
previous = config.cache.get(
'feed', url, schema_version=SCH... | python | async def get_feed(config, url):
""" Get a feed
Arguments:
config -- the configuration
url -- The URL of the feed
retval -- a tuple of feed,previous_version,changed
"""
LOGGER.debug("++WAIT: cache get feed %s", url)
previous = config.cache.get(
'feed', url, schema_version=SCH... | [
"async",
"def",
"get_feed",
"(",
"config",
",",
"url",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"++WAIT: cache get feed %s\"",
",",
"url",
")",
"previous",
"=",
"config",
".",
"cache",
".",
"get",
"(",
"'feed'",
",",
"url",
",",
"schema_version",
"=",
"S... | Get a feed
Arguments:
config -- the configuration
url -- The URL of the feed
retval -- a tuple of feed,previous_version,changed | [
"Get",
"a",
"feed"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L95-L137 |
PlaidWeb/Pushl | pushl/feeds.py | Feed.archive_namespace | def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
... | python | def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
... | [
"def",
"archive_namespace",
"(",
"self",
")",
":",
"try",
":",
"for",
"ns_prefix",
",",
"url",
"in",
"self",
".",
"feed",
".",
"namespaces",
".",
"items",
"(",
")",
":",
"if",
"url",
"==",
"'http://purl.org/syndication/history/1.0'",
":",
"return",
"ns_prefi... | Returns the known namespace of the RFC5005 extension, if any | [
"Returns",
"the",
"known",
"namespace",
"of",
"the",
"RFC5005",
"extension",
"if",
"any"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L33-L41 |
PlaidWeb/Pushl | pushl/feeds.py | Feed.entry_links | def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')} | python | def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')} | [
"def",
"entry_links",
"(",
"self",
")",
":",
"return",
"{",
"entry",
"[",
"'link'",
"]",
"for",
"entry",
"in",
"self",
".",
"feed",
".",
"entries",
"if",
"entry",
"and",
"entry",
".",
"get",
"(",
"'link'",
")",
"}"
] | Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions) | [
"Given",
"a",
"parsed",
"feed",
"return",
"the",
"links",
"to",
"its",
"entries",
"including",
"ones",
"which",
"disappeared",
"(",
"as",
"a",
"quick",
"-",
"and",
"-",
"dirty",
"way",
"to",
"support",
"deletions",
")"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L44-L48 |
PlaidWeb/Pushl | pushl/feeds.py | Feed.is_archive | def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
i... | python | def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
i... | [
"def",
"is_archive",
"(",
"self",
")",
":",
"ns_prefix",
"=",
"self",
".",
"archive_namespace",
"if",
"ns_prefix",
":",
"if",
"ns_prefix",
"+",
"'_archive'",
"in",
"self",
".",
"feed",
".",
"feed",
":",
"# This is declared to be an archive view",
"return",
"True... | Given a parsed feed, returns True if this is an archive feed | [
"Given",
"a",
"parsed",
"feed",
"returns",
"True",
"if",
"this",
"is",
"an",
"archive",
"feed"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L51-L70 |
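The body in this row is cut off after the declared-archive check. A sketch of that visible test over a plain feed-metadata dictionary; it covers only the '<prefix>_archive' flag and makes no claim about what the truncated remainder does.

def is_archive(feed_meta, ns_prefix):
    # RFC 5005 marks archive documents with an <fh:archive/> element, which
    # feedparser-style metadata surfaces as a '<prefix>_archive' key.
    if ns_prefix and ns_prefix + '_archive' in feed_meta:
        return True
    return False

print(is_archive({'fh_archive': ''}, 'fh'))    # True
print(is_archive({'title': 'My feed'}, 'fh'))  # False
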
PlaidWeb/Pushl | pushl/feeds.py | Feed.update_websub | async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'... | python | async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'... | [
"async",
"def",
"update_websub",
"(",
"self",
",",
"config",
",",
"hub",
")",
":",
"try",
":",
"LOGGER",
".",
"debug",
"(",
"\"WebSub: Notifying %s of %s\"",
",",
"hub",
",",
"self",
".",
"url",
")",
"request",
"=",
"await",
"utils",
".",
"retry_post",
"... | Update WebSub hub to know about this feed | [
"Update",
"WebSub",
"hub",
"to",
"know",
"about",
"this",
"feed"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L72-L92 |
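A minimal sketch of the WebSub publish ping this method performs, using aiohttp directly rather than the project's retry_post helper; the hub URL in the commented example is only a placeholder.

import asyncio
import aiohttp

async def update_websub(hub, feed_url):
    # WebSub content-publication ping: a form-encoded POST with hub.mode=publish.
    async with aiohttp.ClientSession() as session:
        async with session.post(hub, data={'hub.mode': 'publish',
                                           'hub.url': feed_url}) as response:
            return 200 <= response.status < 300

# Example (needs a real hub):
# asyncio.run(update_websub('https://example.com/hub', 'https://example.com/feed.xml'))
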
SetBased/py-stratum | pystratum/DocBlockReflection.py | DocBlockReflection.get_tags | def get_tags(self, name):
"""
Returns a list of tags.
@param str name: The name of the tag.
:rtype: list[str]
"""
tags = list()
for tag in self._tags:
if tag[0] == name:
tags.append(tag[1])
return tags | python | def get_tags(self, name):
"""
Returns a list of tags.
@param str name: The name of the tag.
:rtype: list[str]
"""
tags = list()
for tag in self._tags:
if tag[0] == name:
tags.append(tag[1])
return tags | [
"def",
"get_tags",
"(",
"self",
",",
"name",
")",
":",
"tags",
"=",
"list",
"(",
")",
"for",
"tag",
"in",
"self",
".",
"_tags",
":",
"if",
"tag",
"[",
"0",
"]",
"==",
"name",
":",
"tags",
".",
"append",
"(",
"tag",
"[",
"1",
"]",
")",
"return... | Returns a list of tags.
@param str name: The name of the tag.
:rtype: list[str] | [
"Returns",
"a",
"list",
"of",
"tags",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L68-L81 |
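The method assumes self._tags is a list of (name, value) pairs. A short usage sketch with made-up docblock tags showing the same filtering:

tags = [('param', 'str name: The name of the tag.'),
        ('rtype', 'list[str]'),
        ('param', 'bool strict: Whether to fail on unknown tags.')]

# Same filtering as get_tags('param'): keep the values of every matching tag.
print([value for name, value in tags if name == 'param'])
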
SetBased/py-stratum | pystratum/DocBlockReflection.py | DocBlockReflection.__remove_leading_empty_lines | def __remove_leading_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
tmp = list()
empty = True
for i in range(0, len(lines)):
empty = empty and lines[i] == ''
if not empty:
... | python | def __remove_leading_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
tmp = list()
empty = True
for i in range(0, len(lines)):
empty = empty and lines[i] == ''
if not empty:
... | [
"def",
"__remove_leading_empty_lines",
"(",
"lines",
")",
":",
"tmp",
"=",
"list",
"(",
")",
"empty",
"=",
"True",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"lines",
")",
")",
":",
"empty",
"=",
"empty",
"and",
"lines",
"[",
"i",
"]",
... | Removes leading empty lines from a list of lines.
:param list[str] lines: The lines. | [
"Removes",
"leading",
"empty",
"lines",
"from",
"a",
"list",
"of",
"lines",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L94-L107 |
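The loop body in this row is truncated. An equivalent, self-contained version using itertools.dropwhile; the module-level function name is for illustration only (the original is a static method).

from itertools import dropwhile

def remove_leading_empty_lines(lines):
    # Drop empty strings from the front; keep everything from the first
    # non-empty line on, including interior and trailing blanks.
    return list(dropwhile(lambda line: line == '', lines))

print(remove_leading_empty_lines(['', '', 'first', '', 'last', '']))
# -> ['first', '', 'last', '']
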
SetBased/py-stratum | pystratum/DocBlockReflection.py | DocBlockReflection.__remove_trailing_empty_lines | def __remove_trailing_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
lines.reverse()
tmp = DocBlockReflection.__remove_leading_empty_lines(lines)
lines.reverse()
tmp.reverse()
retu... | python | def __remove_trailing_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
lines.reverse()
tmp = DocBlockReflection.__remove_leading_empty_lines(lines)
lines.reverse()
tmp.reverse()
retu... | [
"def",
"__remove_trailing_empty_lines",
"(",
"lines",
")",
":",
"lines",
".",
"reverse",
"(",
")",
"tmp",
"=",
"DocBlockReflection",
".",
"__remove_leading_empty_lines",
"(",
"lines",
")",
"lines",
".",
"reverse",
"(",
")",
"tmp",
".",
"reverse",
"(",
")",
"... | Removes leading empty lines from a list of lines.
:param list[str] lines: The lines. | [
"Removes",
"leading",
"empty",
"lines",
"from",
"a",
"list",
"of",
"lines",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L111-L122 |
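The original trims the tail by reversing the list, reusing the leading-trim helper, and reversing back (its docstring still says "leading", which looks like a copy-paste slip in the source). A direct equivalent for illustration:

def remove_trailing_empty_lines(lines):
    # Trim empty strings from the end without touching leading or interior blanks.
    result = list(lines)
    while result and result[-1] == '':
        result.pop()
    return result

print(remove_trailing_empty_lines(['first', '', 'last', '', '']))  # ['first', '', 'last']
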