query stringlengths 9 9.05k | document stringlengths 10 222k | negatives listlengths 19 20 | metadata dict |
|---|---|---|---|
Audit names in commit metadata. Names which do not have a first name and a surname are extremely uncommon and when present are therefore generally invalid. As we want people to use their actual name when committing we do some checks to make sure that what looks like an actual name is present. | def audit_names_in_metadata(self):
# Iterate over commits....
for commit in self.repository.commits.values():
for name in [ commit.committer_name, commit.author_name ]:
# Is the name whitelisted?
if name in self.FullNameWhitelist:
continue
# As a special case, allow the name 'GitHub' for certain repositories
if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:
self.__log_warning(commit.sha1, "Commit has username 'GitHub' (web merge of PR); allowing anyway")
continue
# Check to see if the name contains spaces - if not - it is probably misconfigured....
if " " not in name.strip():
self.__log_failure(commit.sha1, "Non-full name: " + name)
continue | [
"def sanitize_names(self):\n self.first_name = self._sanitize_name(self.first_name)\n self.last_name = self._sanitize_name(self.last_name)",
"def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Audit commit metadata. Invalid hostnames such as localhost or (none) will be caught by this auditor. This will ensure that invalid email addresses or users will not show up in commits. | def audit_emails_in_metadata(self):
# Iterate over commits....
disallowed_domains = ["localhost", "localhost.localdomain", "(none)", "bombardier.com", "rail.bombardier.com"]
for commit in self.repository.commits.values():
for email_address in [ commit.committer_email, commit.author_email ]:
# Extract the email address, and reject them if extraction fails....
extraction = re.match("^(\S+)@(\S+)$", email_address)
if not extraction:
self.__log_failure(commit.sha1, "Seemingly invalid email address: " + email_address)
continue
# Don't allow domains which are disallowed...
domain = extraction.group(2)
if domain in disallowed_domains:
self.__log_failure(commit.sha1, "Email address using a blocked domain: " + email_address)
continue
# Ensure they have a valid MX/A entry in DNS....
try:
dns.resolver.query(domain, "MX")
except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel):
try:
dns.resolver.query(domain, "A")
except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN):
self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address)
except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address) | [
"def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to construct an address header for emails as Python stuffs it up | def address_header(self, name, email):
fixed_name = Header( name ).encode()
return unicode("{0} <{1}>").format(fixed_name, email) | [
"def construct_from_header():\n\n # The tenant properties will not be set if the call to this method\n # does not come via a django request => we need to setup the tenant\n # properties first.\n # properties.tenant_properties will be an empty dict if the tenant\n # properties has not be initialised y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse special keywords in commits to determine further postcommit actions. | def determine_keywords(self):
split = dict()
split['email_cc'] = re.compile("^\s*CC[-_]?MAIL[:=]\s*(.*)")
split['email_cc2'] = re.compile("^\s*C[Cc][:=]\s*(.*)")
split['fixed_in'] = re.compile("^\s*FIXED[-_]?IN[:=]\s*(.*)")
numeric = dict()
numeric['bug_fixed'] = re.compile("^\s*(?:BUGS?|FEATURE)[:=]\s*(.+)")
numeric['bug_cc'] = re.compile("^\s*CCBUGS?[:=]\s*(.+)")
presence = dict()
presence['email_gui'] = re.compile("^\s*GUI:")
presence['silent'] = re.compile("(?:CVS|SVN|GIT|SCM).?SILENT")
presence['notes'] = re.compile("(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')")
results = defaultdict(list)
for line in self.commit.message.split("\n"):
# If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off
# This allows for people to fill keywords in the Differential Summary and have this work smoothly for them
line = re.sub("^Summary: (.+)", "\g<1>", line)
# Start processing our keywords...
for (name, regex) in split.iteritems():
match = re.match( regex, line )
if match:
results[name] += [result.strip() for result in match.group(1).split(",")]
for (name, regex) in numeric.iteritems():
match = re.match( regex, line )
if match:
results[name] += re.findall("(\d{1,10})", match.group(1))
for (name, regex) in presence.iteritems():
if re.match( regex, line ):
results[name] = True
self.keywords = results | [
"def is_commit(tokens):\n return tokens[0].lower() == COMMIT",
"def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send the commit notification to CIA. The message is created incrementally using lxml's "E" builder. | def notify(self, builder):
# Build the <files> section for the template...
commit = builder.commit
files = E.files()
commit_msg = commit.message.strip()
commit_msg = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', commit_msg)
for filename in commit.files_changed:
safe_filename = re.sub(r'[\x00-\x09\x0B-\x1f\x7f-\xff]', '', filename)
file_element = E.file(safe_filename)
files.append(file_element)
# Build the message
cia_message = self.MESSAGE()
cia_message.append(self._generator)
source = self.SOURCE(E.project("KDE"))
source.append(E.module(self.repository.path))
source.append(E.branch(self.repository.ref_name))
cia_message.append(source)
cia_message.append(self.TIMESTAMP(commit.date))
body = self.BODY()
commit_data = self.COMMIT()
commit_data.append(E.author(commit.author_name))
commit_data.append(E.revision(commit.description))
commit_data.append(files)
commit_data.append(E.log(commit_msg))
commit_data.append(E.url(commit.url))
body.append(commit_data)
cia_message.append(body)
# Convert to a string
commit_xml = etree.tostring(cia_message)
# Craft the email....
message = MIMEText( commit_xml, 'xml', 'utf-8' )
message['Subject'] = "DeliverXML"
message['From'] = "sysadmin@kde.org"
message['To'] = "commits@platna.kde.org"
# Send email...
self.smtp.sendmail("sysadmin@kde.org", ["commits@platna.kde.org"],
message.as_string()) | [
"def notify(self, id, command, data = None):\n print \"sending:\", id, command, data\n if command == Code.START: data = [id]\n try:\n msg = Message(command = command, data = data)\n self.contacts[id].send(msg.encode())\n except:\n print \"msg failed\"",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for potential problems in a commit. | def check_commit_problems(self, commit, diff):
# Initialise
self._license_problem = False
self._commit_problem = False
self._commit_notes = defaultdict(list)
# Unsafe regex checks...
unsafe_matches = list()
unsafe_matches.append( r"\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\b\s*[\(\r\n]" )
unsafe_matches.append( r"\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\b\s*[\(\r\n]" )
unsafe_matches.append( r"(scanf)\b\s*[\(\r\n]" )
valid_filename_regex = r"\.(cpp|cc|cxx|C|c\+\+|c|l|y||h|H|hh|hxx|hpp|h\+\+|qml)$"
# Retrieve the diff and do the problem checks...
filename = unicode("")
filediff = list()
for line in diff:
file_change = re.match( "^diff --(cc |git a\/.+ b\/)(.+)$", line )
if file_change:
# Are we changing file? If so, we have the full diff, so do a license check....
if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename):
self.check_commit_license(filename, ''.join(filediff))
filediff = list()
filename = file_change.group(2)
continue
# Diff headers are bogus
if re.match("@@ -\d+,\d+ \+\d+ @@", line):
filediff = list()
continue
# Do an incremental check for *.desktop syntax errors....
if re.search("\.desktop$", filename) and re.search("[^=]+=.*[ \t]$", line) and line.startswith("+") and not re.match("^\+#", line):
self._commit_notes[filename].append( "[TRAILING SPACE] **" )
self._commit_problem = True
# Check for things which are unsafe...
for safety_match in unsafe_matches:
match = re.match(safety_match, line)
if match:
note = "[POSSIBLY UNSAFE: {0}] **".format( match.group(1) )
self._commit_notes[filename].append(note)
self._commit_problem = True
# Store the diff....
filediff.append(line)
if filename != "" and commit.files_changed[ filename ]["change"] in ['A'] and re.search(valid_filename_regex, filename):
self.check_commit_license(filename, ''.join(filediff)) | [
"def validate_commit(commit, branch_version):\n\n # this returns headers, followed by a empty line, then the message, so\n # we strip the headers\n message = subprocess.check_output([\n 'git', 'cat-file', 'commit', commit]).split('\\n\\n', 1)[1]\n\n match = RESOLVES_RE.search(message)\n if not... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns index of the resource to use for making requests to get data; if none of the resources are available, returns the number of seconds until a resource becomes available | def get_resource_index(self):
result = -1
max_sleep_time = self.time_window
with self._lock:
while result == -1:
for i in range(0, self.num_keys):
curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0)
max_sleep_time = min(max_sleep_time, curr_sleep_time)
if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time():
self.timers[i][0] = 0
self.timers[i][1] = 0
if self.timers[i][1] < self.window_limit:
result = i
break
if result == -1: # case when all streams are rate limited
# logging.warning('sleeping for %d seconds.' % max_sleep_time)
# time.sleep(max_sleep_time)
return -1 * max_sleep_time
if self.timers[result][0] == 0:
self.timers[result][0] = time.time()
self.timers[result][1] += 1
return result | [
"def perform_get_start(self):\n\t\treturn 0",
"def _get_task_index(self):\n\n if self._is_chief:\n self._server_socket = self._start_socket_server()\n self._server_socket.settimeout(5)\n users = []\n t_end = time.time() + self._wait_time\n\n while time... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Chronos GR Config plugin writes new config when config has changed | def test_chronos_gr_config_changed(self, mock_run_command, mock_safely_write):
# Create the plugin
plugin = ChronosGRConfigPlugin({})
# Set up the config strings to be tested
old_config_string = "Old Chronos GR config"
new_config_string = "New Chronos GR config"
# Call 'on_config_changed' with file.open mocked out
with mock.patch('clearwater_etcd_plugins.chronos.chronos_gr_config_plugin.open', \
mock.mock_open(read_data=old_config_string), create=True) as mock_open:
plugin.on_config_changed(new_config_string, None)
# Test assertions
mock_open.assert_called_once_with(plugin.file(), "r")
mock_safely_write.assert_called_once_with(plugin.file(), new_config_string)
mock_run_command.assert_called_once_with("/usr/share/clearwater/clearwater-queue-manager/scripts/modify_nodes_in_queue add apply_chronos_gr_config") | [
"def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"",
"def test_write_config(self):\n config = Config()\n config.co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the draft results. | def test_load_draft(league):
draft = league.draft_results()
assert(len(draft) == 144)
#mcdavid 1st
assert(draft[0]['player_key'] == '396.p.6743')
# carter hart 67th
assert(draft[66]['player_key'] == '396.p.7156')
# zadorov last
assert(draft[-1]['player_key'] == '396.p.5995') | [
"def load(self):\n _path = glob(join_path(self.results_dir,'%s.results'%self.name)).pop()\n with open(_path,'r') as _f:\n self._loaded_data = load(_f)",
"def load(self):\n self.results = pickle_load('results', self.main_dir)",
"def load_draft_records(self):\n\n src_path = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return team roster at given date. | def get_team_roster(league):
pass | [
"async def roster(\n self, ctx: commands.Context, season: Optional[YearFinder] = None, *, search: HockeyTeams\n ) -> None:\n season_str = None\n season_url = \"\"\n if season:\n if season.group(3):\n if (int(season.group(3)) - int(season.group(1))) > 1:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calibrate the chemical shifts of each spin in the peak list. | def calibrate_peaklist(peaklist, calibration, attr='shift'):
if len(calibration) != peaklist.dims:
raise ValueError('incorrect calibration list length')
for peak in peaklist:
for spin, cal in zip(peak, calibration):
shift = getattr(spin, attr)
shift -= cal
setattr(spin, attr, shift)
return peaklist | [
"def updatePeakShifts(peak):\n\n for peakDim in peak.peakDims:\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n updateResonShift(contrib.resonance,peakDim)",
"def calibrate(self, data):\n self.shift = data[7]\n lower_count_to_current = (data[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Map each unique spin link to all of its corresponding peaks. NOESY peak lists represent spin links between Hydrogen atoms. Whether 2D, 3D or 4D, each peak in a NOESY peak list has exactly two Hydrogen spins. Here, a spin link is represented by a frozenset containing the spin.assignment tuples for each Hydrogen atom. This function returns a dictionary mapping each unique spin link to a list of the Peaks in the PeakList that contain those two Hydrogen atoms. Examples >>> spin_link_dict = peaklist.spin_link_dict() >>> spin_link, peaks = spin_link_dict.popitem() >>> spin_link frozenset([Assignment(res_type='Q', res_num=21, atom='HN'), Assignment( res_type='G', res_num=17, atom='HN')]) >>> print(peaks[0]) Peak(spins=[ Spin(res_type=G, res_num=17, atom=HN), Spin(res_type=G, res_num=17, atom=N), Spin(res_type=Q, res_num=21, atom=HN)]) >>> print(peaks[1]) Peak(spins=[ Spin(res_type=Q, res_num=21, atom=HN), Spin(res_type=Q, res_num=21, atom=N), Spin(res_type=G, res_num=17, atom=HN)]) Returns | def get_spin_link_dict(peaklist):
spin_link_dict = {}
for peak in peaklist:
spins = [spin for spin in peak
if spin.atom is not None and spin.atom[0] == 'H']
if len(spins) != 2:
err = ('expected 2 Hydrogens in each peak, '
'found %d' % len(spins))
raise ValueError(err)
link = frozenset(spin.assignment for spin in spins)
spin_link_dict.setdefault(link, []).append(peak)
return spin_link_dict | [
"def spinnaker_links(self):\n return iter(self._spinnaker_links.items())",
"def list_to_dict(links):\n dic = defaultdict(list)\n for link in links:\n dic[int(link[0][1:])].append(int(link[1][1:]))\n if int(link[1][1:]) not in dic:\n dic[int(link[1][1:])] = []\n return dic"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sort peaks by the assignments of their constituent spins. Sort the peaks by the assignments of spins in particular dimensions. The default order sorts the peaks by the dimensions associated with spin anchors first then by the remaining dimensions in the order they appear in each peak. Optionally place all commented peaks at the end of the peak list. | def sort_by_assignments(peaklist, order=None, commented_at_end=False):
anchors = peaklist.anchors
anchored = tuple(i for anchor in anchors for i in anchor)
unanchored = set(range(peaklist.dims)) - set(anchored)
default_order = anchored + tuple(sorted(unanchored))
order = order if order is not None else default_order
peaklist.sort(key=lambda peak: tuple(peak[i] for i in order))
if commented_at_end:
peaklist.sort(key=lambda peak: peak.commented)
return peaklist | [
"def sort_merge_peak(peaks, min_gap=None, no_smt=False):\n ch2peaks = {}\n for peak in peaks:\n ch = peak[0]\n if ch in ch2peaks:\n ch2peaks[ch].append(peak[1:5])\n else:\n ch2peaks[ch] = [peak[1:5]]\n\n for ch in ch2peaks:\n ch2peaks[ch].sort(key=lambda x:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an shellescaped version of the string using double quotes. Reliably quote a string which may contain unsafe characters (e.g. space or quote characters), while retaining some shell features such as variable interpolation. The returned value can be used in a shell command line as one token that gets to be further interpreted by the shell. The set of characters that retain their special meaning may depend on the | def DoubleQuote(s):
if not s:
return '""'
elif all(c in _SafeShellChars for c in s):
return s
else:
return '"' + s.replace('"', '\\"') + '"' | [
"def __shellquote(s):\n return \"'\" + s.replace(\"'\", \"'\\\\''\") + \"'\"",
"def shell_quote(s):\n return \"\\\"%s\\\"\" % s.replace('\"', '\\\"')",
"def quoted(s):\n return '\"%s\"' % s",
"def wrap_with_in_single_quote(s):\n return \"'{}'\".format(s)",
"def shquote(text):\n\treturn \"'%s'\" % ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a shell snippet for a command using a variable to shrink it. Takes into account all quoting that needs to happen. | def ShrinkToSnippet(cmd_parts, var_name, var_value):
def shrink(value):
parts = (x and SingleQuote(x) for x in value.split(var_value))
with_substitutions = ('"$%s"' % var_name).join(parts)
return with_substitutions or "''"
return ' '.join(shrink(part) for part in cmd_parts) | [
"def make_shell_cmd(self, locals):\n\t\tdef cmd_shell():\n\t\t\timport code\n\t\t\tcode.interact(banner=self.shell_banner, local=locals, exitmsg='Returning to command shell...')\n\n\t\treturn cmd_shell",
"def shellify(val):\n\n if val==None:\n s=''\n elif not isinstance(val,str):\n s=str(val)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
An fcntl-based implementation of _IterProcessStdout. | def _IterProcessStdoutFcntl(process,
iter_timeout=None,
timeout=None,
buffer_size=4096,
poll_interval=1):
# pylint: disable=too-many-nested-blocks
import fcntl
try:
# Enable non-blocking reads from the child's stdout.
child_fd = process.stdout.fileno()
fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)
fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
end_time = (time.time() + timeout) if timeout else None
iter_end_time = (time.time() + iter_timeout) if iter_timeout else None
while True:
if end_time and time.time() > end_time:
raise TimeoutError()
if iter_end_time and time.time() > iter_end_time:
yield None
iter_end_time = time.time() + iter_timeout
if iter_end_time:
iter_aware_poll_interval = min(poll_interval,
max(0, iter_end_time - time.time()))
else:
iter_aware_poll_interval = poll_interval
read_fds, _, _ = select.select([child_fd], [], [],
iter_aware_poll_interval)
if child_fd in read_fds:
data = _read_and_decode(child_fd, buffer_size)
if not data:
break
yield data
if process.poll() is not None:
# If process is closed, keep checking for output data (because of timing
# issues).
while True:
read_fds, _, _ = select.select([child_fd], [], [],
iter_aware_poll_interval)
if child_fd in read_fds:
data = _read_and_decode(child_fd, buffer_size)
if data:
yield data
continue
break
break
finally:
try:
if process.returncode is None:
# Make sure the process doesn't stick around if we fail with an
# exception.
process.kill()
except OSError:
pass
process.wait() | [
"def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in stdout_iterator:\n self._log(\"raw\", \"stdout : {0}\".format(line.strip()))\n self.stdout_queue.put_nowait(line.strip())\n self.stdout_q... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a SparseAutoEncoder object. | def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',
out_transfer='identity', reconstruct_loss='squared',
c_sparsity=1, sparsity_loss='bern_bern_kl',
sparsity_target=0.01,
tied_weights=True, batch_size=None,
optimizer='lbfgs', max_iter=1000, verbose=False):
super(SparseAutoEncoder, self).__init__(
n_inpt, n_hidden, hidden_transfer, out_transfer,
reconstruct_loss, c_sparsity, sparsity_loss, sparsity_target,
tied_weights)
self.batch_size = batch_size
self.optimizer = optimizer
self.f_transform = None
self.f_reconstruct = None
self.parameters.data[:] = np.random.standard_normal(
self.parameters.data.shape).astype(theano.config.floatX)
self.max_iter = max_iter
self.verbose = verbose | [
"def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')",
"def make_sparse(data):\n assert data.train_pos_edge_index is not None\n\n (row, col), N = data.train_pos_edge_index, data.num_nodes\n perm = (col * N + row).argsort()\n row, co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a DenoisingAutoEncoder object. | def __init__(self, n_inpt, n_hidden, hidden_transfer='sigmoid',
out_transfer='identity', reconstruct_loss='squared',
noise_type='gauss', c_noise=.2,
tied_weights=True, batch_size=None,
optimizer='lbfgs', max_iter=1000, verbose=False):
super(DenoisingAutoEncoder, self).__init__(
n_inpt, n_hidden, hidden_transfer, out_transfer,
reconstruct_loss, noise_type, c_noise,
tied_weights)
self.batch_size = batch_size
self.optimizer = optimizer
self.f_transform = None
self.f_reconstruct = None
climin.initialize.randomize_normal(self.parameters.data)
self.max_iter = max_iter
self.verbose = verbose | [
"def _define_encoder(self):\n raise NotImplementedError",
"def get_model(*args, **kwargs):\n return AutoEncoder(*args, **kwargs)",
"def create_autoencoder():\n\n model = create_model()\n model.compile(optimizer=Adam(), loss=binary_crossentropy)\n model.summary()\n model.save('autoencoder.h5')"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
根据一系列离散的版本猜测版本范围 会把 group_digits 位的版本分为同一段 Examples: (digits=1) "1.1|1.2|1.3|1.4" > ">=1.1, ">=1.1,=2.1, '>=1.1.1,=1.2,=2.0,<=2.0.2|3.0' | def guess_range(versions, digits=2):
if isinstance(versions, six.string_types):
versions = [Version(x) for x in versions.split('|')]
else:
versions = [Version(x) for x in versions]
versions.sort()
if not versions:
raise ValueError('must given at least one version')
sections = []
group_buff = [versions[0]]
for version in versions[1:]:
if version.version[:digits] == group_buff[0].version[:digits]:
group_buff.append(version)
else:
sections.append(_internal_guess_range(group_buff))
group_buff = [version]
# 最后一组
sections.append(_internal_guess_range(group_buff))
version_ranges = []
for low, high in sections:
if low == high:
cg = low.vstring
else:
cg = ">={},<={}".format(low, high)
version_ranges.append(cg)
vr = VersionRange(version_ranges)
return vr | [
"def make_version_sortable(groups):\n postfix = groups.get('postfix1') or groups.get('postfix2') or \"\"\n sortable_version = \"0.0.0.0.0\"\n sortable_postfix = None\n if postfix.startswith(\"a\"):\n sortable_postfix = postfix.replace('a', '0.')\n if postfix and postfix.startswith(\"b\"):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fixture for setting up configuration parser | def setup_config():
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
return config | [
"def test_polarion_config_parser(polarion_config):\n assert polarion_config.test_case_url() == 'https://127.0.0.1/polarion/import/testcase'\n assert polarion_config.test_run_url() == 'https://127.0.0.1/polarion/import/xunit'\n assert polarion_config.username() == 'my_user'\n assert polarion_config.passw... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fixture for retrieving mock event | def get_mock_event():
event = {
"httpMethod": "GET",
"//body": "{\"name\": \"Sam\"}",
"resource": "/{proxy+}",
"queryStringParameters": {},
"pathParameters": {
"proxy": "users"
},
"requestContext": {
"accountId": "222222222",
"identity": {
"sourceIp": "2a02:a445:6d36:1:1e3:a188:313c:1d31",
"userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_1_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2743.116 Safari/537.36",
},
"resourcePath": "/{proxy+}",
"httpMethod": "GET",
"apiId": "xxxxxxxxxx"
}
}
return event | [
"def test_get_event(self):\n pass",
"def test_describe_event(self):\n pass",
"def test_future_event(self):\n pass",
"def test_track_event(self):\n\n created_at = datetime.now()\n\n mock_payload = {\n 'user_id': 1,\n 'event_name': 'Event',\n '... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unit test get_ip_type_by_address method of the Bad Bots class | def test_get_ip_type_by_address(setup_config, get_mock_event):
# !ARRANGE!
bad_bots = BadBots(setup_config, get_mock_event)
ipv4_address_1 = '1.1.1.1'
ipv4_address_2 = '11.22.33.44'
ipv4_address_3 = '123.123.123.123'
ipv6_address_1 = '2a02:a445:6d36:1:1e3:a188:313c:1d31'
ipv6_address_2 = '3731:54:65fe:2::a7'
ipv6_address_3 = 'fd07:a47c:3742:823e:3b02:76:982b:463'
# !ACT!
# Detect the IP type of provided IP addresses
ipv4_address_1_type = bad_bots.get_ip_type_by_address(ipv4_address_1)
ipv4_address_2_type = bad_bots.get_ip_type_by_address(ipv4_address_2)
ipv4_address_3_type = bad_bots.get_ip_type_by_address(ipv4_address_3)
ipv6_address_1_type = bad_bots.get_ip_type_by_address(ipv6_address_1)
ipv6_address_2_type = bad_bots.get_ip_type_by_address(ipv6_address_2)
ipv6_address_3_type = bad_bots.get_ip_type_by_address(ipv6_address_3)
# !ASSERT!
# Assert IP addresses are of type IPv4
assert ipv4_address_1_type.value == BadBots.SourceIPType.IPV4.value
assert ipv4_address_2_type.value == BadBots.SourceIPType.IPV4.value
assert ipv4_address_3_type.value == BadBots.SourceIPType.IPV4.value
# Assert IP addresses are of type IPv6
assert ipv6_address_1_type.value == BadBots.SourceIPType.IPV6.value
assert ipv6_address_2_type.value == BadBots.SourceIPType.IPV6.value
assert ipv6_address_3_type.value == BadBots.SourceIPType.IPV6.value | [
"def testIpAddress(self):\n self.assertRaises(ValueError,\n basictypes.build,\n \"SNIMPY-MIB\", \"snimpyIpAddress\", \"999.5.6.4\")\n a = basictypes.build(\"SNIMPY-MIB\", \"snimpyIpAddress\", \"10.0.4.5\")\n self.assert_(isinstance(a, basictypes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Unit test check_bot_confidence method of the Bad Bots class | def test_check_bot_confidence(setup_config, get_mock_event):
# !ARRANGE!
bad_bots = BadBots(setup_config, get_mock_event)
bot_1 = Bot()
bot_1.source_ip = '1.1.1.1'
bot_1.http_query_string_parameters = '<script></script>'
bot_1.http_body = 'EXEC'
bot_1.geolocation = 'United States'
bot_1.source_ip_type = BadBots.SourceIPType.IPV4
bot_1.http_method = "CONNECT"
bot_1.http_user_agent = "Mozilla/5.0 (compatible; Sosospider/2.0; +http://help.soso.com/webspider.htm)"
bot_2 = Bot()
bot_2.source_ip = '77.168.51.231'
bot_2.http_query_string_parameters = 'hello'
bot_2.http_body = 'hello!'
bot_2.geolocation = 'Netherlands'
bot_2.source_ip_type = BadBots.SourceIPType.IPV4
bot_2.http_method = "GET"
bot_2.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
bot_3 = Bot()
bot_3.source_ip = '2a02:a445:6d36:1:1e3:a188:313c:1d33'
bot_3.http_query_string_parameters = 'param=true'
bot_3.http_body = 'username=xxx'
bot_3.geolocation = 'United States'
bot_3.source_ip_type = BadBots.SourceIPType.IPV6
bot_3.http_method = "GET"
bot_3.http_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
# !ACT!
# Do confidence check on potential bots
confidence_score_bot_1 = bad_bots.check_bot_confidence(bot_1)
confidence_score_bot_2 = bad_bots.check_bot_confidence(bot_2)
confidence_score_bot_3 = bad_bots.check_bot_confidence(bot_3)
# !ASSERT!
# Assert IP addresses are of type IPv4
assert(confidence_score_bot_1 == 25)
assert(confidence_score_bot_2 == 0)
assert(confidence_score_bot_3 == 5) | [
"def test_word_confidences(self):\n self._api.SetImageFile(self._image_file)\n words = self._api.AllWords()\n self.assertEqual(words, [])\n self._api.Recognize()\n words = self._api.AllWords()\n confidences = self._api.AllWordConfidences()\n self.assertEqual(len(word... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates IDL files from a template for user and system marshaling. | def _Main():
cmd_parser = argparse.ArgumentParser(
description='Tool to generate IDL from template.')
cmd_parser.add_argument('--idl_template_file',
dest='idl_template_file',
type=str,
required=True,
help='Input IDL template file.')
cmd_parser.add_argument('--idl_output_file',
type=str,
required=True,
help='Output IDL file.')
flags = cmd_parser.parse_args()
_GenerateIDLFile(flags.idl_template_file, flags.idl_output_file) | [
"def generate_files_for_template(env, template_file, input_files, output_dir):\n # Open template\n with open(template_file, \"r\") as template_contents:\n template_object = env.from_string(template_contents.read())\n _, template_extension = os.path.splitext(template_file)\n\n # Create out... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add ranking to each node using google pagerank algorithm | def add_pagerank(self):
query = '''
MATCH (c1:)-[r:INTERACTS]->(c2:)
RETURN c1.name, c2.name, r.weight AS weight
'''
ig = IGraph.TupleList(self.graph.run(query), weights=True)
pg = ig.pagerank()
pgvs = []
for p in zip(ig.vs, pg):
print(p)
pgvs.append({"name": p[0]["name"], "pg": p[1]})
write_clusters_query = '''
UNWIND {nodes} AS n
MATCH (c:) WHERE c.name = n.name
SET c.pagerank = n.pg
'''
self.graph.run(write_clusters_query, nodes=pgvs) | [
"def _run_pagerank_iteration(self):\r\n\r\n sink_nodes = self.recipients - self.senders\r\n S = sum([sink.pagerank for sink in sink_nodes])\r\n\r\n number_nodes = len(self.nodes)\r\n\r\n # The LHS of the PageRank addition is constant for each node, so can be\r\n # precomputed.\r\n random_jump_nume... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
add community membership to each node using walktrap algorithm implemented in igraph | def add_communites(self):
query = '''
MATCH (c1:)-[r:INTERACTS]->(c2:)
RETURN c1.name, c2.name, r.weight AS weight
'''
ig = IGraph.TupleList(self.graph.run(query), weights=True)
clusters = IGraph.community_walktrap(ig, weights="weight").as_clustering()
nodes = [{"name": node["name"]} for node in ig.vs]
for node in nodes:
idx = ig.vs.find(name=node["name"]).index
node["community"] = clusters.membership[idx]
write_clusters_query = '''
UNWIND {nodes} AS n
MATCH (c:) WHERE c.name = n.name
SET c.community = toInt(n.community)
'''
self.graph.run(write_clusters_query, nodes=nodes) | [
"def insert(self, node, community, incident_weight):\n self.community_degrees[community] += self.degrees[node]\n self.community_self_loops[community] += incident_weight + self.self_loops[node]\n self.node_to_community_map[node] = community",
"def assign_communities(graph):\n communitie... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Advance the time reference by the given amount. | def advance_by(self, amount: float):
if amount < 0:
raise ValueError("cannot retreat time reference: amount {} < 0"
.format(amount))
self.__delta += amount | [
"def advanceTime(self, amount):\n self.currentSeconds += amount",
"def advance(self, amount):\n right_now = self.rightNow + amount\n self._sortCalls()\n while self.calls and self.calls[0].getTime() <= right_now:\n self.rightNow = self.calls[0].getTime()\n call = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Advance the time reference so that now is the given timestamp. | def advance_to(self, timestamp: float):
now = self.__original_time()
if timestamp < now:
raise ValueError("cannot retreat time reference: "
"target {} < now {}"
.format(timestamp, now))
self.__delta = timestamp - now | [
"def advance(self, **kwargs):\n self._now = self._now + timedelta(**kwargs)",
"def _update_time(self):\n self.prev_time = time.time()",
"def change_time(self, new_time):\r\n self.when = new_time",
"def setTimepoint(self, tp):\n\t\tif tp != self.timepoint:\n\t\t\tself.renew = True\n\t\tsel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Email the given document to the given email address. | def email_document(document, to, template='django_dms/email.txt', subject=''):
# Start a new thread to email the document
# This avoids a frozen screen while the email is being sent (particularly if the document is big).
t = threading.Thread(target=_email_document, args=[document, to, template, subject])
t.setDaemon(True)
t.start() | [
"def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)\n # Create the message\n message = EmailMes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to email document in another thread. | def _email_document(document, to, template='django_dms/email.txt', subject=''):
# TODO: A really cool system would delay sending the email for 10 seconds or so,
# to allow the user to quickly undo :-) This could probably also be done client-side (ie JS)
# Create the message
message = EmailMessage(to=to, subject=subject)
message.to = to
message.subject = subject
message.body = render_to_string(template, {'document': document})
message.attach(document.friendly_filename, document.file.read(), document.file_mimetype)
# Send the message
message.send() | [
"def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subj... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send the specified document to the user's email address (AJAX version). | def send_ajax(self, request, id, tribe_slug):
document = self.get_document(id, tribe_slug)
form = self._set_user_email_address(request)
email = self._get_user_email_address(request)
if not email and not form:
form = EmailForm()
if form:
content = '<form class="ajax_update_email" action="%s" method="post">' % reverse('%s_document_send' % self.name, args=[getattr(document, self.url_identifier_field)])
content += '%s<input type="submit" value="Send"/></form>' % form['email']
return HttpResponse(content)
print "Sending email to %s" % email
#email_document(document, to=[email], subject='Document: %s' % document.title)
# Send a signal to let everyone know about this document interaction
document_interaction.send(sender=self, document=document, mode="sent", request=request, recipient=email)
return HttpResponse('Email sent to %s' % email) | [
"def send_document_by_email(self, send_document_by_email):\n\n self._send_document_by_email = send_document_by_email",
"def _email_document(document, to, template='django_dms/email.txt', subject=''): \n # TODO: A really cool system would delay sending the email for 10 seconds or so, \n # to allow the... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets a custom defined or default email address for the current user. | def _get_user_email_address(self, request):
return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email) | [
"def get_user_email():\n try:\n return auth.get_current_user().email\n except Exception:\n return ''",
"def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')",
"def __default_email(self):\n email_default = lambda n, s: \"{n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a new email address is posted, remember it. | def _set_user_email_address(self, request):
if request.method == 'POST':
form = EmailForm(request.POST)
if form.is_valid():
request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email']
else:
return form | [
"def duplicate_email(user):\n return user.email",
"def email(self, email):\n if email == self.email:\n return\n\n email = email.lower()\n if self._email is None:\n self._email = email\n self.require_email_confirmation()\n else:\n self.emai... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print list of instances with their attached volume id/size to console, ie | def list_ebss_by_instance():
ec2 = u.create_ec2_resource()
instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]
sorted_instances = sorted(instances, key=itemgetter(0))
for (seconds, instance) in sorted_instances:
volumes = instance.volumes.all()
volume_strs = []
for v in volumes:
volume_strs.append("%s (%s)"%(v.id, v.size))
print("%s: %s" % (u.get_name(instance.tags), ','.join(volume_strs))) | [
"def do_show(cs, args):\n instance = _find_instance(cs, args.instance)\n instance._info['flavor'] = instance.flavor['id']\n if hasattr(instance, 'volume'):\n instance._info['volume'] = instance.volume['size']\n if hasattr(instance, 'ip'):\n instance._info['ip'] = ', '.join(instance.ip)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Grows EBS volume for given task. | def grow_ebs_for_task(task_fragment, target_size_gb):
ec2 = u.create_ec2_resource()
client = u.create_ec2_client()
# todo: don't crash on missing/duplicate names
instances = {u.get_name(i.tags): i for i in ec2.instances.all()}
ec2 = u.create_ec2_resource()
instances = [(u.seconds_from_datetime(i.launch_time), i) for i in ec2.instances.all()]
sorted_instances = reversed(sorted(instances, key=itemgetter(0)))
for (seconds, instance) in sorted_instances:
task_name = u.get_name(instance.tags)
hours_ago = (time.time()-seconds)/3600
hours_ago+=8 # adjust for time being in UTC
if task_fragment in task_name:
print("Found instance %s launched %.1f hours ago" %( task_name, hours_ago))
break
print(instance.id)
volumes = list(instance.volumes.all())
assert len(volumes)==1, "Must have 1 volume"
print("Growing %s to %s"%(volumes[0].id, target_size_gb))
response = client.modify_volume(
VolumeId=volumes[0].id,
Size=target_size_gb,
)
assert u.is_good_response(response) | [
"def extend_volume(self, volume, new_size):\n if isinstance(new_size, dict):\n new_size = random.randint(new_size[\"min\"], new_size[\"max\"])\n\n aname = \"cinder_v%s.extend_volume\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volumes.exten... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This class tests the PyTorchYolo object detector. | def get_pytorch_yolo(get_default_cifar10_subset):
import cv2
import torch
from pytorchyolo import models
from pytorchyolo.utils.loss import compute_loss
from art.estimators.object_detection.pytorch_yolo import PyTorchYolo
model_path = "/tmp/PyTorch-YOLOv3/config/yolov3.cfg"
weights_path = "/tmp/PyTorch-YOLOv3/weights/yolov3.weights"
model = models.load_model(model_path=model_path, weights_path=weights_path)
class YoloV3(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x, targets=None):
if self.training:
outputs = self.model(x)
# loss is averaged over a batch. Thus, for patch generation use batch_size = 1
loss, loss_components = compute_loss(outputs, targets, self.model)
loss_components_dict = {"loss_total": loss}
return loss_components_dict
else:
return self.model(x)
model = YoloV3(model)
object_detector = PyTorchYolo(
model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=("loss_total",)
)
n_test = 10
(_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset
x_test_cifar10 = x_test_cifar10[0:n_test]
x_test = cv2.resize(
x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC
).transpose((2, 0, 1))
x_test = np.expand_dims(x_test, axis=0)
x_test = np.repeat(x_test, repeats=2, axis=0)
# Create labels
result = object_detector.predict(x=x_test)
y_test = [
{
"boxes": result[0]["boxes"],
"labels": result[0]["labels"],
"scores": np.ones_like(result[0]["labels"]),
},
{
"boxes": result[1]["boxes"],
"labels": result[1]["labels"],
"scores": np.ones_like(result[1]["labels"]),
},
]
yield object_detector, x_test, y_test | [
"def yolo_test_file(self):\n # Detect objects\n annotatedImage, predictedObjects = self.detect_from_file(\n self.inputFile)\n # Show image\n if self.showImage:\n cv2.imshow('YOLO Detection', annotatedImage)\n cv2.waitKey(10)\n # Save annotated imag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits image into tiles by size of tile. tile_w: tile width; tile_h: tile height | def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):
x_axis = -1
y_axis = -2
arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]
x_ntiles = (
arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1
)
y_ntiles = (
arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1
)
tiles = []
# row
for i in range(0, y_ntiles):
# height of this tile
ver_f = tile_h * i
ver_t = ver_f + tile_h
# col
for j in range(0, x_ntiles):
# width of this tile
hor_f = tile_w * j
hor_t = hor_f + tile_w
tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)
tiles.append(tile)
tile_shape = [tile_h, tile_w]
ntiles = dict(x=x_ntiles, y=y_ntiles)
padding = dict(left=0, right=0, top=0, bottom=0)
if arr_width % tile_w == 0:
padding["right"] = 0
else:
padding["right"] = tile_w - (arr_width % tile_w)
if arr_height % tile_h == 0:
padding["bottom"] = 0
else:
padding["bottom"] = tile_h - (arr_height % tile_h)
info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)
return tiles, info | [
"def splitImgs(self, tile_size, n_tiles):\n\n if n_tiles%2!=0 or tile_size%16!=0:\n print(\"Incorrect number of tiles or tile size not divisible by 16.\\nAborting\")\n exit()\n\n\n path_train = self.train_path\n path_label = self.label_path\n path_raw = self.raw_pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Splits image into tiles by number of tiles. x_ntiles: number of tiles horizontally; y_ntiles: number of tiles vertically | def split_image_into_number_of_tiles(
arr: Image, x_ntiles: int, y_ntiles: int, overlap: int
):
img_width, img_height = arr.shape[-1], arr.shape[-2]
tile_w = img_width // x_ntiles
tile_h = img_height // y_ntiles
return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap) | [
"def splitImgs(self, tile_size, n_tiles):\n\n if n_tiles%2!=0 or tile_size%16!=0:\n print(\"Incorrect number of tiles or tile size not divisible by 16.\\nAborting\")\n exit()\n\n\n path_train = self.train_path\n path_label = self.label_path\n path_raw = self.raw_pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates an array of ppxf_util.gaussian emission lines to be used as gas templates in PPXF. Generally, these templates represent the instrumental line spread function (LSF) at the set of wavelengths of each emission line. In this case, pPXF will return the intrinsic (i.e. astrophysical) dispersion of the gas lines. Alternatively, one can input FWHM_gal=0, in which case the emission lines are delta functions and pPXF will return a dispersion which includes both the instrumental and the intrinsic dispersion. Additional lines can be easily added by editing the code of this procedure, which is meant as a template to be modified by the users where needed. For accuracy the ppxf_util.gaussians are integrated over the pixel boundaries. This can be changed by setting `pixel`=False. The [OI], [OIII] and [NII] doublets are fixed at theoretical flux ratio~3. The [OII] and [SII] doublets can be restricted to physical range of ratios. The Balmer series can be fixed to the theoretically predicted decrement. | def emission_lines(logLam_temp, lamRange_gal, FWHM_gal, pixel=True,
tie_balmer=False, limit_doublets=False, vacuum=False):
if tie_balmer:
# Balmer decrement for Case B recombination (T=1e4 K, ne=100 cm^-3)
# Table 4.4 of Dopita & Sutherland 2003 https://www.amazon.com/dp/3540433627
# Balmer: Htheta Heta Hzeta Heps Hdelta Hgamma Hbeta Halpha
wave = np.array([3797.90, 3835.39, 3889.05, 3970.07, 4101.76, 4340.47, 4861.33, 6562.80]) # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
ratios = np.array([0.0530, 0.0731, 0.105, 0.159, 0.259, 0.468, 1, 2.86])
ratios *= wave[-2]/wave # Account for varying pixel size in Angstrom
emission_lines = gauss @ ratios
line_names = ['Balmer']
w = (wave > lamRange_gal[0]) & (wave < lamRange_gal[1])
line_wave = np.mean(wave[w]) if np.any(w) else np.mean(wave)
else:
# Use fewer lines here, as the weak ones are difficult to measure
# Balmer: Hdelta Hgamma Hbeta Halpha
line_wave = [4101.76, 4340.47, 4861.33, 6562.80] # air wavelengths
if vacuum:
line_wave = ppxf_util.air_to_vac(line_wave)
line_names = ['Hdelta', 'Hgamma', 'Hbeta', 'Halpha']
emission_lines = ppxf_util.gaussian(logLam_temp, line_wave, FWHM_gal, pixel)
if limit_doublets:
# The line ratio of this doublet lam3729/lam3726 is constrained by
# atomic physics to lie in the range 0.28--1.47 (e.g. fig.5.8 of
# Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/).
# We model this doublet as a linear combination of two doublets with the
# maximum and minimum ratios, to limit the ratio to the desired range.
# -----[OII]-----
wave = [3726.03, 3728.82] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
names = ['[OII]3726_d1', '[OII]3726_d2']
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
doublets = gauss @ [[1, 1], [0.28, 1.47]] # produces *two* doublets
emission_lines = np.column_stack([emission_lines, doublets])
line_names = np.append(line_names, names)
line_wave = np.append(line_wave, wave)
# The line ratio of this doublet lam6716/lam6731 is constrained by
# atomic physics to lie in the range 0.44--1.43 (e.g. fig.5.8 of
# Osterbrock & Ferland 2005 https://www.amazon.co.uk/dp/1891389343/).
# We model this doublet as a linear combination of two doublets with the
# maximum and minimum ratios, to limit the ratio to the desired range.
# -----[SII]-----
wave = [6716.47, 6730.85] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
names = ['[SII]6731_d1', '[SII]6731_d2']
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
doublets = gauss @ [[0.44, 1.43], [1, 1]] # produces *two* doublets
emission_lines = np.column_stack([emission_lines, doublets])
line_names = np.append(line_names, names)
line_wave = np.append(line_wave, wave)
else:
# Here the doublets are free to have any ratio
# -----[OII]----- -----[SII]-----
wave = [3726.03, 3728.82, 6716.47, 6730.85] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
names = ['[OII]3726', '[OII]3729', '[SII]6716', '[SII]6731']
gauss = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel)
emission_lines = np.column_stack([emission_lines, gauss])
line_names = np.append(line_names, names)
line_wave = np.append(line_wave, wave)
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[OIII]-----
wave = [4958.92, 5006.84] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[OIII]5007_d') # single template for this doublet
line_wave = np.append(line_wave, wave[1])
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[OI]-----
wave = [6300.30, 6363.67] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.33]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[OI]6300_d') # single template for this doublet
line_wave = np.append(line_wave, wave[0])
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[NII]-----
wave = [6548.03, 6583.41] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [0.33, 1]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[NII]6583_d') # single template for this doublet
line_wave = np.append(line_wave, wave[1])
#added by anja to ppxf_util.emission_lines version
# To keep the flux ratio of a doublet fixed, we place the two lines in a single template
# -----[NI]-----
wave = [5197.90, 5200.39] # air wavelengths
if vacuum:
wave = ppxf_util.air_to_vac(wave)
doublet = ppxf_util.gaussian(logLam_temp, wave, FWHM_gal, pixel) @ [1, 0.7]
emission_lines = np.column_stack([emission_lines, doublet])
line_names = np.append(line_names, '[NI]5200_d') # single template for this doublet
line_wave = np.append(line_wave, wave[1])
#----------------------
# Only include lines falling within the estimated fitted wavelength range.
#
w = (line_wave > lamRange_gal[0]) & (line_wave < lamRange_gal[1])
emission_lines = emission_lines[:, w]
line_names = line_names[w]
line_wave = line_wave[w]
print('Emission lines included in gas templates:')
print(line_names)
return emission_lines, line_names, line_wave | [
"def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Combine SSP traces to have mass/luminosity weighted properties | def weighted_traces(parnames, trace, nssps):
weights = np.array([trace["w_{}".format(i+1)].data for i in range(
nssps)])
wtrace = []
for param in parnames:
data = np.array([trace["{}_{}".format(param, i+1)].data
for i in range(nssps)])
t = np.average(data, weights=weights, axis=0)
wtrace.append(Table([t], names=["{}_weighted".format(param)]))
return hstack(wtrace) | [
"def modifyTraces(self,traces,samplerate,noiseinfo,smodel):\n noiseshape,noise = noiseinfo\n newtraces = []\n for trace in traces:\n sampledtrace = self.sampleTrace(trace,smodel,samplerate)\n if noise != 0.0:\n newtraces.append(self.makeTracePartial(sampledtr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
update learning rate of optimizers | def updatelearningrate(self, epoch):
self.lr = getlearningrate(epoch=epoch, opt=self.opt)
# update learning rate of model optimizer
if isinstance(self.model, list):
count = 0
for param_group in self.optimzer.param_groups:
# if type(model) is <list> then update modules with different learning rate
param_group['lr'] = self.lr
count += 1
# print ">>> count is:", count-1
else:
for param_group in self.optimzer.param_groups:
param_group['lr'] = self.lr | [
"def update_learning_rate(self):\n self._lr *= self._lr_decay\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self._lr\n\n # Display status message\n success_message = 'Learning rate updated to {:.1e}'.format(self._lr)\n print(success_format(su... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return apitools message object for the given message name. | def GetApiMessage(message_name):
messages = apis.GetMessagesModule(_BQ_API, _BQ_API_VERSION)
return getattr(messages, message_name) | [
"def get_message_by_name(self, name):\r\n mh = ct.c_void_p(None)\r\n dll.kvaDbGetMsgByName(self._handle, name.encode('utf-8'), ct.byref(mh))\r\n message = Message(self, mh)\r\n return message",
"def get_message(self, id=None, name=None):\r\n message = None\r\n if (id is n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds a bigquery AccessValueListEntry array from input file. Expects YAML or JSON formatted file. | def PermissionsFileProcessor(input_file):
access_value_msg = GetApiMessage('Dataset').AccessValueListEntry
try:
permissions_array = []
permissions_from_file = yaml.load(input_file[0])
permissions_from_file = permissions_from_file.get('access', None)
if not permissions_from_file or not isinstance(permissions_from_file, list):
raise PermissionsFileError(
'Error parsing permissions file: no access list defined in file')
for access_yaml in permissions_from_file:
permission = encoding.PyValueToMessage(access_value_msg, access_yaml)
if _ValidatePermission(permission):
permissions_array.append(permission)
else:
raise PermissionsFileError(('Error parsing permissions file:'
' invalid permission definition'
' [{}]'.format(permission)))
return sorted(permissions_array, key=lambda x: x.role)
except yaml.YAMLParseError as ype:
raise PermissionsFileError('Error parsing permissions file [{}]'.format(
ype)) | [
"def build_accession_parser(rules_file):\n\n rules_data = json.load(rules_file)\n rules_by_prefix_len = {}\n for prefix_list, database, molecule_type, type_description in rules_data:\n for prefix in prefix_list:\n prefix_length = len(prefix)\n if REFSEQ_PREFIX_RE.match(prefix) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set projectId value for a BigQueryXXXRequests. | def SetProjectId(ref, args, request):
del ref
project = args.project or properties.VALUES.core.project.Get(required=True)
project_ref = resources.REGISTRY.Parse(project,
collection='bigquery.projects')
request.projectId = project_ref.Name()
return request | [
"def qtest_project_id(self, value):\n self._qtest_project_id = value",
"def setProjectId(self, id):\n self.__current_project_id = id",
"def set_project_quotas(self, project_id, request_model, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.clien... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that view parameters are set properly on the tables create request. | def SetViewParameters(ref, args, request):
del ref # unused
if not args.view:
request.table.view = None
return request | [
"def test_create(self):\n session = self.prepare(user_table=True)\n\n result = list(session.execute((\"SELECT * FROM system_schema.views \"\n \"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING\")))\n assert len(result) == 1, \"Expecting ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process the overwrite flag on tables create. | def ProcessTableOverwrite(ref, args, request):
dataset_id = ref.datasetId
table_id = ref.Name()
project_id = ref.projectId
if args.overwrite:
if _TableExists(dataset_id, table_id, project_id):
_TryDeleteTable(dataset_id, table_id, project_id)
return request | [
"def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process the overwrite flag on tables copy. | def ProcessTableCopyOverwrite(ref, args, request):
del ref # Unused
if args.overwrite:
request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'
return request | [
"def ProcessTableOverwrite(ref, args, request):\n dataset_id = ref.datasetId\n table_id = ref.Name()\n project_id = ref.projectId\n\n if args.overwrite:\n if _TableExists(dataset_id, table_id, project_id):\n _TryDeleteTable(dataset_id, table_id, project_id)\n\n return request",
"def process_override... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build JobConfigurationTableCopy from request resource args. | def ProcessTableCopyConfiguration(ref, args, request):
del ref # Unused
source_ref = args.CONCEPTS.source.Parse()
destination_ref = args.CONCEPTS.destination.Parse()
arg_utils.SetFieldInMessage(
request, 'job.configuration.copy.destinationTable.datasetId',
destination_ref.Parent().Name())
arg_utils.SetFieldInMessage(
request, 'job.configuration.copy.destinationTable.projectId',
destination_ref.projectId)
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.destinationTable.tableId',
destination_ref.Name())
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.datasetId',
source_ref.Parent().Name())
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.projectId',
source_ref.projectId)
arg_utils.SetFieldInMessage(request,
'job.configuration.copy.sourceTable.tableId',
source_ref.Name())
return request | [
"def GetTableCopyResourceArgs():\n table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')\n arg_specs = [\n resource_args.GetResourcePresentationSpec(\n verb='to copy from', name='source', required=True, prefixes=True,\n attribute_overrides={'table': 'source'}, positional=False,\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process schema updates (additions/mode changes) for the request. Retrieves the current table schema for ref and attempts to merge in the schema provided in the request. This is necessary since the API backend does not handle PATCH semantics for schema updates (e.g. process the deltas) so we must always send the fully updated schema in the request. | def ProcessSchemaUpdate(ref, args, request):
table = request.table
relaxed_columns = args.relax_columns
if not table.schema and not relaxed_columns: # if not updating schema,
return request # then just return.
original_schema = _TryGetCurrentSchema(ref.Parent().Name(),
ref.Name(),
ref.projectId)
new_schema_columns = table.schema
updated_fields = _GetUpdatedSchema(original_schema,
new_schema_columns,
relaxed_columns)
table_schema_type = GetApiMessage('TableSchema')
request.table.schema = table_schema_type(fields=updated_fields)
return request | [
"def UpdateSchema(request, schema):\n handler = DetermineHandlerModule(request)\n \n result = handler.UpdateSchema(request, schema)\n \n return result",
"async def upgradeSchema(self) -> None:",
"def merge_schema_entry(\n self,\n old_schema_entry,\n new_schema_entry,\n base_path... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to retrieve the current BigQuery TableSchema for a table_ref. Tries to fetch the schema of an existing table. Raises SchemaUpdateError if table is not found or if table is not of type 'TABLE'. | def _TryGetCurrentSchema(dataset_id, table_id, project_id):
client = GetApiClient()
service = client.tables
get_request_type = GetApiMessage('BigqueryTablesGetRequest')
get_request = get_request_type(datasetId=dataset_id,
tableId=table_id,
projectId=project_id)
try:
table = service.Get(get_request)
if not table or table.type != 'TABLE':
raise SchemaUpdateError('Schema modifications only supported '
'on TABLE objects received [{}]'.format(
table))
except apitools_exceptions.HttpNotFoundError:
raise SchemaUpdateError('Table with id [{}:{}:{}] not found.'.format(
project_id, dataset_id, table_id))
return table.schema | [
"def get_table_schema(dataset_id, table_id):\n logging.info('getting table schema')\n bigquery_client = bigquery.Client()\n dataset_ref = bigquery_client.dataset(dataset_id)\n bg_tableref = bigquery.table.TableReference(dataset_ref, table_id)\n bg_table = bigquery_client.get_table(bg_tableref)\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change mode to `NULLABLE` for columns in existing schema. Tries to set mode on existing columns in orig_schema_map to `NULLABLE`. Raises SchemaUpdateError if column is not found in orig_schema_map. | def _GetRelaxedCols(relaxed_columns, orig_schema_map):
updated_schema_map = orig_schema_map.copy()
for col in relaxed_columns:
if col in orig_schema_map:
updated_schema_map[col].mode = 'NULLABLE'
else:
raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
return updated_schema_map | [
"def merge_mode(self, old_schema_entry, new_schema_entry, base_path):\n old_info = old_schema_entry['info']\n new_info = new_schema_entry['info']\n old_mode = old_info['mode']\n old_name = old_info['name']\n old_type = old_info['type']\n old_status = old_schema_entry['statu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add new columns to an existing schema. Tries to add new fields to an existing schema. Raises SchemaUpdateError if column already exists in the orig_schema_map. | def _AddNewColsToSchema(new_fields, orig_schema_map):
updated_schema_map = orig_schema_map.copy()
for new_field in new_fields:
if new_field.name in orig_schema_map:
raise SchemaUpdateError(_INVALID_SCHEMA_UPDATE_MESSAGE)
updated_schema_map[new_field.name] = new_field
return updated_schema_map | [
"def add_column(self, schema):\n self[schema.name] = schema.copy()",
"def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()",
"def test_add_columns(self):\n schema = 'test_schema'\n table = 'test_table'\n adding_columns = {'col1': 'type1', ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to delete a dataset, propagating error on failure. | def _TryDeleteDataset(dataset_id, project_id):
client = GetApiClient()
service = client.datasets
delete_request_type = GetApiMessage('BigqueryDatasetsDeleteRequest')
delete_request = delete_request_type(datasetId=dataset_id,
projectId=project_id,
deleteContents=True)
service.Delete(delete_request)
log.info('Deleted dataset [{}:{}]'.format(project_id, dataset_id)) | [
"def delete_dataset(self, dataset: DatasetDB):\n try:\n self._es.delete_index(dataset_records_index(dataset.id))\n finally:\n self._es.delete_document(index=DATASETS_INDEX_NAME, doc_id=dataset.id)",
"def delete_dataset(dataset_id: int):\n db = get_db()\n cur = db.execute(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get Table resource args (source, destination) for copy command. | def GetTableCopyResourceArgs():
table_spec_data = yaml_data.ResourceYAMLData.FromPath('bq.table')
arg_specs = [
resource_args.GetResourcePresentationSpec(
verb='to copy from', name='source', required=True, prefixes=True,
attribute_overrides={'table': 'source'}, positional=False,
resource_data=table_spec_data.GetData()),
resource_args.GetResourcePresentationSpec(
verb='to copy to', name='destination',
required=True, prefixes=True,
attribute_overrides={'table': 'destination'}, positional=False,
resource_data=table_spec_data.GetData())]
fallthroughs = {
'--source.dataset': ['--destination.dataset'],
'--destination.dataset': ['--source.dataset']
}
return [concept_parsers.ConceptParser(arg_specs, fallthroughs)] | [
"def ProcessTableCopyConfiguration(ref, args, request):\n del ref # Unused\n source_ref = args.CONCEPTS.source.Parse()\n destination_ref = args.CONCEPTS.destination.Parse()\n arg_utils.SetFieldInMessage(\n request, 'job.configuration.copy.destinationTable.datasetId',\n destination_ref.Parent().Name(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show all the models that were printed. | def show_completed_models(completed_models):
print("\nThe following models have been printed:")
for completed_model in completed_models:
print(completed_model) | [
"def show_models(completed_models):\r\n\t\r\n\tprint(\"\\nPrinted models: \")\r\n\t\r\n\tfor design in completed_models:\r\n\t\tprint(design)",
"def show_completed_models(completed_models):\n print(\"\\nThe following models have been printed:\")\n for model in completed_models:\n print(model)",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Register the message handlers that every journal should support. | def register_message_handlers(journal):
journal.dispatcher.register_message_handler(
DumpQuorumMessage, _dumpquorumhandler) | [
"def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n DumpJournalBlocksMessage,\n _dumpjournalblockshandler)\n journal.dispatcher.register_message_handler(\n DumpJournalValueMessage,\n _dumpjournalvaluehandler)",
"def register_message_handlers(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructor for DumpQuorumMessage class. | def __init__(self, minfo=None):
if minfo is None:
minfo = {}
super(DumpQuorumMessage, self).__init__(minfo)
self.IsSystemMessage = False
self.IsForward = True
self.IsReliable = True | [
"def dump(self):\n result = super(DumpQuorumMessage, self).dump()\n return result",
"def __init__(self, minfo=None):\n if minfo is None:\n minfo = {}\n super(DumpJournalValueMessage, self).__init__(minfo)\n\n self.IsSystemMessage = False\n self.IsForward = True... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dict with information about the dump quorum message. | def dump(self):
result = super(DumpQuorumMessage, self).dump()
return result | [
"def dumps(self) -> Dict[str, Any]:\n return {\n \"commitId\": self.commit_id,\n \"parentCommitId\": self.parent_commit_id,\n \"message\": self.message,\n \"committer\": self.committer.dumps(),\n }",
"def dump(self):\n result = super(QuorumTransacti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve a known stored filter object from the db | def retrieve_filter(self, filter_id):
LOG.debug("Retrieve filter {}".format(filter_id))
filter_obj = self.filter_collection.find_one({"_id": ObjectId(filter_id)})
# use _id to preselect the currently loaded filter, and drop it while we are at it
filter_obj.update([("filters", filter_obj.pop("_id", None))])
return filter_obj | [
"def get(cls, **filters: Dict[str, Any]) -> \"Model\":\n pks = communicator.filter_objects(\n cls._app_name, cls._model_name, filters, [\"id\"]\n )\n if len(pks) > 1:\n raise RuntimeError(\n f\"Exactly one object should match the given criteria, received {le... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Obtain a cursor for all filters available to an institute in a category. | def filters(self, institute_id, category="snv"):
filters_res = self.filter_collection.find(
{"institute_id": institute_id, "category": category}
)
return filters_res | [
"def get_filters(self):",
"def get_queryset(self):\n queryset = Article.objects.all()\n category = self.request.query_params.get('category')\n if category is not None:\n queryset = queryset.filter(category=category)\n return queryset",
"def filter( self, trans, user, query... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy `in_tree` to `out_tree`, checking selection(in_tree) for each event. | def tree_copy_selection(in_tree, out_tree, selection):
for entry in in_tree:
if selection(entry):
out_tree.Fill() | [
"def copy_tree_checker(src, dst):\n copy_tree(src, dst)\n return True",
"def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whet... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy `in` to `out` for events where event.`key` does not exist in `keys` `keys` is the set of keys seen so far. | def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):
for entry in in_tree:
key_value = getattr(entry, key)
if not key_value in keys:
out_tree.Fill()
keys.add(key_value) | [
"def remove_missing_values(events):\n ret = deepcopy(events)\n srchd, key_events = [], []\n for evt in events:\n _tmp = [(j, e) for j, e in enumerate(events) if e['key']\n == evt['key'] and not e['key'] in srchd]\n if _tmp != []:\n key_events.append(_tmp)\n sr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert the numpy array representing the GOL grid to a QImage. | def numpy_to_qimage(np_array: np.ndarray, show_age: bool):
# Only support 2D array of bytes
assert len(np_array.shape) == 2 and np_array.dtype == np.uint8
width = np_array.shape[1]
height = np_array.shape[0]
bytes_per_line = width
image = QImage(np_array, width, height, bytes_per_line, QImage.Format_Indexed8)
# Maps array values to color
if show_age:
image.setColorTable(colors.AGE_COLOR_TABLE)
else:
image.setColorTable(colors.BINARY_COLOR_TABLE)
return image | [
"def rgb2qimage(rgb):\n if len(rgb.shape) != 3:\n raise ValueError(\"rgb2QImage can only convert 3D arrays\")\n if rgb.shape[2] not in (3, 4):\n raise ValueError(\"rgb2QImage can expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels\")\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Prepare paths specified as config. The input is a list of either strings, or 2tuples (source, target). Where single strings are supplied, the basenames are used as targets. Where targets are given explicitly, they must not be absolute paths. Returns a list of 2tuples, or throws ConfigError if something is wrong in the input. | def process_path_specs(specs):
processedSpecs = []
for spec in specs:
if not isinstance(spec, (list, tuple)):
source = spec
target = None
elif len(spec) != 2:
raise ConfigError("path spec must be a list or tuple of "
"length two")
else:
source, target = spec
source = os.path.normpath(source)
if not target:
target = os.path.basename(source)
elif os.path.isabs(target):
raise ConfigError("target path for include file may not be "
"an absolute path")
processedSpecs.append((source, target))
return processedSpecs | [
"def _prepare_files(self, targets, storage_folder=None):\n target_root = self.env.target\n\n targets = [\n helpers.get_relative_path(target_root, target, base=target_root)\n for target in targets\n ]\n\n if storage_folder:\n storage_folder = helpers.get_r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the paths of directories which contain files that should not be included, generally because they contain standard system libraries. | def _GetDefaultBinPathExcludes(self):
if sys.platform == "win32":
import cx_Freeze.util
systemDir = cx_Freeze.util.GetSystemDir()
windowsDir = cx_Freeze.util.GetWindowsDir()
return [windowsDir, systemDir, os.path.join(windowsDir, "WinSxS")]
elif sys.platform == "darwin":
return ["/lib", "/usr/lib", "/System/Library/Frameworks"]
else:
return ["/lib", "/lib32", "/lib64", "/usr/lib", "/usr/lib32",
"/usr/lib64"] | [
"def _GetDefaultBinPathExcludes(self):\r\n if sys.platform == \"win32\":\r\n import cx_Freeze.util\r\n systemDir = cx_Freeze.util.GetSystemDir()\r\n windowsDir = cx_Freeze.util.GetWindowsDir()\r\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if the file should be copied to the target machine. This is done by checking the binPathIncludes, binPathExcludes, binIncludes and binExcludes configuration variables using first the full file name, then just the base file name, then the file name without any version numbers. Files are included unless specifically excluded but inclusions take precedence over exclusions. | def _ShouldCopyFile(self, path):
# check for C runtime, if desired
path = os.path.normcase(path)
dirName, fileName = os.path.split(path)
if fileName.startswith("msvcr") and fileName.endswith(".dll"):
self.msvcRuntimeDir = dirName
return self.includeMSVCR
# check the full path
if path in self.binIncludes:
return True
if path in self.binExcludes:
return False
# check the file name by itself (with any included version numbers)
if fileName in self.binIncludes:
return True
if fileName in self.binExcludes:
return False
# check the file name by itself (version numbers removed)
name = self._RemoveVersionNumbers(fileName)
if name in self.binIncludes:
return True
if name in self.binExcludes:
return False
# check the path for inclusion/exclusion
for path in self.binPathIncludes:
if dirName.startswith(path):
return True
for path in self.binPathExcludes:
if dirName.startswith(path):
return False
return True | [
"def _ShouldCopyFile(self, path):\r\n\r\n # check for C runtime, if desired\r\n path = os.path.normcase(path)\r\n dirName, fileName = os.path.split(path)\r\n if fileName.startswith(\"msvcr\") and fileName.endswith(\".dll\"):\r\n self.msvcRuntimeDir = dirName\r\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a user and a group name, and returns `True` if the user is in that group. | def is_in_group(user, group_name):
return is_in_group_user_id(user.id, group_name) | [
"def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False",
"def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()",
"def is_user_in_group(user, group):\n return find_group(user, group)",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
'If you create a Lambda function that processes events from streambased services (Amazon Kinesis Streams), the number of shards per stream is the unit of concurrency. If your stream has 100 active shards, there will be 100 Lambda functions running concurrently. Then, each Lambda function processes events on a shard in the order that they arrive.' Therefore, for checkpointing logic, we should make the primary | def handler(event, context):
debug = False
rewind = False
dry_run = False
table = _ensure_dynamo_table()
consumer_id = 'test-consumer'
if debug:
state = table.scan()
print "Active leases in Dynamo:", state["Count"]
for item in state["Items"]:
print json.dumps(item, indent=4, sort_keys=True)
lease = None
shard = None
try:
visitors = set()
last_timestamp = None
for i, record in enumerate(event.get('Records', [])):
event_id, data = (record['eventID'], record['kinesis']['data'])
shard, checkpoint = event_id.split(u':')
if rewind:
print "Rewinding to checkpoint 0"
_clear_consumer_lease(table, consumer_id, shard)
rewind = False
if lease is None:
lease = _get_consumer_lease(table, consumer_id, shard) \
or {"checkpoint": "0"}
if checkpoint <= lease["checkpoint"]:
# replayed event, we should skip it
print "Replayed event; skipping"
continue
# => decode from b64
raw_event = base64.b64decode(data)
# => parse from JSON
json_event = json.loads(raw_event)
# => extract out visitor id and timestamp if present
visitor = json_event.get("visitor_site_id", "N/A")
visitors.add(visitor)
last_timestamp = json_event.get("ts_action", "N/A")
# => do something with the data
result = process(json_event)
if result:
pass
# => checkpoint the shard
lease["checkpoint"] = checkpoint
logger.info("Saw {} unique visitors in batch ending with {}".format(
len(visitors), last_timestamp))
if not dry_run:
_put_consumer_lease(table, consumer_id, shard, lease)
except Exception as ex:
# do not save consumer checkpoints because error happened
# instead, we should probably log something about the error
# in the consumer lease, to allow the Lambda to retry a fixed
# number of times, before finally "giving up" and skipping
# the records
raise
"^ some form of error handling required"
if ex:
pass | [
"def lambda_handler(event, context):\n\n mytime, lambda_name, env_vars = lambda_init.init_lambda(context)\n stage = env_vars[\"stage\"]\n consumer_master_past_lambda = env_vars[\"consumer_master_past_name\"]\n\n apps, test_params = init_apps_from_test_params(event)\n filters = init_filters()\n\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test if the path holder contains a shot render. | def test(cls, pathHolder, parentCrawler):
if not super(ShotRenderCrawler, cls).test(pathHolder, parentCrawler):
return False
renderType = pathHolder.baseName().split(".")[0].split("_")[-1]
return renderType == "sr" | [
"def is_screenshot(self):\n\n if os.path.basename(self.path).endswith('.png'):\n try:\n if magic.from_file(self.path, mime=True) == 'image/png':\n return True\n except:\n pass\n\n return False",
"def point_is_shot(self, point: Po... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find links in jsoncompatible data. | def find_links(obj):
if isinstance(obj, dict):
for key, value in obj.iteritems():
for url in find_links(value):
yield url
elif isinstance(obj, list):
for item in obj:
for url in find_links(item):
yield url
else:
try:
if is_link(str(obj)):
yield obj
except Exception:
pass | [
"def get_data_urls(response):\n return [link['href'] for link in response.json()['links'] if link.get('rel', 'data') == 'data']",
"def extract_links(input, output):\n rspecs = json.load(input)\n links = _extract_links(rspecs)\n output.write(json.dumps(links, sort_keys=True, indent=4, default=serialize_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the correct backend driver for data persistent. | def _load_driver(backend, **kargs):
bk_module = importlib.import_module('backend', __package__)
driver_cls = getattr(bk_module, str.capitalize(backend) + 'Backend')
return driver_cls(**kargs) | [
"def _load_driver(self):\r\n name = self.config.backend\r\n if not name:\r\n #Raise some exception, bail out we are done.\r\n raise RuntimeError('config item db.backend not set')\r\n if '.' in name:\r\n importname = name\r\n else:\r\n importnam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the table name to save data from the url. | def _get_table_name(url):
try:
return urlparse(url).path.strip('/').split('/')[1]
except IndexError:
return None | [
"def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]",
"def table_name(self) -> str:\n return pulumi.get(self, \"table_name\")",
"def get_table_name(self):\n return self._config['table']",
"def table_name(self) -> pulumi.Input[str]:\n return pulumi.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save data from response to backend persistent driver. Only save the detail item from a url, filter out the overall items like | def save(self, response):
url = response.url
if self.item_url(url):
table_name = self._get_table_name(url)
if table_name:
data = response.json()
self.backend.save(table_name, data) | [
"def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a keyvault with 0 access policies is processed properly and doesn't raise an exception. | def test_whitelist_zero_access_policies(self):
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault2*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['account1@sample.com']}
]}
]
})
resources = p.run()
self.assertEqual(len(resources), 0) | [
"def test_reject_units_when_auth_keys_is_empty(self):\n self._propose('my.config.unit', 'myvalue')\n\n self._expect_get('hashblock.units.vote.authorized_keys')\n self._expect_get('hashblock.units.vote.approval_threshold')\n\n self._expect_invalid_transaction()",
"def test_get_authz_fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filters a list of elements. 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'elements' is the list of elements to filter. Returns a list containing only those elements for which 'select' returns True. | def filter(self, viewer, parent, elements):
return [e for e in elements if self.select(viewer, parent, e)] | [
"def filterSelection(filters):\n result = simpleFilter(pm.selected(), filters)\n return result",
"def select(self, viewer, parent, element):\n\n return True",
"def filter_by_reviewers(reviews, selected_reviewers):\n return [x for x in reviews if x.reviewer in selected_reviewers]",
"def sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the element is 'allowed' (ie. NOT filtered). 'viewer' is the viewer that we are filtering elements for. 'parent' is the parent element. 'element' is the element to select. By default we return True. | def select(self, viewer, parent, element):
return True | [
"def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None",
"def tag_visible(element):\n \n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Is the filter affected by changes to an element's trait? 'element' is the element. 'trait_name' is the name of the trait. Returns True if the filter would be affected by changes to the trait named 'trait_name' on the specified element. By default we return False. | def is_filter_trait(self, element, trait_name):
return False | [
"def have_traits_changed(self, name_or_uuid, traits):\n with self.lock:\n provider = self._find_with_lock(name_or_uuid)\n return provider.have_traits_changed(traits)",
"def trait_is_defined(obj, trait_name):\n return obj.has_trait(trait_name) and trait_name in obj._trait_values",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a binary tree, find its minimum depth. The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node. | def minDepth(self, root: TreeNode) -> int:
return self.bfs(root) | [
"def mindepth(l):\n m = [depth(x) for x in l if len(x)]\n if not m:\n return 0\n return min(m)",
"def mindepth(l):\n m = [depth(x) for x in l if length(x)]\n if not m:\n return 0\n return min(m)",
"def max_depth(root):\n # basic case\n if root is None:\n return 0\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts the complex number `c` to a string in Fortranformat, i.e. (Re c, Im c). If c is iterable, it returns a string of the form [(Re c_1, Im c_1), ...]. | def str_complex(c, kindstr=''):
if hasattr(c, '__iter__'):
return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
else:
c = complex(c)
return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr) | [
"def complex_vct_str ( vct , format = '%.5g%-+.5gj' ) :\n try :\n lst = [] \n for c in vct :\n cc = complex ( c )\n item = format % ( cc.real , cc.imag )\n lst.append ( cc ) \n return '[ ' + ', '.join ( lst ) + ' ]' \n except TypeError :\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select PORT update events, notify the observers upon a port update in APPL_DB/CONFIG_DB or a XCVR insertion/removal in STATE_DB | def handle_port_update_event(sel, asic_context, stop_event, logger, port_change_event_handler):
if not stop_event.is_set():
(state, _) = sel.select(SELECT_TIMEOUT_MSECS)
if state == swsscommon.Select.TIMEOUT:
return
if state != swsscommon.Select.OBJECT:
logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')
return
for port_tbl in asic_context.keys():
while True:
(key, op, fvp) = port_tbl.pop()
if not key:
break
if not validate_port(key):
continue
fvp = dict(fvp) if fvp is not None else {}
if 'index' not in fvp:
fvp['index'] = '-1'
port_index = int(fvp['index'])
port_change_event = None
if op == swsscommon.SET_COMMAND:
port_change_event = PortChangeEvent(key,
port_index,
asic_context[port_tbl],
PortChangeEvent.PORT_SET,
fvp)
elif op == swsscommon.DEL_COMMAND:
port_change_event = PortChangeEvent(key,
port_index,
asic_context[port_tbl],
PortChangeEvent.PORT_DEL,
fvp)
if port_change_event is not None:
port_change_event_handler(port_change_event) | [
"def on_dpport_config(self, evt):\n if fibclog.dump_msg():\n _LOG.debug(\"%s\", evt.msg)\n\n try:\n port = fibcdbm.portmap().find_by_dp(dp_id=evt.dp_id, port_id=evt.port_id)\n if evt.enter:\n port.update_dp(evt.enter)\n self.send_port_stat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select CONFIG_DB PORT table changes, once there is a port configuration add/remove, notify observers | def handle_port_config_change(sel, asic_context, stop_event, port_mapping, logger, port_change_event_handler):
if not stop_event.is_set():
(state, _) = sel.select(SELECT_TIMEOUT_MSECS)
if state == swsscommon.Select.TIMEOUT:
return
if state != swsscommon.Select.OBJECT:
logger.log_warning('sel.select() did not return swsscommon.Select.OBJECT')
return
read_port_config_change(asic_context, port_mapping, logger, port_change_event_handler) | [
"def on_dpport_config(self, evt):\n if fibclog.dump_msg():\n _LOG.debug(\"%s\", evt.msg)\n\n try:\n port = fibcdbm.portmap().find_by_dp(dp_id=evt.dp_id, port_id=evt.port_id)\n if evt.enter:\n port.update_dp(evt.enter)\n self.send_port_stat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get port mapping from CONFIG_DB | def get_port_mapping(namespaces):
port_mapping = PortMapping()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
config_db = daemon_base.db_connect("CONFIG_DB", namespace=namespace)
port_table = swsscommon.Table(config_db, swsscommon.CFG_PORT_TABLE_NAME)
for key in port_table.getKeys():
if not validate_port(key):
continue
_, port_config = port_table.get(key)
port_config_dict = dict(port_config)
port_change_event = PortChangeEvent(key, port_config_dict['index'], asic_id, PortChangeEvent.PORT_ADD)
port_mapping.handle_port_change_event(port_change_event)
return port_mapping | [
"def getDbPort():\n\n if \"DB_PORT\" in controller.CONF.keys():\n return controller.CONF[\"DB_PORT\"]\n\n return basedefs.DB_PORT",
"def get_bridge_port_map(db):\n db.connect('ASIC_DB')\n if_br_oid_map = {}\n br_port_str = db.keys('ASIC_DB', \"ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT:*\")\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a class that doesn't descend from Pickleable to the pickle whitelist | def addClassToPickleWhitelist(cls):
unpickleWhitelist_.add(cls) | [
"def make_class_serializable(cls):\n global _registered_serializable_classes\n if cls not in _registered_serializable_classes:\n _registered_serializable_classes.append(cls)",
"def _check_pickleable(obj):\n def recurse(obj):\n if isinstance(obj, (list, tuple, set)):\n return [rec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively searches for 'datacubedefinition.prj' in a level2 directory and returns its parent directory. | def _get_datacubeprj_dir(directory):
prj_path = []
for path in Path(directory).rglob('datacube-definition.prj'):
prj_path.append(path)
if len(prj_path) < 1:
raise FileNotFoundError(f"'datacube-definition.prj' not found in {directory}")
elif len(prj_path) > 1:
raise RuntimeError(f"'datacube-definition.prj' multiple copies found in {directory}")
else:
return prj_path[0].parent | [
"def _find_parent(api, project, name):\n cur_folder = None\n for f in [x for x in name.split(\"/\") if x]:\n if not cur_folder:\n cur_folder = list(api.files.query(project, names=[f]).all())[0]\n else:\n cur_folder = list(api.files.query(parent=cur_folder.id, names=[f]).all... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a vocabulary from the training directory return a sorted vocabulary list | def create_vocabulary(directory, cutoff):
top_level = os.listdir(directory)
a = cutoff
vocab = {}
for d in top_level:
subdir = d if d[-1] == '/' else d+'/'
files = os.listdir(directory+subdir)
for f in files:
with open(directory+subdir+f,'r', encoding="utf-8") as doc:
for word in doc:
word = word.strip()
if not word in vocab and len(word) > 0:
vocab[word] = 1
elif len(word) > 0:
vocab[word] += 1
return sorted([word for word in vocab if vocab[word] >= cutoff]) | [
"def create_vocabulary(directory, cutoff):\n top_level = os.listdir(directory)\n vocab = {}\n for d in top_level:\n subdir = d if d[-1] == '/' else d+'/'\n files = os.listdir(directory+subdir)\n for f in files:\n with open(directory+subdir+f,'r') as doc:\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return the class conditional probability of label over all words, with smoothing | def p_word_given_label(vocab, training_data, label):
    """Return log P(word | label) for every word, with add-one smoothing.

    Keys are the words seen in the training data plus None, which pools
    every out-of-vocabulary word; values are log-probabilities estimated
    only from documents carrying `label`.

    NOTE(review): vocabulary words that never occur in any training
    document get no entry in the returned dict -- confirm callers fall
    back to the None bucket for them.
    """
    smooth = 1 # smoothing factor
    word_prob = {}
    total_word = 0
    # None pools the counts of all out-of-vocabulary words.
    word_prob[None] = 0
    for dic in training_data:
        # Pre-seed a zero count for every word seen anywhere in the
        # training data (regardless of label) so the smoothing loop at
        # the end visits it.
        for index0, i0 in enumerate(dic['bow']):
            if (list(dic['bow'])[index0] in word_prob):
                continue;
            word_prob[list(dic['bow'])[index0]] = 0
        if(dic["label"] == label):
            # Accumulate counts only from documents of the target label.
            for index, i in enumerate(dic["bow"]):
                if(list(dic['bow'])[index] in vocab):
                    if(list(dic['bow'])[index] in word_prob):
                        word_prob[list(dic['bow'])[index]] += dic["bow"][i]
                    else:
                        word_prob[list(dic['bow'])[index]] = dic["bow"][i]
                else:
                    # Out-of-vocabulary word: pool its count under None.
                    if(None in word_prob):
                        word_prob[None] += dic["bow"][i]
                    else:
                        word_prob[None] = 0
                total_word += dic["bow"][i]
    # Laplace smoothing: +1 per word; the denominator sees |vocab| + 1
    # pseudo-words (the +1 being the None bucket).
    for h in word_prob:
        word_prob[h] = math.log((word_prob[h] + smooth*1)) - math.log((total_word + smooth*(len(vocab) +1)))
    return word_prob | [
"def p_word_given_label(vocab, training_data, label):\n\n smooth = 1 # smoothing factor\n wordCnt = 0\n word_prob = {}\n\n for word in vocab:\n word_prob[word] = smooth\n \n word_prob[None] = smooth\n\n for data in training_data:\n if data['label'] == label:\n for word ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find all pairs of unique indices which form a palindrome. | def palindromePairs(lst):
results = []
for i, e1 in enumerate(lst):
for j, e2 in enumerate(lst):
if i != j:
if isPalindrome(e1+e2):
results.append((i, j))
return results | [
"def palindromo(start, end):\n return [i for i in range(start, end + 1) if str(i) == str(i)[::-1]]",
"def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n",
"def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a dictionary to an earth engine feature server side | def dict_to_feature(d):
f = ee.Feature(None,ee.Dictionary(d))
return f | [
"def _preprocess(self, feature_dict):\n return feature_dict",
"def readings_dict_to_features(dict, raw):\n final = None\n for key in sorted(dict):\n converted = reading_to_feature(dict[key], raw)\n if final is None:\n final = converted\n else:\n final = np.concatenate([final, converted])\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert volume to flux | def volumeToFlux(volume_image):
image = ee.Image(volume_image)
flux_image = image.divide(ee.Image(AREA_PFAF6_30MIN)).multiply(1e6).copyProperties(image)
flux_image = flux_image.set("units","m")
flux_image = flux_image.set("convertedToFlux", 1)
return flux_image | [
"def flux(source, freq=0.0, deltafreq=0.0, daysback=0.0) :\n x = queryFlux(source,freq,deltafreq,daysback)\n return x.flux",
"def flux(self, x):\n return self.cal_spec.get_flux(self(x))",
"def flux(self, q):\n q1, q2 = q\n if q1 > 0:\n u = q2/q1\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
filters an imagecollection based on year and month | def filter_ic(ic,year,month):
ic_filtered = (ic.filter(ee.Filter.eq("month",month))
.filter(ee.Filter.eq("year",year)))
image = ee.Image(ic_filtered.first())
return(image) | [
"def _filter_temporal(self, start_date: str, end_date: str) -> 'ImageCollection':\n process_id = 'filter_daterange'\n args = {\n 'imagery': self.graph,\n 'extent': [start_date, end_date]\n }\n\n return self.graph_add_process(process_id, args)",
"def fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Zonal statistics with rasters as input and rasters and lists as output | def zonalStatsToRaster(image,zonesImage,geometry,maxPixels,reducerType):
    """Reduce `image` per zone of `zonesImage` and paint the results back.

    Returns (newImage, zoneList, valueList, countList): the zones image
    with per-zone count and reduced-value bands added, plus the raw
    per-zone lists. The zones image's nominal scale sets the reduction
    resolution.
    """
    # reducertype can be mean, max, sum, first. Count is always included for QA
    # the resolution of the zonesimage is used for scale
    # Server-side dispatch: nested Ifs map the reducerType string onto an
    # ee.Reducer ("error" falls through for unknown types).
    reducer = ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"mean"),ee.Reducer.mean(),
    ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"max"),ee.Reducer.max(),
    ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"sum"),ee.Reducer.sum(),
    ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"first"),ee.Reducer.first(),
    ee.Algorithms.If(ee.Algorithms.IsEqual(reducerType,"mode"),ee.Reducer.mode(),"error"))))
    )
    # Always attach a pixel count (QA) and group results by the zone band.
    reducer = ee.Reducer(reducer).combine(reducer2= ee.Reducer.count(), sharedInputs= True).group(groupField=1, groupName="zones")
    # NOTE(review): getInfo() forces a synchronous client round-trip here.
    scale = zonesImage.projection().nominalScale().getInfo()
    zonesImage = zonesImage.select(zonesImage.bandNames(),["zones"])
    totalImage = ee.Image(image).addBands(zonesImage)
    resultsList = ee.List(totalImage.reduceRegion(
        geometry= geometry,
        reducer= reducer,
        scale= scale,
        maxPixels=maxPixels
    ).get("groups"))
    # Fill in defaults for zones where the reducer produced no output.
    resultsList = resultsList.map(ensure_default_properties);
    zoneList = mapList(resultsList, 'zones');
    countList = mapList(resultsList, 'count');
    valueList = mapList(resultsList, reducerType);
    # Paint the per-zone statistics back onto the zone raster.
    valueImage = zonesImage.remap(zoneList, valueList).select(["remapped"],[reducerType])
    countImage = zonesImage.remap(zoneList, countList).select(["remapped"],["count"])
    newImage = zonesImage.addBands(countImage).addBands(valueImage)
    return newImage,zoneList,valueList,countList | [
"def zonal_stats(vectors, raster, layer=0, band_num=1, nodata_value=None,\n global_src_extent=False, categorical=False, stats=None,\n copy_properties=False, all_touched=False, transform=None, affine=None,\n add_stats=None, raster_out=False, category_map=None, **kwargs):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instantiate the daily profile class.. | def __init__(self, profile: Dict[datetime.time, float] = None) -> None:
if profile is None:
profile = dict()
if not isinstance(profile, dict):
raise ProgrammerJudgementFault(
"The input daily profile provided is not a mapping of the correct type."
)
self._profile = profile | [
"def __init__(self, dt=60*60*24):\n pass",
"def __init__(self, start_date=None, subusers=None): \n self._subusers = None\n super(SubuserStats, self).__init__()\n\n # Minimum required for subusers stats\n if start_date and subusers:\n self.start_date = start_dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the internal profile with the mapping provided. | def update(self, profile: Dict[datetime.time, float]) -> None:
if self._profile is None:
self._profile = profile
else:
self._profile.update(profile) | [
"def update_profile(self):\n self.update()",
"def UpdateProfile(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def update(self, mapping):\n if not ismapping(mapping):\n raise TypeError(\"mapping type required\")\n field_names = getpyattr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The density of air varies as a function of temperature. | def density_of_air(self) -> float:
return self.pressure / (SPECIFIC_GAS_CONSTANT_OF_AIR * self.ambient_temperature) | [
"def air_density(alt, temp): \n return air_density_pressure(temp, pressure_from_altitude(alt))",
"def air_density(self):\n return self.flow_field.air_density",
"def density(self):\n return self.fluid.density(self.T_C)",
"def air_density_pressure(temp, pressure_hpa): \n R_air = 287\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The dynamic viscosity of air varies as a function of temperature. | def dynamic_viscosity_of_air(self) -> float:
return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (
self.ambient_temperature + 110.4
) | [
"def dynamic_viscosity(self):\n return self.fluid.viscosity(self.T_C)",
"def kinematic_viscosity_of_air(self) -> float:\n\n return self.dynamic_viscosity_of_air / self.density_of_air",
"def latent_heat_vapourisation(self, tair):\n return (2.501 - 0.00237 * tair) * 1E06",
"def air_density(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the heat capacity of air in Joules perkilogram Kelvin. The heat capacity of air varies with a function of temperature and is given by an empiricallyderived formula. | def heat_capacity_of_air(self) -> float:
return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2 | [
"def vibrational_heat_capacity(self, temperature, volume):\n y = self.debye_temperature(volume) / temperature\n factor = 3. / y ** 3\n if y < 155:\n integral = quadrature(lambda x: x ** 4 *np.exp(x)/ (np.exp(x) - 1.)**2, 0, y)\n return 3*self.kb * self.natoms * list(integr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The kinematic viscosity of air varies as a function of temperature. | def kinematic_viscosity_of_air(self) -> float:
return self.dynamic_viscosity_of_air / self.density_of_air | [
"def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )",
"def kinematic_viscosity(self):\n return self.mu / self.rho",
"def dynamic_viscosity(self):\n return self.fluid.vi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines the radiative temperature of the sky. The "sky," as a black body, has a radiative temperature different to that of the surrounding air, or the ambient temperature. This function converts between them and outputs the sky's radiative temperature. | def sky_temperature(self) -> float:
return 0.0552 * (self.ambient_temperature**1.5) | [
"def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)",
"def temperature(self):\n return 2 * self.annealing_factor**self.episodes_so_far",
"def ambient_te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The thermal conductivity of air varies as a function of temperature. | def thermal_conductivity_of_air(self) -> float:
# This more accurate equation is not used by the paper.
# return (0.02646 * self.ambient_temperature ** 1.5) / (
# self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))
# )
# The reference suggests this equation is accurate to 1%.
return 0.02646 * (self.ambient_temperature / 300) ** 0.8646 | [
"def thermal_conductivity(self):\n return self.fluid.conductivity(self.T_C)",
"def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature",
"def thermal_conductivity(self):\n return self._thermal_conductivity",
"def thermal_conductivity(temperature):\n a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The thermal expansion coefficient of air varies as a function of temperature. | def thermal_expansivity_of_air(self) -> float:
return 1 / self.ambient_temperature | [
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference sug... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |