query stringlengths 9 9.05k | document stringlengths 10 222k | negatives listlengths 19 20 | metadata dict |
|---|---|---|---|
Mark a class as Controller Resource | def add_resource(self, cls):
# check if the same controller was already used for another cls (Resource)
if (
hasattr(self, Controller.RESOURCE_CLASS_KEY)
and getattr(self, Controller.RESOURCE_CLASS_KEY) != cls
):
raise MultipleResourceException()
# check if cls (Resource) was exteded from another
if hasattr(cls, Controller.RC_KEY):
self.__get_parent_routes(cls.__router__)
setattr(cls, Controller.RC_KEY, self.router)
setattr(self, Controller.RESOURCE_CLASS_KEY, cls)
cls.router = lambda: Controller.__parse_controller_router(cls)
return cls | [
"def create_controller(self, resource):\n\n return type('%sController' % resource.__name__, (self.default_controller,), {\n 'configuration': self,\n 'resource': resource,\n 'version': (resource.version, 0),\n })",
"def resource(self, resource):\n self._resourc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
It returns the FastAPI router. Use it as if you are using the original one. | def route(self) -> APIRouter:
return self.router | [
"def router(self):\n return self.pluginpod.router",
"def get_router():\r\n router = getattr(settings, 'RAPIDSMS_ROUTER',\r\n 'rapidsms.router.blocking.BlockingRouter')\r\n if isinstance(router, basestring):\r\n try:\r\n router = import_class(router)()\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if two shards overlap. | def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):
# For each dim of each shard, check if one shard resides on the other
# end of second shard with respect to that dim. As an example for a 2D
# shard, we would check if one shard is above or on the left of the
# other shard.
ndims = len(shard1.shard_offsets)
for i in range(ndims):
if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_lengths[i]:
return False
if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_lengths[i]:
return False
return True | [
"def _check_box_overlap(\n box0: ChunkStorageMetadata, box1: ChunkStorageMetadata\n) -> bool:\n\n # For each dim of each shard, check if one shard resides on the other\n # end of second shard with respect to that dim. As an example for a 2D\n # shard, we would check if one shard is above or on the left ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensures none of the shards overlap with each other. | def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):
# TODO: evaluate optimizing this if needed.
for i in range(len(shards)):
for j in range(i + 1, len(shards)):
if _check_shard_metadata_pair_overlap(shards[i], shards[j]):
raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap') | [
"def can_overlap(self):\n return False",
"def validate_shards(shard_ranges_by_partition_name):\n shards_seen = set()\n previous_range = None\n for group, shard_range, in sorted(list(shard_ranges_by_partition_name.items()),\n key=lambda x: x[1]):\n if not... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the shards_metadata is compatible with the provided tensor dims. | def check_tensor(shards_metadata, tensor_dims) -> None:
# If the tensor's volume matches the total volume of all shards and
# all shard boundaries are within tensor dims, we have a compatible
# sharding spec for this tensor. Note that we have already verified
# we don't have overlapping shards.
tensor_rank = len(tensor_dims)
shards_rank = len(shards_metadata[0].shard_offsets)
if tensor_rank != shards_rank:
raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}')
total_shard_volume = 0
for shard in shards_metadata:
shard_volume = 1
for i, shard_length in enumerate(shard.shard_lengths):
shard_volume *= shard_length
if shard.shard_offsets[i] + shard.shard_lengths[i] > tensor_dims[i]:
raise ValueError(
f'Shard offset {shard.shard_offsets[i]} and length '
f'{shard.shard_lengths[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')
total_shard_volume += shard_volume
tensor_volume = 1
for size in tensor_dims:
tensor_volume *= size
if total_shard_volume != tensor_volume:
# TODO: Can we improve this error message to point out the gaps?
raise ValueError(
f'Total volume of shards: {total_shard_volume} '
f'does not match tensor volume: {tensor_volume}, in other words '
f'all the individual shards do not cover the entire tensor') | [
"def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process newly submitted GeoPost entry... PROCEEDURE 1) Get data from POST body 2) Validate form 3) Upload photo to bucket 4) Make WFS transaction with GeoServer | def post(self, request):
# GET REQUEST DATA
fid = request.POST.get('fid', False)
uuid = request.POST.get('uuid', False)
title_text = request.POST.get('title', False)
body = request.POST.get('body', False)
photo = request.FILES.get('photo', False) # FOR STORAGE
wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER
data = {
'uuid': uuid,
'title_text': title_text,
'body': body,
'wfsxml': wfsxml
}
# VALIDATE FORM
form = GeoPostForm(data, request.FILES)
logger.info("\ninstantiate Geopost form\n")
# IF FORM VALIDATION ERROR
if not form.is_valid():
return server_error(request.body)
#context = self.getContext(form)
#return render(request, 'geopost/entry.html', context)
else:
pass
# GET CLEAN VALUES
uuid = form.cleaned_data['uuid']
wfsxml = form.cleaned_data['wfsxml']
# UPLOAD PHOTO TO BUCKET
# if editing existing entry, first delete existing photo
if fid:
delete_from_bucket(uuid, self.imageBucket)
else:
pass
photo.open('rb')
error = upload_to_bucket(
photo, self.imageBucket, photo.content_type, uuid)
photo.close()
# IF ERROR UPLOADING IMAGE
if error:
return server_error(error)
else:
pass
# MAKE GEOSERVER WFS TRANSACTION
error = post_to_geoserver(wfsxml, self.wfsURL)
# ALL GOOD
if not error:
return HttpResponseRedirect(reverse('geopost_home'))
# IF WFS TRANSACTION ERROR
else:
delete_from_bucket(uuid, self.imageBucket)
return server_error(error) | [
"def _preprocess_rack_form(postdata):\n\n if postdata[u'geocoded'] != u'1':\n if postdata['address'].strip():\n results = _geocode(postdata['address'])\n # XXX handle multiple (or zero) results.\n try:\n lat, lon = results[0][1]\n except IndexErro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download pdf of VanTechy presentation slideshow. | def vantechy(request):
return FileResponse(open('/files/presentation.pdf', 'rb')) | [
"def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it",
"def download(filename):\n return send_from_directory(directory='pdf', filename=filename)",
"def download(urls, target_dir):\n valid_urls = FileDownloadAndCombiner.filter_va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List all available charts | def list_charts():
charts_root = Path(R".\charm\data\charts")
charts = list(charts_root.rglob("*.chart"))
return charts | [
"def charts(self, **kwargs):\n return [Chart(result) for result in self._invoke('charts', kwargs)]",
"def list(self, **params):\n\n _, _, account_charts = self.http_client.get(\"/accountcharts\", params=params)\n return account_charts",
"def charts(self):\n return self._charts",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Convert a chart Path object to a string path relative to .\charm\data\charts | def strch(chart):
charts_root = Path(R".\charm\data\charts")
return str(chart.relative_to(charts_root)) | [
"def get_data_path(path):\n\n data_path = Path(self.kard.meta.get('data_path', 'data'))\n\n if data_path.is_absolute():\n return str(data_path / path)\n\n return str(self.kard_folder_path / self.kard.name / data_path /\n path)",
"def path(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the map grid cell as obstacle | def set_obstacle(self, pos: tuple):
if self.within_map(pos):
self.map[round(pos[0]), round(pos[1])] = OBSTACLE
return True
else:
return False | [
"def set_obstacle(self):\n self.state = self.Obstacle",
"def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1",
"def put(self, cell):\n if cell.x >= 0 and cell.x < len(self._grid[0]) and \\\n cell.y >= 0 and cell.y < len(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the main script for the bigmacc process. It iteartes through various CEA and bigmacc operations for each key (i.e. 01011101). It ends by saving a sample of the hourly results across the key for each building in a netcdf and then wiping the project files to reset them for the next iteration. | def run(config):
locator = cea.inputlocator.InputLocator(config.scenario)
print('Key in run')
print(config.bigmacc.key)
i = config.bigmacc.key
print(i)
# SCENARIO SETUP ---
config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i)
print(config.general.project)
cea.datamanagement.data_initializer.main(config)
# use the scenario code to set the year for the lca and other operations that need the current year
pathway_code = config.general.parent
pathway_items = pathway_code.split('_')
scenario_year = int(pathway_items[1])
config.emissions.year_to_calculate = scenario_year
bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round)
scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')
experiment_key = 'exp_{}'.format(i)
print(experiment_key)
keys = [int(x) for x in str(i)]
if experiment_key in scen_check['Experiments'].values.tolist():
print('Experiment was finished previously, moving to next.')
pass
else:
print('START: experiment {}.'.format(i))
# INITIALIZE TIMER ---
t0 = time.perf_counter()
if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)):
print(' - Folder exists for experiment {}.'.format(i))
else:
os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i))
print(' - Folder does not exist for experiment {}, creating now.'.format(i))
# run the archetype mapper to leverage the newly loaded typology file and set parameters
print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))
cea.datamanagement.archetypes_mapper.main(config)
# run the rule checker to set the scenario parameters
print(' - Running rule checker for experiment {}.'.format(i))
cea.bigmacc.bigmacc_rules.main(config)
# SIMULATIONS ---
print(' - Run radiation is {}.'.format(config.bigmacc.runrad))
print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))
# checking on need for radiation simulation
if config.bigmacc.runrad == True:
# this nested statement is for when we rerun the simulations and no longer need to run the unique radiation
if config.bigmacc.rerun != True:
print(' - Running radiation simulation for experiment {}.'.format(i))
if os.path.exists(locator.get_radiation_building('B000')):
print(' - Radiation folder exists for experiment {}, copying.'.format(i))
else:
print(' - Radiation running for experiment {}.'.format(i))
cea.resources.radiation_daysim.radiation_main.main(config)
else:
# print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i))
old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i,
config.general.scenario_name, 'outputs', 'data', 'solar-radiation')
# distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder())
else:
radfiles = config.bigmacc.copyrad
# print(' - Copying radiation results from {}.'.format(radfiles))
# distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())
print(' - Experiment {} does not require new radiation simulation.'.format(i))
# running demand forecasting
if os.path.exists(locator.get_schedule_model_file('B000')):
print(' - Schedules exist for experiment {}.'.format(i))
else:
print(' - Schedule maker running for experiment {}.'.format(i))
schedule_maker.main(config)
# check to see if we need to rerun demand or if we can copy
if config.bigmacc.rerun != True:
print(' - Running demand simulation for experiment {}.'.format(i))
cea.demand.demand_main.main(config)
else:
if keys[0] == 1:
print(' - Running demand simulation for experiment {}.'.format(i))
cea.demand.demand_main.main(config)
elif keys[6] == 1:
print(' - Running demand simulation for experiment {}.'.format(i))
cea.demand.demand_main.main(config)
else:
cea.demand.demand_main.main(config)
# print(' - Looking for demand results data from previous run for experiment {}.'.format(i))
# old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i,
# config.general.scenario_name, 'outputs', 'data', 'demand')
# if os.path.exists(old_demand_files):
# # print(' - Copy demand results files from previous run of experiment {}.'.format(i))
# # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())
# pass
# else:
# print(' - No results found.')
# print(' - Running demand simulation for experiment {}.'.format(i))
# cea.demand.demand_main.main(config)
if config.bigmacc.pv == True:
print(' - Run PV is {}.'.format(config.bigmacc.pv))
if config.bigmacc.rerun == True:
print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i))
old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i,
config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar')
if os.path.exists(old_pv_files):
# print(' - Copying PV files from previous run of experiment {}.'.format(i))
# distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder())
pass
else:
print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files))
print(' - Running PV simulation for experiment {}.'.format(i))
photovoltaic.main(config)
else:
# if PV simulation is needed, run it.
print(' - Running PV simulation for experiment {}.'.format(i))
photovoltaic.main(config)
print('Run water-body exchange is {}.'.format(config.bigmacc.water))
# if water-body simulation is needed, run it.
if config.bigmacc.water == True:
print(' - Running water body simulation for experiment {}.'.format(i))
water.main(config)
# recalculating the supply split between grid and ng in the websrook DH
if keys[4] == 1:
print(' - Do not run district heat recalculation.')
else:
print(' - Run district heat recalculation.')
cea.bigmacc.wesbrook_DH.main(config)
if keys[7] == 1:
print(' - PV use detected. Adding PV generation to demand files.')
util.write_pv_to_demand(config)
else:
print(' - No PV use detected.')
# running the emissions and costing calculations
print(' - Run cost and emissions scripts.')
cea.analysis.costs.system_costs.main(config)
cea.analysis.lca.main.main(config)
# clone out the simulation inputs and outputs directory
print(' - Transferring results directory for experiment {}.'.format(i))
new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,
config.general.scenario_name, 'inputs')
new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,
config.general.scenario_name, 'outputs', 'data')
if config.bigmacc.rerun != True:
distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path)
distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)
time_elapsed = time.perf_counter() - t0
# save log information
log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'),
index_col='Unnamed: 0')
log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),
'Completed': 'True',
'Experiment Time': '%d.2 seconds' % time_elapsed,
'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)
log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))
log_df.to_csv(r"C:\Users\justi\Desktop\126logger_backup.csv", )
# write netcdf of hourly_results
netcdf_writer.main(config, time='hourly')
if config.bigmacc.rerun != True:
shutil.rmtree(locator.get_costs_folder())
shutil.rmtree(locator.get_demand_results_folder())
shutil.rmtree(locator.get_lca_emissions_results_folder())
shutil.rmtree(locator.get_solar_radiation_folder())
shutil.rmtree(locator.get_potentials_folder())
else:
print(' - Rerun does not require purging of the files.')
# when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here
if keys[0] == 1:
cea.datamanagement.data_initializer.main(config)
else:
pass
print('END: experiment {}. \n'.format(i)) | [
"def main():\n start = 1554994269 # unix timestamp, fixed for reproducability\n stop = start + 850 * 61 # number of acqs * time between acqs\n sampling_rate = 512. # Hz\n\n # Nyquist freq needs to be larger than frequency of J-peaks\n nyquist = sampling_rate / 2 + 1\n assert nyquist > 250\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns if postcode like | def is_postal_code(elem):
return 'post' in elem.attrib['k'] | [
"def postcode(self):\n return self._postcode",
"def is_valid_postcode(postcode):\n if len(postcode) != 6 or postcode[:2] != \"72\":\n return False\n return postcode.isdigit()",
"def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dictionary named switcher to store all the switchlike cases. When you pass an argument to the switch_demo function, it is looked up against the switcher dictionary mapping. If a match is found, the associated value is printed, else a default string ('Invalid Month') is printed. The default string helps implement the 'default case' of a switch statement. | def switch_demo(argument):
switcher = {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December"
}
return switcher.get(argument, "Invalid month") | [
"def explaining_switch_with_default_dict(character2):\n character1 = defaultdict(lambda :'some other character',character)\n return character2+\":\"+character1[character2]",
"def dia_semana_switch(num):\n switcher = {\n 1: \"Lunes\",\n 2: \"Martes\",\n 3: \"Miercoles\",\n 4: \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove journal entry at position `pos`. | def remove_entry(self, pos: int) -> None:
del self.entries[pos] | [
"def _remove(self, pos):\n node = self._validate(pos)\n\n if self.num_children(pos) > 1:\n raise ValueError('pos have more than one child:', pos._node._element, list(pos._node._children.values()))\n\n # node is a leaf node\n if len(node._children) == 0:\n if not nod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save journal entries into a file. | def save(journal: Journal, file: Path) -> None:
with open(file, "w") as output:
output.writelines(f"{entry}\n" for entry in journal.get_entries()) | [
"def save(name, journal_data):\n filename = get_full_pathname(name)\n print(\"Saving to: {}\".format(filename))\n file_out = open(filename, 'w')\n\n for entry in journal_data:\n file_out.write(entry + '\\n')\n\n file_out.close()",
"def save_exit(name, data):\n jrn_path = build_path(name)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load journal entries from a file. | def load(journal: Journal, file: Path) -> None: | [
"def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load journal entries from a URI. | def load_from_web(journal: Journal, uri: str) -> None: | [
"def load(name):\n jrn_path = build_path(name)\n if not os.path.exists(jrn_path):\n print(f'... journal file \\'{jrn_path}\\' does not exist ...')\n print('... initializing new journal ...')\n with open(jrn_path, 'w') as file:\n pass\n return []\n else:\n print... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Spawning next generation of collection by selecting n pairs of distinct forests from previous generation and them over. | def _next_generation(self, previous_generation):
self._fullInput, self._fullOutput = previous_generation.get_data()
self.power = self.settings.population_count
for forest_iteration in range(self.power):
first, second = previous_generation.selection()
print 'selected for crossover ->', first.fitness, second.fitness
self._forests.append(OneForest(self.settings, first_forest=first, second_forest=second)) | [
"def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_fun... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Executing every forest in collection, activating their networks. By the way collecting data about best fitness function. | def execute(self):
process_list = []
forests_queue = Queue(self.power)
iterational = 0
print '| |-starting evaluation, training and validation'
for one_forest in self._forests:
process_list.append(
Process(target=main_async_method,
args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings)))
iterational += 1
for proc in process_list:
proc.start()
for proc in process_list:
proc.join()
for smth in range(forests_queue.qsize()):
tmp = forests_queue.get()
self._forests[tmp['place']].fitness = tmp['fitness']
fitness_summ = sum(map(lambda forest: forest.fitness, self._forests))
fss = map(lambda x: x.fitness, self._forests)
print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss)
self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests) | [
"def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)",
"def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Just mutating every forest in collection. | def mutate(self):
for forest in self._forests:
forest.mutate(self._fullInput) | [
"def unifyPreviewNodes(self):\n\n self.leaves.update(self.forced)\n self.forced = set()",
"def update(self):\n map(lambda x: x.update(), self._children.values())",
"def reset(self):\n for index in self.values():\n index.reset()\n self.objectids = self.family.IF.TreeSet()",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Query a SGL di un sensore del traffico Vedi query_ensor() per sensorURI, fromTime e toTime | def get_traffic_sensor_df(sensorURI: str, fromTime: str, toTime: str, resampleFreq: str = None, remove_outliers=False):
values = ["count", "sumSpeed"]
result = None
for v in values:
# data = query_ensor(sensorURI, fromTime, toTime, v)
data = multiday_query(sensorURI, fromTime, toTime, v)
df = pd.DataFrame(data, columns=["measuredTime", v])
df["measuredTime"] = pd.to_datetime(df["measuredTime"])
df.index = df["measuredTime"]
del df["measuredTime"]
if remove_outliers:
z_scores = np.abs(stats.zscore(df))
print(f"Removed outliers: {df.size - df[(z_scores < 3).all(axis=1)].size}")
df = df[(z_scores < 3).all(axis=1)]
if resampleFreq is not None:
df = df.resample(resampleFreq).sum()
if result is not None:
result = pd.merge_ordered(result, df, left_on="measuredTime", right_on="measuredTime")
result.index = result["measuredTime"]
del result["measuredTime"]
else:
result = df
# avg speed
result["avgSpeed"] = result["sumSpeed"] / result["count"]
result.loc[~np.isfinite(result["avgSpeed"]), "avgSpeed"] = np.nan
result["avgSpeed"] = result["avgSpeed"].interpolate()
return result | [
"def sensor():\n\n return Sensors(TERRAREF_BASE, 'station1', 'lv1_sensor1',\n stations=STATIONS)",
"def requestSensorData(self):\n self._sendSerialMessage('GET_SENSOR', [])",
"def read_sensor_wf(table, evt, isens):\n return (table.read_where(\"(event=={}) & (ID=={})\".format(evt, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot time points given in data file and compare to x3 | def plot_data(fname):
if not os.path.isfile(fname):
print('No data has been generated yet, aborting...')
sys.exit(1)
with open(fname, 'r') as fd:
data = json.load(fd)
x = np.arange(0, max(data, key=lambda e: e[0])[0], 1)
const = .55e-8
func = lambda x: const * x**3
plt.plot(
*zip(*data),
label=r'ShRec3D data points',
linestyle='None', marker='h'
)
plt.plot(x, func(x), label=r'$ %.0e \cdot x^3$' % const)
plt.title(r'Complexity ($\in \Theta\left(x^3\right)$) visualization of ShRec3D')
plt.xlabel('loci number')
plt.ylabel('execution time (seconds)')
plt.legend(loc='best')
plt.savefig('time_comparison.png', dpi=300, bbox_inches='tight')
plt.show() | [
"def visualize_time_data(time, index=None):\n length = time[0].shape[0]\n index = np.array(index or range(length))\n time_x = time[0].iloc[index]\n time_y = time[1].iloc[index]\n for key,val in eval(CFGS[\"DATA\"][\"PLOTTIMECOL\"]).items():\n for i in range(val[0], val[1]+1):\n idx... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Push the item in the front of the deque | def enqueue_front(self, item):
self._items.insert(0, item) | [
"def push_front(self, val):\r\n self.deque.insert(0, val)",
"def push_front(self, item):\n self.list.prepend(item)",
"def push_front(self, e):\n if(self.size_ >= self.capacity_):#If our Deque is full we need to resize it first\n self.resize_front()\n self.data_[self.fr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pop the item in the front of the deque. Raise IndexError if the deque is empty. | def dequeue_front(self):
try:
return self._items.pop(0)
except:
raise IndexError('The deque is empty') | [
"def pop_back(self):\r\n if self.size():\r\n self.deque.pop(-1)\r\n else:\r\n raise IndexError(\"Deque is empty.\")",
"def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None",
"def dequeue_rear(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pop the item in the end of the deque. Raise IndexError if the deque is empty. | def dequeue_rear(self):
try:
return self._items.pop()
except:
raise IndexError('The deque is empty') | [
"def pop_back(self):\r\n if self.size():\r\n self.deque.pop(-1)\r\n else:\r\n raise IndexError(\"Deque is empty.\")",
"def pop(self):\r\n try:\r\n return self.pop_from_deque()\r\n except IndexError:\r\n return None",
"def pop(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an array of full paths for a relative path with globs | def expand_path(__file__, path_with_globs):
return glob.glob(relative_path(__file__, path_with_globs)) | [
"def get_paths(file_path):\n return glob(path.join(file_path, '*'))",
"def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files",
"def recursive_glob(path):\n if \"*\" not in path:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
One solution would be to do an inorder traversal and sum the values along the way (or just recursive sum along the tree). => O(N) but in case the range [lo,hi] is small, this is wasteful. | def rangeSumBST(self, root: TreeNode, lo: int, hi: int) -> int:
def visit(node: TreeNode) -> int:
if not node:
return 0
if node.val < lo:
return visit(node.right)
elif hi < node.val:
return visit(node.left)
else:
return node.val + visit(node.left) + visit(node.right)
return visit(root) | [
"def rangeSumBST(self, root: TreeNode, low: int, high: int) -> int:\n self.traverse_path = []\n self.inorder(root)\n return sum(filter(lambda x: low<=x<=high, self.traverse_path))",
"def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = ent... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loops over arrays in the arrays_iterator and evaluates the cut_function at the cut_values. Returns a list of efficiences, passed events/objects, and total events/objects. cut_function is expected to return a tuple (n_pass, n_total) with input (arrays, cut_value). | def get_eff(arrays_iterator, cut_function, cut_values):
n_cuts = len(cut_values)
n_total = np.zeros(n_cuts)
n_pass = np.zeros(n_cuts)
for arrays, dataset in arrays_iterator:
weight = dataset.get_weight()
for i_cut, cut in enumerate(cut_values):
this_n_pass, this_n_total = cut_function(arrays, cut)
n_total[i_cut] += weight * this_n_total
n_pass[i_cut] += weight * this_n_pass
# Basically n_pass / n_total, but returns 0 if n_total has a 0 somewhere
eff = np.divide(n_pass, n_total, out=np.zeros_like(n_pass), where=n_total!=0)
return eff, n_pass, n_total | [
"def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Expects a list of signals and a list of bkgs (Dataset objects), and a cut_function and cut_values. | def roccurve(signals, bkgs, cut_function, cut_values):
eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values)
return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg | [
"def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):\n# if percent_sig_to_keep < 100:\n# raise NotImplementedError(\"percentage of < 100 not yet imlemented\")\n percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep\n bkg_length_before = len(bkg_da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Basic plotting style for a single roccurve, based on multiple signal and bkgs samples. Expects an ax object to be given, this function is not standalone | def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):
eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)
return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax) | [
"def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):\n # Get a default ax if none is given\n if ax is None:\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,8))\n ax = fig.gca()\n # Plot the base line\n ax.plot([0.0,1.0], [0.0,1.0], linestyle='... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Main routine for plotting a single roccurve | def plot_single_roccurve(signals, bkgs, cut_function, cut_values, ax=None):
# Get a default ax if none is given
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
# Plot the base line
ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
# Plot the single roccurve
line = plot_roccurve(signals, bkgs, cut_function, cut_values, ax=ax)
line.set_label(bkgs[0].get_category())
# Plot settings
ax.set_xlim(0.0, 1.05)
ax.set_ylim(0.0, 1.05)
ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
ax.legend(fontsize=DEFAULT_FONTSIZE)
return ax | [
"def plot_roccurve(signals, bkgs, cut_function, cut_values, ax):\n eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg = roccurve(signals, bkgs, cut_function, cut_values)\n return _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)",
"def plot_ROC_zoom():\r\n \r\n fpr = dict()\r\n tpr =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots the roccurve per background category. Assumes signals are all datasets of the same signal. | def plot_roccurves_per_bkg(signals, bkgs, cut_function, cut_values, ax=None):
# Get a default ax if none is given
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
# Get signal efficieny once
eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values)
# Perform some basic plotting setup
ax.plot([0.0,1.0], [0.0,1.0], linestyle='--', color='xkcd:gray')
ax.set_xlim(0.0, 1.05)
ax.set_ylim(0.0, 1.05)
ax.set_ylabel('Signal eff.', fontsize=DEFAULT_FONTSIZE)
ax.set_xlabel('Bkg eff.', fontsize=DEFAULT_FONTSIZE)
ax.legend(fontsize=DEFAULT_FONTSIZE)
# Then efficiencies per bkg category (ttjets, qcd, ...)
bkg_categories = list(set([ b.get_category() for b in bkgs ]))
bkg_categories.sort()
lines = {}
for bkg_cat in bkg_categories:
# Get Datasets that have this category
bkgs_this_cat = [ b for b in bkgs if b.get_category() == bkg_cat ]
# Compute efficiency in this category
eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs_this_cat), cut_function, cut_values)
# Draw roccurve for this category
line = _draw_roccurve(eff_sig, eff_bkg, cut_values, ax)
line.set_label(bkg_cat)
# Save this line in a dict for potential outputting/modifying
lines[bkg_cat] = line
return ax | [
"def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fills a coffea.hist.Hist for a single distribution. Takes a list of Dataset objects, and a function `get_array` that should return a numpylike array when given an arrays object. Also requires a string `name` to know in which hist to fill it | def hist_single_distribution(
arrays_iterator, get_array,
varname='somevar', vartitle=None, distrname='somedistr', distrtitle=None,
hist=None, left=-1., right=1., nbins=50
):
if hist is None:
import coffea.hist
vartitle = varname if vartitle is None else vartitle
hist = coffea.hist.Hist(
"Count",
coffea.hist.Bin(varname, vartitle, nbins, left, right),
coffea.hist.Cat('label', varname),
)
for arrays, dataset in arrays_iterator:
print(dataset.get_weight(), get_array(arrays))
hist.fill(label=distrname, weight=dataset.get_weight(), **{varname: get_array(arrays)})
return hist | [
"def array2hist(array, hist_name='hist_name', binning=(10,0,100), errors=None):\n if array.size != binning[0]:\n raise ValueError('Array size must be number of bins!')\n padded = np.pad(array,(1,1),'constant')\n if array.dtype == np.float32:\n h = ROOT.TH1F(hist_name,hist_name,binning[0],binn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a cut function and tries to return a title for it | def get_title(fn):
title = fn.name if hasattr(fn, 'name') else fn.__name__
title = title.replace('_cut_function','')
suffix = []
# if 'JetsAK15_subleading_' in title:
# suffix.append(r'$j^{\mathrm{AK15}}_{\mathrm{subl}}$')
title = title.replace('JetsAK15_subleading_', '').replace('subleading_', '')
if hasattr(fn, 'left'):
suffix.append('({:.0f} < {} < {:.0f})'.format(fn.left, svjflatanalysis.utils.get_title('mt'), fn.right))
# Transform variable name to title stirng
title = svjflatanalysis.utils.get_title(title)
if hasattr(fn, 'operator'):
title += ' ' + fn.operator + ' cut'
# Add the suffix
title += ' ' + ' '.join(suffix)
return title | [
"def sub_case_title(self, arg_tc):\n return self.title",
"def SubTitle(Text):\n pass",
"def make_title(words):",
"def get_title(title: str):\n return title",
"def generate_finding_title(title):\n\treturn \"Trend Micro: {}\".format(title)",
"def getTitle(test:str) -> str:\n return test[5:].... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Windows version of base.processInterrupt Note! This doesn't work terribly well with a lot of processes. | def processInterrupt(uPid):
try:
# pylint: disable=no-member
win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid);
#GenerateConsoleCtrlEvent = ctypes.windll.kernel32.GenerateConsoleCtrlEvent
#rc = GenerateConsoleCtrlEvent(1, uPid);
#reporter.log('GenerateConsoleCtrlEvent -> %s' % (rc,));
fRc = True;
except:
reporter.logXcpt('uPid=%s' % (uPid,));
fRc = False;
return fRc; | [
"def send_interrupt(process):\n logger.debug(\"Interrupting process {0} ...\".format(process))\n try:\n os.kill(process.pid, SIGINT)\n # os.kill(process.pid, SIGTERM)\n except OSError:\n pass # process cannot be killed\n except TypeError:\n pass # pid is incorrect type\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Posts a WM_CLOSE message to the specified thread. | def postThreadMesssageClose(uTid):
fRc = False;
try:
win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member
fRc = True;
except:
reporter.logXcpt('uTid=%s' % (uTid,));
return fRc; | [
"def postThreadMesssageQuit(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;",
"def terminate_thread... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Posts a WM_QUIT message to the specified thread. | def postThreadMesssageQuit(uTid):
fRc = False;
try:
win32api.PostThreadMessage(uTid, win32con.WM_QUIT, 0x40010004, 0); # DBG_TERMINATE_PROCESS # pylint: disable=no-member
fRc = True;
except:
reporter.logXcpt('uTid=%s' % (uTid,));
return fRc; | [
"def postThreadMesssageClose(uTid):\n fRc = False;\n try:\n win32api.PostThreadMessage(uTid, win32con.WM_CLOSE, 0, 0); # pylint: disable=no-member\n fRc = True;\n except:\n reporter.logXcpt('uTid=%s' % (uTid,));\n return fRc;",
"def threaded_quit(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Windows version of base.processKill | def processKill(uPid):
return processTerminate(uPid); | [
"def kill_subprocess(self):\n try:\n self.process.kill()\n except OSError:\n pass\n return",
"def cmd_process_kill(self, mysql_pid):\n raise NotImplementedError",
"def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WIND... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The Windows version of base.processCheckPidAndName | def processCheckPidAndName(uPid, sName):
fRc = processExists(uPid);
if fRc is True:
try:
from win32com.client import GetObject; # pylint: disable=F0401
oWmi = GetObject('winmgmts:');
aoProcesses = oWmi.InstancesOf('Win32_Process');
for oProcess in aoProcesses:
if long(oProcess.Properties_("ProcessId").Value) == uPid:
sCurName = oProcess.Properties_("Name").Value;
reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));
sName = sName.lower();
sCurName = sCurName.lower();
if os.path.basename(sName) == sName:
sCurName = os.path.basename(sCurName);
if sCurName == sName \
or sCurName + '.exe' == sName \
or sCurName == sName + '.exe':
fRc = True;
break;
except:
reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));
return fRc; | [
"def _get_process_name(pid):\n PROCESS_QUERY_INFORMATION = 0x0400\n PROCESS_VM_READ = 0x0010\n LIST_MODULES_ALL = 0x03\n MAX_PATH = 260\n\n kernel32 = ctypes.WinDLL(\"kernel32\", use_last_error=True)\n kernel32.OpenProcess.rettype = ctypes.wintypes.HANDLE\n kernel32.OpenProcess.argtypes = [\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Logs windows memory stats. | def logMemoryStats():
class MemoryStatusEx(ctypes.Structure):
""" MEMORYSTATUSEX """
kaFields = [
( 'dwLength', ctypes.c_ulong ),
( 'dwMemoryLoad', ctypes.c_ulong ),
( 'ullTotalPhys', ctypes.c_ulonglong ),
( 'ullAvailPhys', ctypes.c_ulonglong ),
( 'ullTotalPageFile', ctypes.c_ulonglong ),
( 'ullAvailPageFile', ctypes.c_ulonglong ),
( 'ullTotalVirtual', ctypes.c_ulonglong ),
( 'ullAvailVirtual', ctypes.c_ulonglong ),
( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),
];
_fields_ = kaFields; # pylint: disable=invalid-name
def __init__(self):
super(MemoryStatusEx, self).__init__();
self.dwLength = ctypes.sizeof(self);
try:
oStats = MemoryStatusEx();
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));
except:
reporter.logXcpt();
return False;
reporter.log('Memory statistics:');
for sField, _ in MemoryStatusEx.kaFields:
reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));
return True; | [
"def print_memory_stats(location_tag=\"undef\"):\n try:\n import psutil\n p = psutil.Process(os.getpid())\n rm, vm = p.get_memory_info()\n print \"MEM_STAT (%s) rm=%s, vm=%s\" % (location_tag, rm, vm)\n except ImportError:\n print \"psutil module not available\"",
"def log... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calls HeapValidate(GetProcessHeap(), 0, NULL); | def checkProcessHeap():
# Get the process heap.
try:
hHeap = ctypes.windll.kernel32.GetProcessHeap();
except:
reporter.logXcpt();
return False;
# Check it.
try:
fIsOkay = ctypes.windll.kernel32.HeapValidate(hHeap, 0, None);
except:
reporter.logXcpt();
return False;
if fIsOkay == 0:
reporter.log('HeapValidate failed!');
# Try trigger a dump using c:\utils\procdump64.exe.
from common import utils;
iPid = os.getpid();
asArgs = [ 'e:\\utils\\procdump64.exe', '-ma', '%s' % (iPid,), 'c:\\CrashDumps\\python.exe-%u-heap.dmp' % (iPid,)];
if utils.getHostArch() != 'amd64':
asArgs[0] = 'c:\\utils\\procdump.exe'
reporter.log('Trying to dump this process using: %s' % (asArgs,));
utils.processCall(asArgs);
# Generate a crash exception.
ctypes.windll.msvcrt.strcpy(None, None, 1024);
return True; | [
"def _mem_heap(self):\n return False",
"def test_func_heap(self):\n cmd = \"deref $_heap()\"\n target = _target(\"heap\")\n self.assertFailIfInactiveSession(gdb_run_cmd(cmd, target=target))\n res = gdb_run_silent_cmd(cmd, target=target)\n self.assertNoException(res)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs the component. The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will all be calculated. There must be a known Heat Recovery project for this component to run. | def run (self, scalers = {'capital costs':1.0}):
self.was_run = True
self.reason = "OK"
tag = self.cd['file id'].split('+')
if len(tag) > 1 and tag[1] != 'transmission':
self.was_run = False
self.reason = "Not a transmission project."
return
if not self.cd["model electricity"]:
self.was_run = False
self.reason = "Electricity must be modeled to analyze "+\
"transmission. It was not for this community."
return
if np.isnan(float(self.comp_specs['distance to community'])):
self.was_run = False
self.reason = ("There are no communities within 30 miles with"
" lower cost of electricity.")
return
self.calc_average_load()
try:
self.get_intertie_values()
except ValueError:
self.was_run = False
self.reason = ("Could not find data on community to intertie to.")
return
self.calc_pre_intertie_generation()
self.calc_intertie_offset_generation()
if self.cd["model heating fuel"]:
# change these below
self.calc_lost_heat_recovery()
# see NOTE*
#~ return
if self.cd["model financial"]:
# AnnualSavings functions (don't need to write)
self.get_diesel_prices()
# change these below
self.calc_capital_costs()
self.calc_annual_electric_savings()
self.calc_annual_heating_savings()
# AnnualSavings functions (don't need to write)
self.calc_annual_total_savings()
self.calc_annual_costs(self.cd['interest rate'],
scalers['capital costs'])
self.calc_annual_net_benefit()
self.calc_npv(self.cd['discount rate'], self.cd["current year"])
#~ print self.benefit_cost_ratio
self.calc_levelized_costs(self.proposed_generation_cost) | [
"def run (self, scalers = {'capital costs':1.0}):\n self.was_run = True\n self.reason = \"OK\"\n\n tag = self.cd['file id'].split('+')\n if len(tag) > 1 and tag[1] != 'residential':\n self.was_run = False\n self.reason = \"Not a residential project.\"\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the Average Diesel load of the current system Attributes | def calc_average_load (self):
#~ self.generation = self.forecast.generation_by_type['generation diesel']\
#~ [self.start_year]
self.average_load = \
self.forecast.yearly_average_diesel_load.ix[self.start_year] | [
"def calc_average_load (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.average_load = None\n self.generation = self.forecast.generation['generation diesel']\\\n [self.start_year]\n self.average_load = \\\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the generation offset by connecting a transmission line to the community to connect to. Attributes | def calc_intertie_offset_generation (self):
self.generation = \
self.forecast.get_generation(self.start_year,self.end_year)
dist = self.comp_specs['distance to community']
self.annual_transmission_loss = \
1 - (
(1- (self.comp_specs['transmission loss per mile']/ 100.0))
** dist)
self.intertie_offset_generation = \
self.generation * (1 + self.annual_transmission_loss)
gen_eff = self.intertie_generation_efficiency
self.intertie_offset_generation_fuel_used = \
self.intertie_offset_generation / gen_eff
#~ print 'self.proposed_generation',self.proposed_generation
#~ print con | [
"def calculate_and_set_propagation_distances(self):\n\n self.l_edge = self.calculate_distance_edge()\n self.l_int = self.calculate_distance_interaction()",
"def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.po... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the heat recovery | def calc_lost_heat_recovery (self):
if not self.cd['heat recovery operational']:
self.lost_heat_recovery = [0]
else:
gen_eff = self.cd["diesel generation efficiency"]
self.lost_heat_recovery = \
(self.generation / gen_eff )* .10 | [
"def _calculate_heat(self, traj):\n \n pass",
"def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the capital costs. Attributes | def calc_capital_costs (self):
road_needed = 'road needed'
if self.cd['on road system']:
road_needed = 'road not needed'
dist = self.comp_specs['distance to community']
self.capital_costs = self.comp_specs['est. intertie cost per mile']\
[road_needed] * dist
#~ print self.capital_costs | [
"def calc_capital_costs (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" calculate self.capital_costs(the cost of the project) a dollar value\"",
"def calc_capital_costs (self):\n self.capital_costs = self.opportunity_HH * self.refit_cost_rate",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate annual electric savings created by the project. Attributes | def calc_annual_electric_savings (self):
costs = self.comp_specs['diesel generator o&m']
for kW in costs.keys():
try:
if self.average_load < int(kW):
maintenance = self.comp_specs['diesel generator o&m'][kW]
break
except ValueError:
maintenance = self.comp_specs['diesel generator o&m'][kW]
self.baseline_generation_cost = maintenance + \
(self.pre_intertie_generation_fuel_used * self.diesel_prices)
maintenance = self.capital_costs * \
(self.comp_specs['percent o&m'] / 100.0)
self.proposed_generation_cost = maintenance + \
self.intertie_offset_generation_fuel_used * \
self.intertie_diesel_prices
self.annual_electric_savings = self.baseline_generation_cost -\
self.proposed_generation_cost
#~ print len(self.annual_electric_savings)
#~ print 'self.annual_electric_savings',self.annual_electric_savings | [
"def calc_annual_electric_savings (self):\n self.annual_electric_savings = np.zeros(self.project_life)",
"def calc_annual_electric_savings (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" create self.annual_electric_savings as an np.array, length\" +\\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate annual heating savings created by the project. Attributes | def calc_annual_heating_savings (self):
price = self.diesel_prices + self.cd['heating fuel premium']
maintenance = self.comp_specs['heat recovery o&m']
self.annual_heating_savings = -1 * \
(maintenance + (self.lost_heat_recovery * price)) | [
"def calc_annual_heating_savings (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" create self.annual_heating_savings as an np.array, length\" +\\\n \" self.project_life, of dollar values(numbers)\"",
"def calc_annual_heating_savings (self):\n se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get total fuel saved. Returns float the total fuel saved in gallons | def get_fuel_total_saved (self):
#~ print self.lost_heat_recovery
#~ print self.intertie_offset_generation_fuel_used
#~ print self.pre_intertie_generation_fuel_used
#~ gen_eff = self.cd["diesel generation efficiency"]
#~ fuel_used = self.intertie_offset_generation / gen_eff
generation_diesel_reduction = \
np.array(self.pre_intertie_generation_fuel_used\
[:self.actual_project_life])
return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\
generation_diesel_reduction | [
"def get_fuel_total_saved (self):\n return self.electric_diesel_reduction + self.reduction_diesel_used",
"def total_saved_fuel(self) -> int:\n return float(self._state.attributes[SERVICE_ALL_TRIPS]['totalSavedFuel'])",
"def get_fuel_total_saved (self):\n base_heat = \\\n self.bas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get your current running jobs on the Sherlock cluster | def running_jobs_sherlock():
user = os.environ['USER']
return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:] | [
"def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs",
"def show_jobs(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
simply sends a message to the client address specified. | def send_net_message_client(message, client_addr):
serverSocket.sendto(message, client_addr) | [
"def send_to_client(self, msg):\r\n msg += \"\\r\\n\"\r\n self.client.sendall(msg.encode('utf-8'))\r\n log_message = \"Sent to client at {0}: {1}\".format(self.address, msg)\r\n self.output_and_log(log_message)",
"def send_message(self, name, message):\n if message == \"\":\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the payee_wallet_id of this EscrowTransactionResponse. | def payee_wallet_id(self, payee_wallet_id):
self._payee_wallet_id = payee_wallet_id | [
"def payer_wallet_id(self, payer_wallet_id):\n\n self._payer_wallet_id = payer_wallet_id",
"def payeeid(self, payeeid):\n self._payeeid = payeeid",
"def payer_id(self, payer_id):\n if payer_id is None:\n raise ValueError(\"Invalid value for `payer_id`, must not be `None`\")\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the payer_wallet_id of this EscrowTransactionResponse. | def payer_wallet_id(self, payer_wallet_id):
self._payer_wallet_id = payer_wallet_id | [
"def payee_wallet_id(self, payee_wallet_id):\n\n self._payee_wallet_id = payee_wallet_id",
"def payer_id(self, payer_id):\n if payer_id is None:\n raise ValueError(\"Invalid value for `payer_id`, must not be `None`\")\n\n self._payer_id = payer_id",
"def payor_id(self, payor_id):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the withdrawn of this EscrowTransactionResponse. | def withdrawn(self, withdrawn):
self._withdrawn = withdrawn | [
"def withdraw(self, **params):\n # force a name for the withdrawal if one not set\n if 'coin' in params and 'name' not in params:\n params['name'] = params['coin']\n return self._request_margin_api('post', 'capital/withdraw/apply', True, data=params)",
"def withdraw(self, **params)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the escrow_address of this EscrowTransactionResponse. | def escrow_address(self, escrow_address):
self._escrow_address = escrow_address | [
"def set_address(self, address):\n pass",
"def set_address(self, a_address):\n self.set_parameter('address', a_address)\n return self",
"def address(self, address):\n self._address = address",
"def address(self, value):\n self.api_args['rdata']['address'] = value\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the record_status of this EscrowTransactionResponse. | def record_status(self, record_status):
self._record_status = record_status | [
"def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the update_date of this EscrowTransactionResponse. | def update_date(self, update_date):
self._update_date = update_date | [
"def updated_date(self, updated_date):\n\n self._updated_date = updated_date",
"def updated_date(self, updated_date):\n self._updated_date = updated_date",
"def set_latest_update_date(self):\n metadata = self.info()\n metadata.updated_at = dt.datetime.now()\n self.commit()",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper to log the failed SQS records metric | def _log_failed(cls, count):
MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count) | [
"def test_failed_deliveries_logging(self):\n sms = SMS.objects.create(to='+6280000000000', status=STATUS.queued,\n backend_alias='error')\n call_command('send_queued_sms', log_level=0)\n self.assertEqual(sms.logs.count(), 0)\n\n sms = SMS.objects.create(to... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Segment the records into batches that conform to SQS restrictions This will log any single record that is too large to send, and skip it. | def _message_batches(cls, records):
# Dump the records to a list of minimal json
records_json = [
json.dumps(record, separators=(',', ':')) for record in records
]
current_batch_size = 0
current_batch = []
for record in records_json:
line_len = len(record)
# Check if the max size of the batch has been reached or if the current
# record will exceed the max batch size and start a new batch
if ((len(current_batch) == cls.MAX_BATCH_COUNT) or
(current_batch_size + line_len > cls.MAX_BATCH_SIZE)):
yield current_batch[:]
current_batch_size = 0
del current_batch[:]
if line_len > cls.MAX_BATCH_SIZE:
LOGGER.error('Record too large (%d) to send to SQS:\n%s', line_len, record)
cls._log_failed(1)
continue
# Add the record to the batch
current_batch_size += line_len
current_batch.append(record)
# yield the result of the last batch (no need to copy via slicing)
if current_batch:
yield current_batch | [
"def test_firehose_segment_records_by_size(self):\n\n record_batch = [\n # unit_test_simple_log\n {'unit_key_01': 2, 'unit_key_02': 'testtest' * 10000}\n for _\n in range(100)]\n\n sized_batches = []\n\n for sized_batch in self.__sa_handler._segment_r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inspect the response and remove any records records that have successfully to sent For each record, the index of the response element is the same as the index used in the request array. | def _strip_successful_records(cls, messages, response):
success_ids = {
item['Id'] for item in response['Successful']
}
LOGGER.info('Removing sucessful message indices from batch: %s', success_ids)
for success_id in success_ids:
# Get the successful message by ID and remove it
message = cls._extract_message_by_id(messages, success_id)
if not message:
continue
messages.remove(message) | [
"def test_strip_successful_records(self):\n batch = [{'test': 'success'}, {'other': 'failure'}, {'other': 'info'}]\n response = {\n 'FailedPutCount': 1,\n 'RequestResponses': [\n {'RecordId': 'rec_id_01'},\n {'ErrorCode': 10, 'ErrorMessage': 'foo'},\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Send a list of records to SQS, batching as necessary | def send(self, payloads):
records = self._payload_messages(payloads)
# SQS only supports up to 10 messages so do the send in batches
for message_batch in self._message_batches(records):
response = self._send_messages(message_batch)
self._finalize(response, message_batch) | [
"def _send_batch(self):\n LOGGER.info('Sending SQS batch of %d keys: %s ... %s',\n sum(msg.num_keys for msg in self._messages), self._first_key, self._last_key)\n response = SQS_CLIENT.send_message_batch(\n QueueUrl=self._queue_url,\n Entries=[msg.sqs_entry() f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to add a user as friends that is, to create a bidirectional link that connects the two users. | def add_friends(self, user1_index, user2_index):
if user1_index >= self.num_users or user2_index >= self.num_users:
raise ValueError(
f"Number of users is {self.num_users}, but indices "
f"{user1_index} and {user2_index} were requested."
)
if self.users_hat[user1_index, user2_index] == 0:
self.users_hat[user1_index, user2_index] = 1
elif self.is_verbose():
self.log(f"User {user2_index} was already following user {user1_index}")
if self.users_hat[user2_index, user1_index] == 0:
self.users_hat[user2_index, user1_index] = 1
elif self.is_verbose():
self.log(f"User {user1_index} was already following user {user2_index}") | [
"def add_to_friends(self):\n self._iface.activate_overlay('friendadd', self.user_id)",
"def friending(user, friend):\n user.update(add_to_set__friends=friend)\n friend.update(add_to_set__friends=user)",
"def add_friend(self, friend_id):\n if Relationship.objects.filter(from_user_id=self.pk, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render the Lilypond music expression lily using lilypond. | def render_lily(self, lily):
shasum = "%s.png" % sha(lily.encode('utf-8')).hexdigest()
relfn = posixpath.join(self.builder.imgpath, 'lily', shasum)
outfn = path.join(self.builder.outdir, '_images', 'lily', shasum)
if path.isfile(outfn):
return relfn
if hasattr(self.builder, '_lilypng_warned'):
return None, None
music = DOC_HEAD + self.builder.config.pnglily_preamble + lily
if isinstance(music, unicode):
music = music.encode('utf-8')
# use only one tempdir per build -- the use of a directory is cleaner
# than using temporary files, since we can clean up everything at once
# just removing the whole directory (see cleanup_tempdir_lily)
if not hasattr(self.builder, '_lilypng_tempdir'):
tempdir = self.builder._lilypng_tempdir = tempfile.mkdtemp()
else:
tempdir = self.builder._lilypng_tempdir
tf = open(path.join(tempdir, 'music.ly'), 'w')
tf.write(music)
tf.close()
ensuredir(path.dirname(outfn))
# use some standard lilypond arguments
lilypond_args = [self.builder.config.pnglily_lilypond]
#lilypond_args += ['-o', tempdir, '--png']
lilypond_args += ['-dbackend=eps', '-dno-gs-load-fonts', '-dinclude-eps-fonts',
'-o', tempdir, '--png']
# add custom ones from config value
lilypond_args.extend(self.builder.config.pnglily_lilypond_args)
# last, the input file name
lilypond_args.append(path.join(tempdir, 'music.ly'))
try:
p = Popen(lilypond_args, stdout=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != 2: # No such file or directory
raise
self.builder.warn('lilypond command %r cannot be run (needed for music '
'display), check the pnglily_lilypond setting' %
self.builder.config.pnglily_lilypond)
self.builder._lilypng_warned = True
return None, None
stdout, stderr = p.communicate()
if p.returncode != 0:
raise LilyExtError(u'lilypond exited with error:\n[stderr]\n%s\n'
'[stdout]\n%s' % (stderr.decode('utf-8'), stdout.decode('utf-8')))
shutil.copyfile(path.join(tempdir, 'music.png'), outfn)
#Popen(['mogrify', '-trim', outfn], stdout=PIPE, stderr=PIPE)
return relfn | [
"def render_voice(self, instrument):\n voice_render = f'<div class=\"lyrics\">{instrument.get_lyric()}</div>'\n return voice_render",
"def render(self, ontol, **args):\n pass",
"def hxlexpand():\n run_script(hxlexpand_main)",
"def playOutput():\n global coordinates, lastPlayedCoordin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def hedge_portfolio(context, data):
    """Order ``context.index`` in the amount required to neutralize the
    portfolio's beta exposure.

    Note that additional leverage is taken on, but net market exposure
    is reduced.

    :param context: algorithm context (``portfolio``, ``index``)
    :param data: data object passed through to the beta regression
    """
    factors = get_alphas_and_betas(context, data)
    beta_exposure = 0.0
    count = 0
    for asset in context.portfolio.positions:
        # Only hedge real positions (not the hedge instrument itself)
        # whose beta regression actually succeeded.
        if asset in factors and asset != context.index:
            if not np.isnan(factors[asset].beta):
                beta_exposure += factors[asset].beta
                count += 1
    if count == 0:
        # Nothing hedgeable: the original divided by zero here.
        return
    beta_hedge = -1.0 * beta_exposure / count
    dollar_amount = context.portfolio.portfolio_value * beta_hedge
    record(beta_hedge=beta_hedge)
    if not np.isnan(dollar_amount):
        order_target_value(context.index, dollar_amount)
"def trade(context, data):\n # Create a single series from our stock and bond weights\n total_weights = pd.concat([context.stock_weights, context.bond_weights])\n \n # Create a TargetWeights objective\n target_weights = opt.TargetWeights(total_weights)\n \n # Execute the order_optimal_portfolio metho... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def get_alphas_and_betas(context, data):
    """Return a DataFrame of 'alpha' and 'beta' exposures for each asset
    in the current portfolio.

    :param context: algorithm context exposing ``portfolio``, ``index``
        (the benchmark asset) and ``lookback`` (history window in days)
    :param data: data object providing ``history()``
    :returns: pd.DataFrame with rows ['alpha', 'beta'], one column per
        asset whose regression succeeded
    """
    # list() is required: on Python 3, dict.keys() returns a view with
    # no .append() method (the original crashed on the next line).
    all_assets = list(context.portfolio.positions.keys())
    if context.index not in all_assets:
        all_assets.append(context.index)
    prices = data.history(all_assets, 'price', context.lookback, '1d')
    # Drop the first row: pct_change() yields NaN there.
    returns = prices.pct_change()[1:]
    factors = {}
    for asset in context.portfolio.positions:
        try:
            y = returns[asset]
            factors[asset] = linreg(returns[context.index], y)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; failed regressions are logged and skipped.
            log.warn("[Failed Beta Calculation] asset = %s" % asset.symbol)
    return pd.DataFrame(factors, index=['alpha', 'beta'])
"def exposure(self, universe: pd.DataFrame) -> pd.DataFrame:\n exposure = pd.DataFrame(0.0, index=universe.index, columns=universe.columns)\n exposure.loc[:, self.asset] = universe.loc[:, self.asset] * self.lot\n i_entry, i_close = universe.index.get_indexer([self.entry, self.close])\n e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes charracters listed in self.custom_chars | def _remove_custom_chars(self, text: str) -> str:
patterns = "|".join([x for x in self.custom_chars])
return re.sub(patterns, "", str(text), flags=re.IGNORECASE) | [
"def remove_special_characters(self, txt: str) -> str:",
"def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)",
"def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes strings starting with http | def _remove_urls(self, text: str) -> str:
pattern = r"http\S+"
return re.sub(pattern, " ", str(text)) | [
"def remove_URL(sample):\n return re.sub(r\"http\\S+\", \"\", sample)",
"def remove_urls(self, text):\n return re.sub(r'http.?://[^\\s]+[\\s]?', '', text)",
"def clean_http_url(s: str) -> str:\n return (\n s.replace(\"/index\", \"\")[::-1]\n .replace(\"/\", \"\", 1)[::-1]\n .re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes isolated block of digits | def _remove_digit_blocks(self, text: str) -> str:
return re.sub(r"\b\d+\b", " ", str(text)) | [
"def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")",
"def removeDigits(self,txt):\n digitRemovedText = re.sub(r'[\\d]',\"\",txt)\n return digitRemovedText",
"def remove_digits(self, text):\n return re.sub('\\d+', '', text)",
"def remove_digits(box... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes special characters as defined by the pattern in self.special_chars_pattern | def _remove_special_chars(self, text: str) -> str:
pattern = re.compile(self.special_chars_pattern)
text = re.sub(pattern, " ", text)
return text | [
"def remove_special_characters(self, txt: str) -> str:",
"def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)",
"def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text",
"def remove_sp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def get_host_data(hostname, gettype='walk'):
    """Return data (tuple of classes, params) for a given host.

    :param hostname: hostname to look up
    :param gettype: one of 'work', 'optwork', 'classwork', 'walk'
    :returns: (classes, params) tuple; ({}, {}) when the hostname does
        not resolve to exactly one Node or gettype is unknown
    """
    # Map each gettype to the tree-walking strategy it selects.
    strategies = {
        'work': work_tree,
        'optwork': optimized_work_tree,
        'classwork': work_tree2,
        'walk': walk_tree,
    }
    filtered_nodes = Node.objects.filter(hostname=hostname)
    # Require exactly one matching Node.  (The original fell through and
    # implicitly returned None when the count was not 1.)
    if filtered_nodes.count() != 1 or gettype not in strategies:
        return ({}, {})
    node = filtered_nodes[0]
    exclusions = get_exclusions(node)
    (classes, params) = strategies[gettype](node, exclusions=exclusions)
    return (classes, params)
"def loadAllHostinfo():\n hidata={}\n str=\"\"\n keytypes=loadHostinfoKeys()\n keylist=sorted(hostinfo.keys())\n keylist.remove('hostname')\n for k in keylist:\n \tstr+=\" -p %s \" % k\n f=os.popen('/app/hostinfo/bin/hostinfo --noheader --csv %s' % str)\n data=f.read()\n f.close()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def add_entry(self, key, value, depth):
    """Record *key* -> *value* at *depth* unless a shallower definition
    already exists.

    Raises RuntimeError when an existing entry has exactly the same
    depth; a deeper existing entry is silently replaced, a shallower
    one wins and the call is a no-op.
    """
    existing = self.entries.get(key)
    if existing is None or existing.depth > depth:
        # No definition yet, or ours is shallower: it wins.
        self.entries[key] = NodeEntry(key, value, depth)
        return
    if existing.depth == depth:
        raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))
"def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)",
"def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def add_entries(self, objs, keyname, valuename, depth):
    """Add an entry at *depth* for every object in *objs* that carries a
    non-None attribute named *keyname*; the value is read from the
    attribute named *valuename* (None when absent)."""
    for candidate in objs:
        key = getattr(candidate, keyname, None)
        if key is not None:
            self.add_entry(key, getattr(candidate, valuename, None), depth)
"def _add_children(tree, git_objects, git_root):\n for line in cat_file(tree.sha, git_root, CatFileOption.PRETTY).split(\"\\n\"):\n *_, sha, name = line.strip().split()\n child = git_objects[sha]\n tree.add_child(name, child)",
"def add(self, fetchables, depth=1):\n if fetchables:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determine if a sysfs_gpu_name file indicates an AMD device | def _is_amd(sysfs_gpu_name):
with open(sysfs_gpu_name) as src:
return src.read().strip() == 'amdgpu' | [
"def is_GPU_available():\n code = os.system(\"nvidia-smi\")\n return code == 0",
"def is_gpu(xpu):\n return xpu.main_device is not None\n # return 'gpu' in xpu.mode",
"def find_available_device():\r\n ids = ['h', 'i', 'j', 'k', 'l', 'm', 'n']\r\n for device_id in ids:\r\n if not... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def _amd_index(sysfs_gpu_name):
    """Extract the GPU index from a sysfs path: drop the module-level
    ``_SYSFS_PREFIX`` and return the first path component after it."""
    remainder = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):]
    index, _, _ = remainder.partition('/')
    return index
"def choose_gpu():\r\n # query GPU memory and save the result in `tmp`\r\n os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\r\n # read the file `tmp` to get a gpu memory list\r\n memory_gpu = [int(x.split()[2]) for x in open('tmp','r').readlines()]\r\n log.logger.info('memory_gpu: {}... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Configures logging logging_config.json should have been placed in the directory AUTOMINE_LOG_DIR, to which this process must have read and write access | def _configure_logger():
try:
log_dir = os.environ['AUTOMINE_LOG_DIR']
log_name = _log_name()
cfg_path = os.path.join(log_dir, 'logging_config.json')
with open(cfg_path) as src:
cfg = json.load(src)
handlers = cfg.get('handlers')
for handler in iter(handlers.values()):
filename = handler.get('filename')
if filename:
filename = filename.replace('{{AUTOMINE_LOG_DIR}}',
log_dir)
filename = filename.replace('{{__name__}}', log_name)
handler['filename'] = filename
loggers = cfg.get('loggers')
if '__name__' in loggers:
loggers[log_name] = loggers.pop('__name__')
# add logging to the console if env var is set
log_to_console = 'AUTOMINE_LOG_TO_CONSOLE' in os.environ
if log_to_console and 'console' in handlers:
logger_handlers = loggers[log_name].get('handlers')
if logger_handlers:
logger_handlers.append('console')
dictConfig(cfg)
except Exception as err: # pylint: disable=broad-except
logging.basicConfig()
raise err | [
"def setup_logging():\n name_json = 'logging_config.json'\n path_json = os.path.join(os.path.dirname(__file__), name_json)\n with open(path_json, 'r') as f_json:\n dict_config = json.load(f_json)\n logging.config.dictConfig(dict_config)",
"def _configure_logs(self):\n logging.basicConfig... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Endpoint to display create item page. | def create_item_page():
catagories = [c.name for c in Catagory.fetch_all()]
return render_template('add_item.html', catagories=catagories, values={}) | [
"def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 's... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Post endpoint to create an item. If form is invalid will return create item page with errors displayed, otherwise create item and redirect to item page. | def create_item():
name = request.form['name']
catagory = request.form['catagory']
description = request.form['description']
errors = form_errors(request.form)
if errors:
catagories = [c.name for c in Catagory.fetch_all()]
values = {
'name': name, 'catagory': catagory, 'description': description
}
return render_template(
'add_item.html',
catagories=catagories,
values=values,
errors=errors
)
Item.create(name, catagory_name=catagory, description=description)
return redirect(url_for(
'read_item', catagory_name=catagory, item_name=name
)) | [
"def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # ne... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Endpoint to display update item page. | def update_item_page(item_name, catagory_name):
item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)
catagories = [c.name for c in Catagory.fetch_all()]
return render_template(
'edit_item.html',
catagories=catagories,
values={
'name': item.name,
'catagory': item.catagory_name,
'description': item.description
},
) | [
"def update_item(self, item_form):\n pass",
"def editItem(id):\r\n item = Item.query.filter_by(id=id).first()\r\n\r\n # Abort if logged in user is not the owner of the page\r\n if int(current_user.get_id()) != item.owner_id:\r\n abort(403);\r\n\r\n form = EditForm(id=id, name=item.name, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def form_errors(form):
    """Validate a create/update item form.

    Returns a dict mapping field name -> error message; empty when the
    form is valid.
    """
    errors = {}
    # Name: required, and bounded by the model column's declared length.
    name_limit = Item.name.property.columns[0].type.length
    name = form.get('name', None)
    if not name:
        errors['name'] = 'Please enter a name.'
    elif len(name) > name_limit:
        errors['name'] = 'Name must be less than %s characters.' % name_limit
    # Catagory must already exist.  (Spelling follows the model's name.)
    if not Catagory.exists(form.get('catagory', None)):
        errors['catagory'] = 'Not a valid catagory.'
    # Description: required.
    if not form.get('description', None):
        errors['description'] = 'Please enter a description.'
    return errors
"def compact_form_errors(form):\n errors = {}\n\n for name, validationerror in form.errors.as_data().items():\n errors[name] = [item.code for item in validationerror]\n\n return errors",
"def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r"""Chooses a BoTorch `MarginalLogLikelihood` class using the given `Model` class. | def choose_mll_class(
model_class: Type[Model],
state_dict: Optional[Dict[str, Tensor]] = None,
refit: bool = True,
) -> Type[MarginalLogLikelihood]:
# NOTE: We currently do not support `ModelListGP`. This code block will only
# be relevant once we support `ModelListGP`.
if (state_dict is None or refit) and issubclass(model_class, ModelListGP):
return SumMarginalLogLikelihood
return ExactMarginalLogLikelihood | [
"def get_classifier(model_type = 'mlp', **kwargs):\n if model_type == 'logistic':\n model = LogisticRegression(penalty='none', **kwargs)\n elif model_type == 'gradient_boosting':\n model = HistGradientBoostingClassifier(**kwargs)\n elif model_type == 'mlp':\n model = MLPClassifier(**kwargs)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r"""Chooses a BoTorch `AcquisitionFunction` class. | def choose_botorch_acqf_class() -> Type[AcquisitionFunction]:
# NOTE: In the future, this dispatch function could leverage any
# of the attributes of `BoTorchModel` or kwargs passed to
# `BoTorchModel.gen` to intelligently select acquisition function.
return qNoisyExpectedImprovement | [
"def _function_class(self):\n return FriCASExpectFunction",
"def run_acquisition_function(\n acquisition_function,\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n data_array,\n model_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct a `TrainingData` object based on sizes of Xs, Ys, and Yvars, and the type of model, for which the training data is intended. | def construct_training_data(
Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], model_class: Type[Model]
) -> TrainingData:
if not isclass(model_class): # pragma: no cover
raise ValueError(
f"Expected `Type[Model]`, got: {model_class} "
f"(type: {type(model_class)})."
)
if len(Xs) == len(Ys) == 1:
# Just one outcome, can use single model.
return TrainingData(X=Xs[0], Y=Ys[0], Yvar=Yvars[0])
elif issubclass(model_class, BatchedMultiOutputGPyTorchModel) and all(
torch.equal(Xs[0], X) for X in Xs[1:]
):
# All Xs are the same and model supports batched multioutput.
return TrainingData(
X=Xs[0], Y=torch.cat(Ys, dim=-1), Yvar=torch.cat(Yvars, dim=-1)
)
elif model_class is ModelListGP: # pragma: no cover
# TODO: This will be case for `ListSurrogate`.
raise NotImplementedError("`ModelListGP` not yet supported.")
raise ValueError(f"Unexpected training data format for {model_class}.") | [
"def create(model_config, batch_size, num_workers=0, augmentations=None):\n path = model_config.data_dir('anon')\n\n train_dataset = Anon(path, partition='train', download=True)\n test_dataset = Anon(path, partition='test', download=True)\n\n return TrainingData(\n train_dataset,\n test_da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def validate_data_format(
    Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], metric_names: List[str]
) -> None:
    """Validate that Xs, Ys, Yvars, and metric names all have equal lengths.

    Raises:
        ValueError: if any two of the four sequences differ in length.
    """
    lengths = (len(Xs), len(Ys), len(Yvars), len(metric_names))
    if len(set(lengths)) > 1:  # pragma: no cover
        raise ValueError(
            "Lengths of Xs, Ys, Yvars, and metric_names must match. Your "
            f"inputs have lengths {len(Xs)}, {len(Ys)}, {len(Yvars)}, and "
            f"{len(metric_names)}, respectively."
        )
"def _validateDim(self, obj1, obj2, errors, label1='Input 1', label2='Input 2'):\n if obj1 is not None and obj2 is not None:\n d1 = obj1.getXDim()\n d2 = obj2.getXDim()\n\n if d1 is None:\n errors.append(\"Can not get dimensions from %s.\" % label1)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract acquisition and optimizer options from `model_gen_options`. | def construct_acquisition_and_optimizer_options(
acqf_options: TConfig, model_gen_options: Optional[TConfig] = None
) -> Tuple[TConfig, TConfig]:
acq_options = acqf_options.copy()
opt_options = {}
if model_gen_options:
acq_options.update(
checked_cast(dict, model_gen_options.get(Keys.ACQF_KWARGS, {}))
)
# TODO: Add this if all acq. functions accept the `subset_model`
# kwarg or opt for kwarg filtering.
# acq_options[SUBSET_MODEL] = model_gen_options.get(SUBSET_MODEL)
opt_options = checked_cast(
dict, model_gen_options.get(Keys.OPTIMIZER_KWARGS, {})
).copy()
return acq_options, opt_options | [
"def get_optimizer_experimental_options():\n return context.context().get_optimizer_experimental_options()",
"def get_image_generator_from_options(options):\n #if options.dataset == 'robonet':\n if options.feature_extractor==\"spxl_segmenter\":\n params = {'reduce_features': True, 'small_reduction':... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def digest(self):
    """Return the hash digest as a bytes object: the big-endian
    representation of ``intdigest()``, equivalent to
    XXH64_canonicalFromHash() in the reference implementation."""
    # Big-endian is the canonical byte order for XXHASH digests
    # (see https://github.com/Cyan4973/xxHash/issues/45).
    value = self.intdigest()
    return struct.pack(">Q", value)
"def hexdigest(self):\n\n d = map(None, self.digest())\n d = map(ord, d)\n d = map(lambda x:\"%02x\" % x, d)\n d = ''.join(d)\n\n return d",
"def digest(self):\n return self._hash",
"def bytes_from_string_digest(cls, hash_string: str) -> bytes:\n pass",
"def di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def hexdigest(self):
    """Return the digest as a printable string of hexadecimal digits:
    two lowercase hex characters per byte of ``digest()``.

    Avoids bytes.hex(), which is unavailable on Python <= 3.4."""
    return "".join(format(byte, "02x") for byte in self.digest())
"def hexdigest(self):\n\n d = map(None, self.digest())\n d = map(ord, d)\n d = map(lambda x:\"%02x\" % x, d)\n d = ''.join(d)\n\n return d",
"def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])",
"def hexdigest(self):\n return '%08x%... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the specified instance matches the service's model. | def _isinstance(self, instance, raise_error=True):
if isinstance(instance, self.__model__):
return True
elif raise_error:
raise ValueError('{} is not of type {}.'.format(
instance, self.__model__,
))
else:
return False | [
"def is_instance_of_model(obj, model):\r\n return (isinstance(obj, model)\r\n or type(obj) is model\r\n or model in obj.__class__.__bases__)",
"def checkModel(self, model):\n # TODO",
"def instance_exists(self, instance):\n pass",
"def is_model_instance(self):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def gen_index_via_mod(s, n):
    """Map integer *n* onto a valid insertion point for string *s*:
    an index in [0, len(s)] — between any two characters or at either
    end.  An empty string always yields 0."""
    return n % (len(s) + 1) if s else 0
"def insert_newlines(s, n):\n \n i = n\n while i<len(s):\n s.insert(i, '\\n')\n i += n+2",
"def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first",
"def insert(t, n):\n return t[:-1] + (n, t[-1])",
"def esrever2(n, s):\n if n == 0:\n return s\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets all announcements on the server | def get(self):
announcements = Announcement.query.all()
announcements = announcements_schema.dump(announcements)
if not announcements:
return {'status': 'success', 'announcements': announcements}, 206 # Partial Content Served
return {'status': 'success', 'announcements': announcements}, 200 | [
"def do_list(client):\n response = client.cmd('announce/list').json()\n lines = []\n for i, announcement in enumerate(response):\n lines.append(\"[{}] {}\".format(i, announcement['text']))\n print '\\n'.join(lines)",
"def getServiceAnnouncements():\r\n soapheader['SOAPAction'] = '/getService... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
delete a announcement by ID | def delete(self, announcementID):
announcement = Announcement.query.filter_by(announcementID=announcementID)
if not announcement.first():
return {'status': 'fail', 'message': 'No announcement with ID ' + str(announcementID) + ' exists'}, 404
announcement.delete()
db.session.commit()
return {'status': 'sucess', 'message': 'Announcement Deleted'}, 200 | [
"def delete(self, _id):",
"def delete(self,note_id):",
"def delete(self, id):\n delete_entry(id)\n return None, 204",
"def deleteEntry(entry_id):",
"def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def get_module_name_from_log_category(log_category):
    """Convert a hyphenated log-category name into its Python/Yang
    module name, e.g. 'rw-generic' -> 'RwGenericYang'."""
    parts = log_category.split('-') + ['yang']
    return ''.join(part.capitalize() for part in parts)
"def normalize_module_name(layer_name):\n modules = layer_name.split('.')\n try:\n idx = modules.index('module')\n except ValueError:\n return layer_name\n del modules[idx]\n return '.'.join(modules)",
"def get_prettified_module_name(module_name: str):\n module_name = module_name.l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set Log category name to be used. | def set_category(self, category_name):
try:
module_name = get_module_name_from_log_category(category_name)
log_yang_module = importlib.import_module('gi.repository.' + module_name)
if not log_yang_module:
logger.error("Module %s is not found to be added as log category for %s", module_name, category_name)
print("Module %s is not found to be added as log category for %s", module_name, category_name)
return
for level in RwLogger.level_event_cls_map.values():
if not hasattr(log_yang_module, level):
logger.error("Module %s does not have required log notification for %s", module_name, level)
print("Module %s does not have required log notification for %s", module_name, level)
return
self._log_yang_module = log_yang_module
self._log_category_name = category_name
except Exception as e:
logger.exception("Caught error %s when trying to set log category (%s)",repr(e), category_name) | [
"def set_category(self, category_name):\n if category_name == self._category:\n return\n\n try:\n module_name = get_module_name_from_log_category(category_name)\n\n gi.require_version(module_name, '1.0')\n log_yang_module = importlib.import_module('gi.reposi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def trouve_position(self, x, y):
    """Locate the pawn's cell in the tic-tac-toe grid from the pixel
    coordinates (x, y) where the player clicked.

    Returns (xpos, ypos); each coordinate is 0 when the click falls
    outside every band.

    NOTE(review): the first x band ends at (width-400)/4 + 200 while the
    second starts at (width-400)/3 + 200, leaving a dead zone — the /4
    looks like a typo for /3, but behavior is preserved as-is.
    """
    xpos = 0
    ypos = 0
    board_w = self.width - 400
    # Horizontal band -> grid column (1..3, left to right).
    if 200 < x < board_w / 4 + 200:
        xpos = 1
    if board_w / 3 + 200 < x < board_w * 2 / 3 + 200:
        xpos = 2
    if board_w * 2 / 3 + 200 < x < self.width - 200:
        xpos = 3
    # Vertical band -> grid row (top band is row 3).
    if 0 < y < self.height / 4:
        ypos = 3
    if self.height / 4 < y < self.height / 2:
        ypos = 2
    if self.height / 2 < y < self.height * 3 / 4:
        ypos = 1
    return xpos, ypos
"def posAnteior(self):\n # # BUG: NÃO TA FAZENDO MUDANÇA DE POSIÇÃO\n linha = self.posicao[0]\n coluna = self.posicao[1]\n Arena.ponto(linha, coluna)",
"def position_of(self, p: P2) -> int:\n p0 = self.start_point # 直线段起点\n d = self.direct # 直线方向\n\n p0_t0_p: P2 ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests whether ``TextInputStyle`` instance values are all the expected value type. | def test__TextInputStyle__value():
for instance in TextInputStyle.INSTANCES.values():
vampytest.assert_instance(instance.value, TextInputStyle.VALUE_TYPE) | [
"def _isvalid(self, attr_values):\n attr_types = attrs(self.model)\n value_types = {a: v.__class__ for a, v in attr_values.items()}\n\n for attr, value_type in value_types.items():\n if value_type is not attr_types[attr]:\n msg = \"%s value should be type %s not %s\"\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that example.com was in the dashboard. | def test_link_list(self):
response = self.client.get('/tests/dashboard/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "example.com") | [
"def test_dashboard_page(self):\r\n\r\n result = self.client.get(\"/dashboard\", follow_redirects = True)\r\n self.assertNotIn(b\"Family Ties - Dashboard\", result.data)",
"def test_dashboards_v2_show(self):\n pass",
"def test_analytics_id(self):\n response = self.client.get(reverse(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that the admin list found the User and Group admins | def test_admin_list(self):
response = self.client.get('/tests/dashboard/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="/admin/auth/group/">Group</a>', html=True)
self.assertContains(response, '<a href="/admin/auth/user/">User</a>', html=True) | [
"def list_admin() -> None:\n admin_users = list(User.objects(admin=True).scalar('email'))\n if admin_users:\n echo('Allowed admins are')\n for email in admin_users:\n echo('- %s' % email)\n else:\n echo('No admins found')\n\n users = list(User.objects(admin=False).scalar(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Backup the git refs. | def backup_ref(self):
# Back ourselves up!
backup_ref="refs/backups/{0}-{1}-{2}".format(self.ref_type, self.ref_name, int( time.time() ))
command = ("git", "update-ref", backup_ref, self.old_sha1)
process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) | [
"def backup_repo(self):\n self.log.info('Initialized backup of repo.')\n try:\n dest = os.path.join(self.backup_dir, '%s.git' % (self.project))\n local('rsync -avz /var/git/projects/%s/ %s' % (self.project, dest))\n except:\n self.log.exception('Backing up the r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
def audit_failed(self):
    """Return True when the audit failed, False when it passed."""
    # Reads the auditor's private failure flag set during the audit run.
    return self.__failed
"def is_failed(self):\n return self.status.value and self.status.value.upper() == \"FAILED\"",
"def hasFailed(self):\n record = self.getRunRecord().getRecord(\"run\")\n return record.state is FAIL",
"def is_fail(self):\n return self.command == CommandResponse.fail.value",
"def fail... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Audit the commit for proper endofline characters. The UNIX type EOL is the only allowed EOL character. | def audit_eol(self):
# Regex's....
re_commit = re.compile("^\xff(.+)\xff$")
re_filename = re.compile("^diff --(cc |git a\/.+ b\/)(.+)$")
blocked_eol = re.compile(r"(?:\r\n|\n\r|\r)$")
# Bool to allow special files such as vcards to bypass the check
eol_allowed = False
# Do EOL audit!
process = get_change_diff( self.repository, ["-p"] )
for line in process.stdout:
commit_change = re.match( re_commit, line )
if commit_change:
commit = commit_change.group(1)
continue
file_change = re.match( re_filename, line )
if file_change:
filename = file_change.group(2)
eol_violation = False
eol_allowed = False
# Check if it's an allowed mimetype
# First - check with the mimetypes system, to see if it can tell
guessed_type, _ = mimetypes.guess_type(filename)
if guessed_type in self.ALLOWED_EOL_MIMETYPES:
eol_allowed = True
continue
# Second check: by file extension
# NOTE: This uses the FIRST dot as extension
splitted_filename = filename.split(os.extsep)
# Check if there's an extension or not
# NOTE This assumes that files use dots for extensions only!
if len(splitted_filename) > 1:
extension = splitted_filename[1]
if extension in self.ALLOWED_EOL_EXTENSIONS:
eol_allowed = True
continue
# Unless they added it, ignore it
if not line.startswith("+"):
continue
if re.search( blocked_eol, line ) and not eol_violation:
# Is this an allowed filename?
if eol_allowed:
continue
# Failure has been found... handle it
eol_violation = True
self.__log_failure(commit, "End of Line Style (non-Unix): " + filename); | [
"def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n col_commit = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Audit the file names in the commit. | def audit_filename(self):
for commit in self.repository.commits.values():
for filename in commit.files_changed:
if commit.files_changed[ filename ]["change"] not in ["A","R","C"]:
continue
for restriction in self.filename_limits:
if re.search(restriction, filename):
self.__log_failure(commit.sha1, "Invalid filename: " + filename) | [
"def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |