query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
A view to add a new portfolio project
def add_project(request): if not request.user.is_superuser: messages.error(request, 'Sorry, only store owners can do that.') return redirect(reverse('home')) if request.method == 'POST': form = ProjectForm(request.POST, request.FILES) if form.is_valid(): project = form.save() messages.success(request, 'Project added successfully!') return redirect(reverse('portfolio')) else: messages.error(request, 'Failed to add project.\ # Please ensure the form is valid') else: form = ProjectForm() form = ProjectForm() template = 'portfolio/add_project.html' context = { 'form': form, } return render(request, template, context)
[ "def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to edit a portfolio project
def edit_project(request, project_id): if not request.user.is_superuser: messages.error(request, 'Sorry, only store owners can do that.') return redirect(reverse('home')) project = get_object_or_404(Project, pk=project_id) if request.method == 'POST': form = ProjectForm(request.POST, request.FILES, instance=project) if form.is_valid(): form.save() messages.success(request, 'Successfully updated project') return redirect(reverse('portfolio')) else: messages.error(request, 'Failed to update project. \ # Please ensure the form is valid.') else: form = ProjectForm(instance=project) messages.info(request, f'You are editing {project.name}') template = 'portfolio/edit_project.html' context = { 'form': form, 'project': project, } return render(request, template, context)
[ "def project_edit(request, project_slug):\n project = get_object_or_404(request.user.projects.live(), slug=project_slug)\n\n if project.is_imported:\n form_class = ImportProjectForm\n else:\n form_class = CreateProjectForm\n\n form = form_class(instance=project, data=request.POST or None)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A view to delete a project from the portfolio
def delete_project(request, project_id): if not request.user.is_superuser: messages.error(request, 'Sorry, only store owners can do that.') return redirect(reverse('home')) project = get_object_or_404(Project, pk=project_id) project.delete() messages.success(request, 'Project deleted!') return redirect(reverse('portfolio'))
[ "def delete_project(request, project):\n\n project = get_object_or_404(Project, id=project, user=request.user)\n if request.method == \"POST\":\n project.delete()\n return redirect(\"/projects/\")\n return render(request, \"delete-project.html\", {\"project\": project})", "def delete_projec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a single database
def find_database(self, name_or_id, instance, ignore_missing=True): instance = self._get_resource(_instance.Instance, instance) return self._find( _database.Database, name_or_id, instance_id=instance.id, ignore_missing=ignore_missing, )
[ "def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)", "def database():\n return _d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a single database
def get_database(self, database, instance=None): return self._get(_database.Database, database)
[ "def database():\n return _databases[_active_db]", "def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)", "def select_db(cli, dbname):\n db = cli[dbname]\n return db", "def get_database(self, name):\n try:\n return [db for db in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a single flavor
def get_flavor(self, flavor): return self._get(_flavor.Flavor, flavor)
[ "def get(self, flavor):\n return self._get(\"/flavors/%s\" % base.getid(flavor), \"flavor\")", "def get_flavor(name):\r\n return nova.flavors.find(name=name)", "def flavor(self, name=None):\n return self.find(self.flavors(), name=name)", "def get_flavor(self, flavor_id):\n return self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a generator of flavors
def flavors(self, **query): return self._list(_flavor.Flavor, **query)
[ "def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))", "def show_flavors():\n return get_flavors()", "def describe_flavors(self):\r\n print(\"\\nFlavors of ice cream: \")\r\n for flavor in self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a single instance
def find_instance(self, name_or_id, ignore_missing=True): return self._find( _instance.Instance, name_or_id, ignore_missing=ignore_missing )
[ "def find_instance(cls, identifier):\r\n for instance in cls.all:\r\n if instance.identifier == identifier:\r\n return instance\r\n return None", "def get_instance_by_name(self, name):\n try:\n data = self.get_instances()\n if \"errors\" in data...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a single instance
def get_instance(self, instance): return self._get(_instance.Instance, instance)
[ "def _get_instance(self, id):\n if id not in self._instances:\n self._instances[id] = self._load_constructor(id)\n\n return self._instances[id]", "def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance", "def get(cls):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a user to 'prospects' unless the user is the campaign owner or is already linked to 'workers', 'prospects', or 'blacklist'. Also decline to add prospects when the campaign is not active. user A TcsUser instance to link to 'prospects'
def addProspect(self, user): if self.is_active and (user != self.owner) and not self.prospects.filter(pk=user.id).exists() \ and not self.workers.filter(pk=user.id) and not self.blacklist.filter(pk=user.id).exists(): self.prospects.add(user) return self return None
[ "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the user from the lists of workers and prospects, if applicable, and add the user to the blacklist. Note that adding somebody as a worker removes the person from the blacklist. user A TcsUser instance to link to the blacklist
def addToBlacklist(self, user): if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists(): self.blacklist.add(user) if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) if self.workers.filter(pk=user.id).exists(): self.workers.remove(user) return self return None
[ "async def blacklist(\n self, ctx: commands.Context, user: Union[ConvertUserAPI, discord.Member] = None\n ):\n if user is None:\n return await ctx.send_help()\n\n guild = ctx.guild\n async with self.config.guild(guild).blacklist() as data:\n if user.id not in dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the user from 'prospects' and 'blacklist', if applicable, and add the user to 'workers'. Note that adding somebody as a worker removes the person from the blacklist. user A TcsUser instance to link to workers
def addWorker(self, user): if (user != self.owner) and not self.workers.filter(pk=user.id).exists(): self.workers.add(user) if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) if self.blacklist.filter(pk=user.id).exists(): self.blacklist.remove(user) return self return None
[ "def addToBlacklist(self, user):\n if (user != self.owner) and not self.blacklist.filter(pk=user.id).exists():\n self.blacklist.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.workers.filter(pk=user.id).exists(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return active constituent voters who have not been contacted since the last election and have not been served to a supporter in the last two days. Don't limit the size of the result set here; let APIs do that.
def getVotersToContact(self): two_days_ago = date.today() - timedelta(2) year_ago = date.today() - timedelta(365) return self.voters.filter( Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago), Q(campaignstovoters__last_contacted=None) | Q(campaignstovoters__last_contacted__lt=year_ago), campaignstovoters__is_active=True, is_active=True)
[ "def get_past_incidents(self):\n return Incident.objects.filter(services__in=self,\n end_date__lte=datetime.now())", "def _inactiveplayers():\n\n rosters = _activerosters()\n dbrosters = _eidset() # players not in rosters scrape but in db.\n notactive = dbros...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return active constituent voters with valid phone contact information who have not been contacted since the last election. Don't limit the size of the result set here; let APIs do that.
def getVotersToDial(self): return self.getVotersToContact().exclude( (Q(phone_number1='') | Q(wrong_phone_number1__gt=1)), (Q(phone_number2='') | Q(wrong_phone_number2__gt=1)))
[ "def getVotersToContact(self):\n two_days_ago = date.today() - timedelta(2)\n year_ago = date.today() - timedelta(365)\n return self.voters.filter(\n Q(campaignstovoters__last_served=None) | Q(campaignstovoters__last_served__lt=two_days_ago),\n Q(campaignstovoters__last_co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the user from 'workers' or 'prospects', if applicable. user A TcsUser instance to remove from workers
def removeWorker(self, user): if user == self.owner: return None # Without these queries, there's no way to tell if anything actually gets removed. # Calling remove() on a user that is not in the set does not raise an error. if self.workers.filter(pk=user.id).exists(): self.workers.remove(user) return self if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) return self return None
[ "def addWorker(self, user):\n if (user != self.owner) and not self.workers.filter(pk=user.id).exists():\n self.workers.add(user)\n if self.prospects.filter(pk=user.id).exists():\n self.prospects.remove(user)\n if self.blacklist.filter(pk=user.id).exists():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of voters a user has contacted for the campaign.
def voterContactCount(self, user): return self.votercontact_set.filter(user=user).count()
[ "def number_of_volunteers(self):\n return self._number_of_volunteers", "def get_number_of_ver_sponsors(model):\n n_agents = len([k for k, v in model.schedule.agents_by_type['Customer'].items() if v.__class__.__name__ == 'VerificationSponsor'])\n return n_agents", "def nay_voter_cnt(self):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an indented representation of the nested dictionary.
def pretty_repr(self, num_spaces=4): def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return f'FrozenDict({pretty_dict(self._dict)})'
[ "def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n outp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new FrozenDict with additional or replaced entries.
def copy( self, add_or_replace: Mapping[K, V] = MappingProxyType({}) ) -> 'FrozenDict[K, V]': return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]
[ "def fromkeys(iterable, value=None):\n return FrozenDict(dict.fromkeys(iterable, value))", "def copy(\n x: Union[FrozenDict, Dict[str, Any]],\n add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict(\n {}\n ),\n) -> Union[FrozenDict, Dict[str, Any]]:\n\n if isinstance(x, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deep copy unfrozen dicts to make the dictionary FrozenDict safe.
def _prepare_freeze(xs: Any) -> Any: if isinstance(xs, FrozenDict): # we can safely ref share the internal state of a FrozenDict # because it is immutable. return xs._dict # pylint: disable=protected-access if not isinstance(xs, dict): # return a leaf as is. return xs # recursively copy dictionary to avoid ref sharing return {key: _prepare_freeze(val) for key, val in xs.items()}
[ "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util.tree_map\n # uses an optimized C implementation.\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unfreeze a FrozenDict. Makes a mutable copy of a `FrozenDict` mutable by transforming it into (nested) dict.
def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]: if isinstance(x, FrozenDict): # deep copy internal state of a FrozenDict # the dict branch would also work here but # it is much less performant because jax.tree_util.tree_map # uses an optimized C implementation. return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore elif isinstance(x, dict): ys = {} for key, value in x.items(): ys[key] = unfreeze(value) return ys else: return x
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursive...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new dict with additional and/or replaced entries. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.copy`.
def copy( x: Union[FrozenDict, Dict[str, Any]], add_or_replace: Union[FrozenDict[str, Any], Dict[str, Any]] = FrozenDict( {} ), ) -> Union[FrozenDict, Dict[str, Any]]: if isinstance(x, FrozenDict): return x.copy(add_or_replace) elif isinstance(x, dict): new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x new_dict.update(add_or_replace) return new_dict raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
[ "def copy(\n self, add_or_replace: Mapping[K, V] = MappingProxyType({})\n ) -> 'FrozenDict[K, V]':\n return type(self)({**self, **unfreeze(add_or_replace)}) # type: ignore[arg-type]", "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal stat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new dict where one entry is removed. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pop`.
def pop( x: Union[FrozenDict, Dict[str, Any]], key: str ) -> Tuple[Union[FrozenDict, Dict[str, Any]], Any]: if isinstance(x, FrozenDict): return x.pop(key) elif isinstance(x, dict): new_dict = jax.tree_map(lambda x: x, x) # make a deep copy of dict x value = new_dict.pop(key) return new_dict, value raise TypeError(f'Expected FrozenDict or dict, got {type(x)}')
[ "def dictRemove(d, e):\n if e in d:\n d.pop(e)", "def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]:\n if isinstance(x, FrozenDict):\n # deep copy internal state of a FrozenDict\n # the dict branch would also work here but\n # it is much less performant because jax.tree_util...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an indented representation of the nested dictionary. This is a utility function that can act on either a FrozenDict or regular dict and mimics the behavior of `FrozenDict.pretty_repr`. If x is any other dtype, this function will return `repr(x)`.
def pretty_repr(x: Any, num_spaces: int = 4) -> str: if isinstance(x, FrozenDict): return x.pretty_repr() else: def pretty_dict(x): if not isinstance(x, dict): return repr(x) rep = '' for key, val in x.items(): rep += f'{key}: {pretty_dict(val)},\n' if rep: return '{\n' + _indent(rep, num_spaces) + '}' else: return '{}' return pretty_dict(x)
[ "def pretty_repr(self, num_spaces=4):\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a subset of the COCO dataset.
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None, class_names=None, class_map=None, return_coco=False, auto_download=False): if auto_download is True: self.auto_download(dataset_dir, subset, year) coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year)) if subset == "minival" or subset == "valminusminival": subset = "val" image_dir = "{}/{}{}".format(dataset_dir, subset, year) # Select class_ids from class_names: if class_names: class_ids = sorted(coco.getCatIds(catNms=class_names)) # Load all classes or a subset? if not class_ids: # All classes class_ids = sorted(coco.getCatIds()) # All images or a subset? if class_ids: image_ids = [] for id in class_ids: imgs = [] # list of images to add to image_ids # Select at most COCO_IMAGES_PER_OBJECT and select only the images # that have at most COCO_MAX_NUM_MASK_PER_IMAGE masks inside them: for imgid in list(coco.getImgIds(catIds=[id])): if len(imgs) >= COCO_IMAGES_PER_OBJECT: break if len(coco.loadAnns(coco.getAnnIds(imgIds=[imgid], catIds=class_ids, iscrowd=None))) <= COCO_MAX_NUM_MASK_PER_IMAGE: imgs.append(imgid) image_ids.extend(imgs) #image_ids.extend(list(coco.getImgIds(catIds=[id]))[:COCO_IMAGES_PER_OBJECT]) # Remove duplicates image_ids = list(set(image_ids)) else: # All images image_ids = list(coco.imgs.keys()) # Add classes for i in class_ids: self.add_class("coco", i, coco.loadCats(i)[0]["name"]) # Add images for i in image_ids: #print(len(coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None)))) self.add_image( "coco", image_id=i, path=os.path.join(image_dir, coco.imgs[i]['file_name']), width=coco.imgs[i]["width"], height=coco.imgs[i]["height"], annotations=coco.loadAnns(coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=None))) if return_coco: return coco
[ "def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,\n class_map=None, return_coco=False):\n\n coco = COCO(\"{}/annotations/instances_{}{}.json\".format(dataset_dir, subset, year))\n if subset == \"minival\" or subset == \"valminusminival\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates this store's current state with incoming data from the network. data should be a mapping containing 'metacontacts', 'order', and 'info' structures (see comment at top of file)
def update_data(self, data): rebuild = False # This method needs to substitute some defaultdicts for the normal # dictionaries that come back from the server. # Metacontact information #if data['metacontacts'] mc_dict = data.get('metacontacts', {}) if not isinstance(mc_dict, dict): log.critical('invalid metacontacts dictionary') mc_dict = {} # Contact information like SMS numbers and email addresses. self.info = defaultdict(dict) si = self.info if 'info' in data: for (k, v) in data['info'].iteritems(): if isinstance(k, str): cmpk = k.decode('utf8') else: cmpk = k if not isinstance(cmpk, unicode): continue if cmpk.startswith('Meta') or any((cmpk.endswith('_' + prot) for prot in protocols.iterkeys())): if any(v.values()): si[k] = v for c, v in si.iteritems(): for attr in ('email', 'sms'): if attr in v: self.contact_info_changed(c, attr, v[attr]) self.metacontacts = MetaContactManager(self, mc_dict) if hasattr(self, 'new_sorter'): on_thread('sorter').call(self.new_sorter.removeAllContacts) rebuild = True # Manual ordering of groups try: self.order = deepcopy(data['order']) self.order['groups'] = list(oset(self.order['groups'])) contacts = self._filtered_contacts() self.order['contacts'] = defaultdict(list) self.order['contacts'].update(contacts) except Exception: log.critical('error receiving order') self._init_order() # note: loading tofrom data from the network is deprecated. this data # now goes out to disk. see save/load_local_data if 'tofrom' in data and isinstance(data['tofrom'], dict) and \ 'im' in data['tofrom'] and 'email' in data['tofrom']: self.dispatch.set_tofrom(deepcopy(data['tofrom'])) if rebuild: self.rebuild() self.update_order()
[ "def update(self, data):\n\n\t\tself.data = data\n\t\tself.last_update = time.time()", "def update_data():\n pass", "def set_observed_data(self, data):\n self.state_space.data = data", "def update_data(self):\n pass", "def _update_model(self, new_state, data):", "def _update_data(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Error in label only whitespace allowed, no tabs if checked label differs, raise an error
def CheckLabel(Line): for i in Line: if i == '\t': #can't detect leading tabs, stops at the first \ raise InputError(Line,"malformed input") elif i != ' ': break
[ "def test_label(self):\n nt = NewickTokenizer(newick=\"(a\\n'b',(b,c),(d,e));\")\n self.assertRaises(ValueError, nt.tokens)", "def test_ignoring_label_to_string(self):\n code = \"\"\"nop\n label:\n nop\"\"\"\n interpreter = Interpreter(code)\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parsing a given text file containing labels and sequences load file, tidy it, process each line in the file return the labels and sequences as list[tuple(string,string)]
def ParseSeqFile(FilePath): SeqFile = rSeqFile(FilePath) TidyFile = TidyLines(SeqFile) result = [] for line in TidyFile: t = ( ProcessLine(line) ) result.append(t) return(result)
[ "def parse(self):\n\n with open(self.fasta_file) as file:\n content = file.readlines()\n\n sequences = []\n sequence_ids = []\n sequence = []\n for line in content:\n if line.startswith('>'):\n sequence_ids.append(line.strip())\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return 'p1' if the current player is Player 1, and 'p2' if the current player is Player 2.
def get_current_player_name(self) -> str: if self.p1_turn: return 'p1' return 'p2'
[ "def opponent(self):\n if self.player() == 'p1':\n return 'p2'\n else:\n return 'p1'", "def other_player(player):\n if player == PLAYER_ONE:\n return PLAYER_TWO\n elif player == PLAYER_TWO:\n return PLAYER_ONE\n else:\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether move is a valid move for this GameState.
def is_valid_move(self, move: Any) -> bool: return move in self.get_possible_moves()
[ "def has_valid_move(self) -> bool:\r\n\t\tis_valid_move = False\r\n\t\tfor row in range(self._othello_game.get_rows()):\r\n\t\t\tif is_valid_move:\r\n\t\t\t\tbreak\r\n\r\n\t\t\tfor col in range(self._othello_game.get_cols()):\r\n\r\n\t\t\t\tif len(self._othello_game.placement_is_valid(row, col)) > 0: #if the move i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an estimate in interval [LOSE, WIN] of best outcome the current player can guarantee from state self.
def rough_outcome(self) -> float: # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE # pick move based on this may not be optimal but better than random # return 1 if win immediately # return -1 if all states reachable will result the other player win # return 0 if otherwise ??? what the fuck does this mean # look two states forward pass
[ "def rough_outcome(self) -> float:\n if is_win(self):\n return 1\n elif is_lose(self):\n return -1\n return 0", "def rough_outcome_strategy(game: 'Game') -> Any:\n current_state = game.current_state\n best_move = None\n best_outcome = -2 # Temporarily -- just so...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set common fields in layer to addressing dictonary.
def set_address_values(layer): cursor = arcpy.SearchCursor(layer) for row in cursor: layer_fields = arcpy.ListFields(layer) for x in range(len(layer_fields)): layer_fields[x] = layer_fields[x].name for key in address_dict: if key in layer_fields and address_dict.get(key) is None: address_dict[key] = row.getValue(key)
[ "def _update_layer_fields(layer, field_mapping):\r\n\r\n if 'layerDefinition' in layer and layer['layerDefinition'] is not None:\r\n layer_definition = layer['layerDefinition']\r\n \r\n if 'definitionExpression' in layer_definition and layer_definition['definitionExpression'] is not None...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get AWS ECS task information. For the puspose of getting the EC2 instance id by a given AWS ECS task name, for now, only the 'containerInstanceArn' is fetched from the AWS ECS task.
def get_tasks_information( task: str, list_tasks: str, cluster=CLUSTER_NAME, client=None, region=REGION, ): if not client: session = boto3.session.Session() client = session.client("ecs", region) try: # Get all tasks in the cluster. cluster_tasks = client.list_tasks(cluster=cluster)["taskArns"] logger.debug(f"[CLUSTERTASKS]: '{cluster_tasks}'.") tasks = client.describe_tasks(cluster=cluster, tasks=cluster_tasks)[ "tasks" ] logger.debug(f"[TASKS]: '{tasks}'.") # Filter for given task name. # Get instance id, container_instances = [] task_name = "" for task_ in tasks: task_definition = task_.get("taskDefinitionArn", "") if list_tasks: container_instances.append(task_definition) continue container_instance_arn = task_.get("containerInstanceArn", None) if container_instance_arn: if not list_tasks: if re.search(task, task_definition): container_instances.append(container_instance_arn) task_name = task_definition break else: container_instances.append(container_instance_arn) if list_tasks: return "\n".join(container_instances) instances = describe_instances_with_cluster( container_instances=container_instances, cluster=cluster, client=client, region=region, ) if not instances: return "" logger.info(f"Instance '{instances[0]}' runs task '{task_name}'.") return instances[0] except (botocore.exceptions.ClientError) as e: # TODO: Check right error code. if e.response["Error"]["Code"] == "ClusterNotFoundException": logger.error(f"Cluster '{cluster}' not found: {str(e)}.") else: logger.error(f"Error: {str(e)}") sys.exit(1)
[ "def get(profile, cluster, tasks):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"tasks\"] = tasks\n return client.describe_tasks(**params)", "def get_ec2_instances(self, task):\n # Assemble arguments to filter ec2 instances by task and ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Geeft bericht of iemand lang genoeg is voor de attractie.
def lang_genoeg(lengte): return
[ "def substituer(texte): # Donne une vague idée mais pas efficace, mal codé\r\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\r\n texte_lettre_only = []\r\n for car in texte:\r\n if car in alphabet:\r\n texte_lettre_only.append(car)\r\n nouveau_texte = list(texte)\r\n j = 0\r\n alpha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a Pseudocode Operation at the actual active buffer.
def AddPseudoCode(self, pcode): self.buffers[self.buffergrade].append(pcode)
[ "def add_code(self, code):\n self.code += code", "def add_operation(self):\n arg1 = self.memory[self.memory[self._cursor + 1]]\n arg2 = self.memory[self.memory[self._cursor + 2]]\n arg3 = self.memory[self._cursor + 3]\n self.memory[arg3] = arg1 + arg2\n # print(f'Cursor: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Increment the BufferGrade and initialize a new empty buffer.
def IndentBuffer(self): self.buffergrade += 1 self.buffers[self.buffergrade] = []
[ "def set_garbage(self):\n self.grade = 0", "def new_grade(self, value):\r\n self.logger.warn(\"Setting values on new_grade will NOT update the remote Canvas instance.\")\r\n self._new_grade = value", "def update(self, grade):\n for i, component_grade in enumerate(grade._component_gra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrement the BufferGrade and pop out the buffer active before.
def DeIndentBuffer(self): if self.buffergrade == 0: raise Exception("You can't deindent more.") self.buffergrade -= 1 tmp = self.buffers[self.buffergrade + 1] del self.buffers[self.buffergrade + 1] return tmp
[ "def decrease_grade(self):\n if self.grade > 0:\n self.grade -= 1", "def decrease(self):\n self.score -= self.score", "def hit_decrement(self):\n self.hit -= 1", "def decrement(self):\n self.count -= 1", "def decrement(self):\n self.data[self.pointer] -= 1\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a reference to the actual buffer activated.
def RefBuffer(self): return self.buffers[self.buffergrade]
[ "def buffer(self):\n return self.buffer_dict.get_active()", "def get_buffer(self):\n return self.buffer", "def current_buffer(self):\n return self.layout.current_buffer", "def buffer_backend(cls, *args, **kwargs):\n return cls._buffer_context", "def current_buffer_app(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Track a code indentation index for successive utilization.
def TrackIfIndex(self, index): self.indentindex.append(index)
[ "def increase_code_indent(self) -> None:\n self._parent_node.increase_code_indent()", "def _increaseindentation(self):\n self._indentlist.append(self._curindent)\n if not self._equalsigns[-1]:\n self._curindent = self._curindent + self._indent", "def addIndent(self):\r\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pop (get and remove) the last code indentation index tracked.
def PopIfIndex(self): return self.indentindex.pop()
[ "def _decreaseindentation(self):\n self._curindent = self._indentlist.pop()", "def pop(self, i=0):\n return self.frame.stack.pop(-1-i)", "def pop_scope(self):\n top = self.scope_stack[-1]\n self.scope_stack = self.scope_stack[:-1]\n return top", "def pop_current_line(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialization of protected Operation Object attribute for subclasses.
def __init__(self): self._OPERATION = None
[ "def __init__(self, **kwargs):\n super(StoredObject, self).__init__()", "def __init__(self):\n Calculation.__init__(self)\n\n self._calculation_process_type = BasicCalculationProcess", "def __init__(self, operations = []):\n self.operations = operations", "def __init__(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Operation Object generated by the command.
def getOp(self): return self._OPERATION
[ "def get_operation(self) -> Operation:\n\n if self.operation_type == 'model':\n operation = Model(operation_type=self.operation_name)\n elif self.operation_type == 'data_operation':\n operation = DataOperation(operation_type=self.operation_name)\n else:\n raise ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a temporary image for manipulation, and handles optional RGB conversion.
def _create_tmp_image(self, content): content.seek(0) image = Image.open(content) if self.force_rgb and image.mode not in ('L', 'RGB', 'RGBA'): image = image.convert('RGB') return image
[ "def temporary_image(self):\n\n image = Image.new('RGB', (1, 1))\n tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')\n image.save(tmp_file, 'jpeg')\n # important because after save(),\n # the fp is already at the end of the file\n tmp_file.seek(0) # retrieves the creat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renders the image. Override this method when creating a custom renderer.
def _render(self, image): raise NotImplementedError('Override this method to render images!')
[ "def render_canvas(self):\r\n self._display.image(self._image)\r\n self._display.display()", "def render(self):\n self.dirty = True\n self.image.fill(self.fill_color)\n if len(self.text):\n while self.font.size(self.text)[0]> self.pixel_width:\n self.ba...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize, pad and batch the input images.
def preprocess_image(self, batched_inputs): images = [x.to(self.device) for x in batched_inputs] norms = [self.normalizer(x) for x in images] size = (norms[0].shape[1],norms[0].shape[2]) images = ImageList.from_tensors(norms, self.backbone.size_divisibility) return images, size
[ "def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that apigateway v1 and apigateway v2 actions are both present in the apigateway namespace
def test_services_with_multiple_pages_apigateway(self): # API Gateway Management V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html self.assertTrue("apigateway:AddCertificateToDomain" in self.all_actions) self.assertTrue("apigateway:RemoveCertificateFromDomain" in self.all_actions) self.assertTrue("apigateway:SetWebACL" in self.all_actions) # API Gateway Management V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonapigatewaymanagement.html # API Gateway V2 doesn't have any unique actions in but it does have some unique resource types. Let's make sure those resource types are in the IAM Definition. # Resource types unique to API Gateway V2: resource_types = get_arn_types_for_service("apigateway") resource_types = list(resource_types.keys()) self.assertTrue("AccessLogSettings" in resource_types) # Resource types unique to API Gateway V1: self.assertTrue("RestApi" in resource_types)
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that awsmarketplace actions from all the different awsmarketplace SAR pages are present in the IAM definition.
def test_services_with_multiple_pages_aws_marketplace(self): # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html self.assertTrue("aws-marketplace:AcceptAgreementApprovalRequest" in self.all_actions) # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html self.assertTrue("aws-marketplace:CancelChangeSet" in self.all_actions) # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html self.assertTrue("aws-marketplace:GetEntitlements" in self.all_actions) # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html self.assertTrue("aws-marketplace:DescribeBuilds" in self.all_actions) # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html self.assertTrue("aws-marketplace:BatchMeterUsage" in self.all_actions) # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html self.assertTrue("aws-marketplace:AssociateProductsWithPrivateMarketplace" in self.all_actions) # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html self.assertTrue("aws-marketplace:DescribeProcurementSystemConfiguration" in self.all_actions) results = get_actions_for_service("aws-marketplace") actions = [ "aws-marketplace:AcceptAgreementApprovalRequest", 
"aws-marketplace:BatchMeterUsage", "aws-marketplace:CancelAgreementRequest", "aws-marketplace:CancelChangeSet", "aws-marketplace:CompleteTask", "aws-marketplace:DescribeAgreement", "aws-marketplace:DescribeBuilds", "aws-marketplace:DescribeChangeSet", "aws-marketplace:DescribeEntity", "aws-marketplace:DescribeProcurementSystemConfiguration", "aws-marketplace:DescribeTask", "aws-marketplace:GetAgreementApprovalRequest", "aws-marketplace:GetAgreementRequest", "aws-marketplace:GetAgreementTerms", "aws-marketplace:GetEntitlements", "aws-marketplace:ListAgreementApprovalRequests", "aws-marketplace:ListAgreementRequests", "aws-marketplace:ListBuilds", "aws-marketplace:ListChangeSets", "aws-marketplace:ListEntities", "aws-marketplace:ListTasks", "aws-marketplace:MeterUsage", "aws-marketplace:PutProcurementSystemConfiguration", "aws-marketplace:RegisterUsage", "aws-marketplace:RejectAgreementApprovalRequest", "aws-marketplace:ResolveCustomer", "aws-marketplace:SearchAgreements", "aws-marketplace:StartBuild", "aws-marketplace:StartChangeSet", "aws-marketplace:Subscribe", "aws-marketplace:Unsubscribe", "aws-marketplace:UpdateAgreementApprovalRequest", "aws-marketplace:UpdateTask", "aws-marketplace:ViewSubscriptions", ] for action in actions: self.assertTrue(action in results)
[ "def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that greengrass v1 and greengrass v2 actions are both present in the greengrass namespace
def test_services_with_multiple_pages_greengrass(self): # Greengrass V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrass.html self.assertTrue("greengrass:CreateResourceDefinition" in self.all_actions) # Greengrass V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsiotgreengrassv2.html self.assertTrue("greengrass:CreateComponentVersion" in self.all_actions) results = get_actions_for_service("greengrass") actions = [ "greengrass:AssociateRoleToGroup", "greengrass:CreateConnectorDefinition", "greengrass:CreateConnectorDefinitionVersion", "greengrass:CreateCoreDefinition", "greengrass:CreateCoreDefinitionVersion", "greengrass:CreateDeviceDefinition", "greengrass:CreateDeviceDefinitionVersion", "greengrass:CreateFunctionDefinition", "greengrass:CreateFunctionDefinitionVersion", "greengrass:CreateGroup", "greengrass:CreateGroupCertificateAuthority", "greengrass:CreateGroupVersion", "greengrass:CreateLoggerDefinition", "greengrass:CreateLoggerDefinitionVersion", "greengrass:CreateResourceDefinition", "greengrass:CreateResourceDefinitionVersion", "greengrass:CreateSoftwareUpdateJob", "greengrass:CreateSubscriptionDefinition", "greengrass:CreateSubscriptionDefinitionVersion", "greengrass:DeleteConnectorDefinition", "greengrass:DeleteCoreDefinition", "greengrass:DeleteDeviceDefinition", "greengrass:DeleteFunctionDefinition", "greengrass:DeleteGroup", "greengrass:DeleteLoggerDefinition", "greengrass:DeleteResourceDefinition", "greengrass:DeleteSubscriptionDefinition", "greengrass:DisassociateRoleFromGroup", "greengrass:Discover", "greengrass:GetAssociatedRole", "greengrass:GetBulkDeploymentStatus", "greengrass:GetConnectorDefinition", "greengrass:GetConnectorDefinitionVersion", "greengrass:GetCoreDefinition", "greengrass:GetCoreDefinitionVersion", "greengrass:GetDeploymentStatus", "greengrass:GetDeviceDefinition", "greengrass:GetDeviceDefinitionVersion", "greengrass:GetFunctionDefinition", 
"greengrass:GetFunctionDefinitionVersion", "greengrass:GetGroup", "greengrass:GetGroupCertificateAuthority", "greengrass:GetGroupCertificateConfiguration", "greengrass:GetGroupVersion", "greengrass:GetLoggerDefinition", "greengrass:GetLoggerDefinitionVersion", "greengrass:GetResourceDefinition", "greengrass:GetResourceDefinitionVersion", "greengrass:GetSubscriptionDefinition", "greengrass:GetSubscriptionDefinitionVersion", "greengrass:GetThingRuntimeConfiguration", "greengrass:ListBulkDeploymentDetailedReports", "greengrass:ListBulkDeployments", "greengrass:ListConnectorDefinitionVersions", "greengrass:ListConnectorDefinitions", "greengrass:ListCoreDefinitionVersions", "greengrass:ListCoreDefinitions", "greengrass:ListDeviceDefinitionVersions", "greengrass:ListDeviceDefinitions", "greengrass:ListFunctionDefinitionVersions", "greengrass:ListFunctionDefinitions", "greengrass:ListGroupCertificateAuthorities", "greengrass:ListGroupVersions", "greengrass:ListGroups", "greengrass:ListLoggerDefinitionVersions", "greengrass:ListLoggerDefinitions", "greengrass:ListResourceDefinitionVersions", "greengrass:ListResourceDefinitions", "greengrass:ListSubscriptionDefinitionVersions", "greengrass:ListSubscriptionDefinitions", "greengrass:ResetDeployments", "greengrass:StartBulkDeployment", "greengrass:StopBulkDeployment", "greengrass:UpdateConnectorDefinition", "greengrass:UpdateCoreDefinition", "greengrass:UpdateDeviceDefinition", "greengrass:UpdateFunctionDefinition", "greengrass:UpdateGroup", "greengrass:UpdateGroupCertificateConfiguration", "greengrass:UpdateLoggerDefinition", "greengrass:UpdateResourceDefinition", "greengrass:UpdateSubscriptionDefinition", "greengrass:UpdateThingRuntimeConfiguration" ] for action in actions: self.assertTrue(action in results) # if action not in results: # print(action)
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that elb v1 and elb v2 actions are both present in the elasticloadbalancing namespace
def test_services_with_multiple_pages_elb(self): results = get_actions_for_service("elasticloadbalancing") actions = [ "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", "elasticloadbalancing:AttachLoadBalancerToSubnets", "elasticloadbalancing:ConfigureHealthCheck", "elasticloadbalancing:CreateAppCookieStickinessPolicy", "elasticloadbalancing:CreateLBCookieStickinessPolicy", "elasticloadbalancing:CreateLoadBalancerListeners", "elasticloadbalancing:CreateLoadBalancerPolicy", "elasticloadbalancing:DeleteLoadBalancerListeners", "elasticloadbalancing:DeleteLoadBalancerPolicy", "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancerPolicies", "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", "elasticloadbalancing:DetachLoadBalancerFromSubnets", "elasticloadbalancing:DisableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:EnableAvailabilityZonesForLoadBalancer", "elasticloadbalancing:RegisterInstancesWithLoadBalancer", "elasticloadbalancing:SetLoadBalancerListenerSSLCertificate", "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", ] for action in actions: self.assertTrue(action in results)
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that lex v1 and lex v2 actions are both present in the lex namespace
def test_services_with_multiple_pages_lex(self): # Lex V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlex.html self.assertTrue("lex:DeleteUtterances" in self.all_actions) # Lex V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonlexv2.html self.assertTrue("lex:ListBotLocales" in self.all_actions) results = get_actions_for_service("lex") actions = [ "lex:CreateIntentVersion", "lex:CreateSlotTypeVersion", "lex:DeleteBotChannelAssociation", "lex:DeleteIntentVersion", "lex:DeleteSlotTypeVersion", "lex:GetBot", "lex:GetBotAlias", "lex:GetBotAliases", "lex:GetBotChannelAssociation", "lex:GetBotChannelAssociations", "lex:GetBotVersions", "lex:GetBots", "lex:GetBuiltinIntent", "lex:GetBuiltinIntents", "lex:GetBuiltinSlotTypes", "lex:GetExport", "lex:GetImport", "lex:GetIntent", "lex:GetIntentVersions", "lex:GetIntents", "lex:GetMigration", "lex:GetMigrations", "lex:GetSlotType", "lex:GetSlotTypeVersions", "lex:GetSlotTypes", "lex:GetUtterancesView", "lex:PostContent", "lex:PostText", "lex:PutBot", "lex:PutBotAlias", "lex:PutIntent", "lex:PutSlotType", "lex:StartMigration", ] for action in actions: self.assertTrue(action in results)
[ "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that Kinesis Analytics V1 actions are present in the kinesisanalytics namespace
def test_services_with_multiple_pages_kinesis_analytics(self): # Kinesis Analytics V1 results = get_actions_for_service("kinesisanalytics") actions = [ "kinesisanalytics:GetApplicationState", # Only in v1, not v2 "kinesisanalytics:ListApplications", # In both ] for action in actions: self.assertTrue(action in results)
[ "def test_kafka_action_names_overlap_issue(self):\n # Kafka actions used to be in two pages but are now one. This verifies the current state.\n # results = get_actions_for_service(\"kafka\")\n # print(results)\n actions = [\n \"kafka:BatchAssociateScramSecret\",\n \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that ses v1 and ses v2 actions are both present in the ses namespace
def test_services_with_multiple_pages_ses(self): # SES V1: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html self.assertTrue("ses:PutIdentityPolicy" in self.all_actions) # SES V2: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonsimpleemailservicev2.html self.assertTrue("ses:ListImportJobs" in self.all_actions) results = get_actions_for_service("ses") actions = [ "ses:CloneReceiptRuleSet", "ses:CreateConfigurationSetTrackingOptions", "ses:CreateReceiptFilter", "ses:CreateReceiptRule", "ses:CreateReceiptRuleSet", "ses:CreateTemplate", "ses:DeleteConfigurationSetTrackingOptions", "ses:DeleteIdentity", "ses:DeleteIdentityPolicy", "ses:DeleteReceiptFilter", "ses:DeleteReceiptRule", "ses:DeleteReceiptRuleSet", "ses:DeleteTemplate", "ses:DeleteVerifiedEmailAddress", "ses:DescribeActiveReceiptRuleSet", "ses:DescribeConfigurationSet", "ses:DescribeReceiptRule", "ses:DescribeReceiptRuleSet", "ses:GetAccountSendingEnabled", "ses:GetIdentityDkimAttributes", "ses:GetIdentityMailFromDomainAttributes", "ses:GetIdentityNotificationAttributes", "ses:GetIdentityPolicies", "ses:GetIdentityVerificationAttributes", "ses:GetSendQuota", "ses:GetSendStatistics", "ses:GetTemplate", "ses:ListIdentities", "ses:ListIdentityPolicies", "ses:ListReceiptFilters", "ses:ListReceiptRuleSets", "ses:ListTemplates", "ses:ListVerifiedEmailAddresses", "ses:PutIdentityPolicy", "ses:ReorderReceiptRuleSet", "ses:SendBounce", "ses:SendBulkTemplatedEmail", "ses:SendRawEmail", "ses:SendTemplatedEmail", "ses:SetActiveReceiptRuleSet", "ses:SetIdentityDkimEnabled", "ses:SetIdentityFeedbackForwardingEnabled", "ses:SetIdentityHeadersInNotificationsEnabled", "ses:SetIdentityMailFromDomain", "ses:SetIdentityNotificationTopic", "ses:SetReceiptRulePosition", "ses:TestRenderTemplate", "ses:UpdateAccountSendingEnabled", "ses:UpdateConfigurationSetReputationMetricsEnabled", "ses:UpdateConfigurationSetSendingEnabled", 
"ses:UpdateConfigurationSetTrackingOptions", "ses:UpdateReceiptRule", "ses:UpdateTemplate", "ses:VerifyDomainDkim", "ses:VerifyDomainIdentity", "ses:VerifyEmailAddress", "ses:VerifyEmailIdentity", ] for action in actions: self.assertTrue(action in results)
[ "def test_gh_226_elasticloadbalancing_v1_and_v2(self):\n results = get_actions_for_service(\"elasticloadbalancing\")\n # print(json.dumps(results, indent=4))\n lb_v1_only_action = \"elasticloadbalancing:CreateTargetGroup\"\n lb_v2_only_action = \"elasticloadbalancing:SetSecurityGroups\"\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that kafka actions are not overwritten in the IAM definition
def test_kafka_action_names_overlap_issue(self): # Kafka actions used to be in two pages but are now one. This verifies the current state. # results = get_actions_for_service("kafka") # print(results) actions = [ "kafka:BatchAssociateScramSecret", "kafka:BatchDisassociateScramSecret", "kafka:CreateClusterV2", "kafka:DeleteConfiguration", "kafka:DescribeClusterV2", "kafka:ListClustersV2", "kafka:ListConfigurationRevisions", "kafka:ListKafkaVersions", "kafka:ListScramSecrets", "kafka:RebootBroker", "kafka:UpdateBrokerType", "kafka:UpdateConfiguration", "kafka:UpdateConnectivity", "kafka:UpdateSecurity" ] for action in actions: self.assertTrue(action in self.all_actions)
[ "def test_excluded_actions_scan_policy_file(self):\n test_policy = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1. Maintain a decreasing stack by scanning nums from left to right. 2. Then scan the nums from right to left and calculate the maxWidth between each ramp.
def maxWidthRamp(self, nums: list[int]) -> int: maxWidth = 0 descStack = [] # Generate decreasing stack. for i, num in enumerate(nums): if not descStack or nums[descStack[-1]] > num: descStack.append(i) # Check elements from right to left. for j in reversed(range(len(nums))): while descStack and nums[descStack[-1]] <= nums[j]: maxWidth = max(maxWidth, j - descStack.pop()) return maxWidth
[ "def peg_width_per_levels(base_width):\n limiter = 2\n decrementer = -2\n decrementing_width = int(base_width)\n peg_count_per_level = []\n while decrementing_width >= limiter:\n peg_count_per_level.append(int(decrementing_width))\n decrementing_width += decrementer\n return peg_coun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given an input (instance of the BenchInput tuple), constructs and validates a disjunctive ChaumPedersen proof, returning the time (in seconds) to do each operation.
def chaum_pedersen_bench(bi: BenchInput) -> Tuple[float, float]: (keypair, r, s) = bi ciphertext = get_optional(elgamal_encrypt(0, r, keypair.public_key)) start1 = timer() proof = make_disjunctive_chaum_pedersen_zero( ciphertext, r, keypair.public_key, ONE_MOD_Q, s ) end1 = timer() valid = proof.is_valid(ciphertext, keypair.public_key, ONE_MOD_Q) end2 = timer() if not valid: raise Exception("Wasn't expecting an invalid proof during a benchmark!") return end1 - start1, end2 - end1
[ "def dpTime():\n print \"calculating...\"\n startTime = time.time()\n dpa = dpAdvisor(subjects, 20)\n endTime = time.time()\n #printSubjects(dpa)\n print \"%.4f\" % (endTime-startTime)", "def dpTime():\n start_time = time.time()\n subjects = loadSubjects(SUBJECT_FILENAME)\n maxWork = 50...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test of function choosing if log rotation is needed
def test_need_to_rotate_log(self): self.assertTrue(need_to_rotate_log(0, 20, 'daily', 15, 'daily'), 'rotate log by time') self.assertFalse(need_to_rotate_log(10, 20, 'daily', 15, 'hourly'), 'do not rotate log by time') self.assertTrue(need_to_rotate_log(10, 20, 'daily', 25, None), 'rotate log by max size') self.assertFalse(need_to_rotate_log(10, 20, 'hourly', 5, 'hourly'), 'do not rotate log by min size')
[ "def _should_rotate_log(self, handler):\n if handler[\"rotate_log\"]:\n rotate_time_index = handler.get(\"rotate_log_index\", \"day\")\n try:\n rotate_time_index = self._decode_time_rotation_index(rotate_time_index)\n except ValueError:\n rotate_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests of try rotation with compress in configuration
def test_process_log_with_compress_in_configuration(self): with tempfile.TemporaryDirectory() as sandbox: with mock.patch('sys.stdout', new=io.StringIO()) as fake_stdout: srcfile = Path(sandbox, 'pokus.log') srcfile.touch() destfile = Path(sandbox, 'backup', 'pokus.log') compressors = process_log( datetime.datetime(year=2019, month=1, day=10, hour=21, minute=30), { 'target': '{{path}}/backup/{{name}}.{{ext}}', 'interval': 'hourly', 'compress': 'gzip -9' }, 'hourly', str(srcfile), 10 ) self.assertEqual(compressors, [[sandbox, 'gzip', '-9', str(destfile)]]) self.assertFalse(srcfile.exists()) self.assertTrue(destfile.exists()) self.assertEqual(fake_stdout.getvalue(), 'Checking "{src}"... rotating... "{src}" -> "{dest}" done.\n'.format(src=srcfile, dest=destfile))
[ "def test_compress_works(self):\n tau = 45.0\n mrate = 60.0\n Mrate = 100.0\n gain = 5\n\n tmax = 50.0\n dt = 0.2\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = gain\n\n self.motor.error_fct ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test get_spec_config on empty conf
def test_get_spec_config_empty(self): spec_conf = get_spec_config({}, '') self.assertEqual(spec_conf, {})
[ "def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assertEqual(spec_conf, {'foo': 'bar'})", "def test_get_missing_param(self):\n Config.init(sample_conf)\n res = Config...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test get_spec_config on conf with defaults
def test_get_spec_config_defaults(self): spec_conf = get_spec_config({ 'defaults': { 'foo': 'bar' } }, '') self.assertEqual(spec_conf, {'foo': 'bar'})
[ "def test_get_spec_config_match(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'default_foo': 'default_bar',\n 'foo': 'bar'\n },\n 'specific': [\n {'mask': ['filenomatch'], 'foo': 'bar_nomatch'},\n {'mask':...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test get_spec_config on matching conf
def test_get_spec_config_match(self): spec_conf = get_spec_config({ 'defaults': { 'default_foo': 'default_bar', 'foo': 'bar' }, 'specific': [ {'mask': ['filenomatch'], 'foo': 'bar_nomatch'}, {'mask': ['filematch'], 'foo': 'match'}, {'mask': ['filenomatch2'], 'foo': 'bar_nomatch2'} ] }, 'filematch') self.assertEqual(spec_conf, {'default_foo': 'default_bar', 'foo': 'match', 'mask': ['filematch']})
[ "def test_get_canary_configs_using_get(self):\n pass", "def test_get_canary_config_using_get(self):\n pass", "def test_get_spec_config_defaults(self):\n spec_conf = get_spec_config({\n 'defaults': {\n 'foo': 'bar'\n }\n }, '')\n self.assert...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the given modifier name is a valid one. If not, raise an exception based on the violation.
def _isValidModifier(self, modifiers, modifierName): if Modifiers.ILLEGAL_MODIFIER_PATTER.search(modifierName): msg = ('Modifier named "{0}" in sheet {1} contains illegal characters. ' 'Supported characters are a to z, A to Z, 0 to 9 and underscore "_". ' 'Spaces are not allowed characters, use underscore instead. For example ' '"some_mod".' ).format(modifierName, MODIFIER_LIST_SHEET_NAME) raise errors.UnsupportedCharacter(MODIFIER_LIST_SHEET_NAME, msg) if modifierName in map(lambda mod: mod.name, modifiers): msg = ('Modifier named "{0}" already exists in the sheet {1}. ' 'Modifier names must be unique. To fix remove or rename ' 'duplicates.' ).format(modifierName, MODIFIER_LIST_SHEET_NAME) raise errors.DuplicateError(MODIFIER_LIST_SHEET_NAME, msg)
[ "def validate_name(name, reserved_names=()):", "def check_name(name):\n if len(name) > WorkflowCRD.NAME_MAX_LENGTH:\n raise ValueError(\n \"Name is too long. Max length: {}, now: {}\"\n \"\".format(WorkflowCRD.NAME_MAX_LENGTH, len(name))\n )\n if \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines if a given datetime.datetime is aware.
def is_aware(value): return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
[ "def is_aware(value: datetime) -> bool:\n\n return value.utcoffset() is not None", "def dt_is_aware(dtime):\n if dtime.tzinfo is not None and dtime.tzinfo.utcoffset(dtime) is not None:\n return True\n\n return False", "def dt_is_aware(dt_value):\n return dt_value.tzinfo is not None and dt_val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Define ZMQ connection and return socket to work with
def connect_to_worker(): socket = context.socket(zmq.REQ) socket.connect("tcp://localhost:5555") return socket
[ "def get_connection(self):\n\n # Socket type DEALER is used in asynchronous request/reply patterns.\n # It prepends identity of the socket with each message.\n socket = self.zmq_context.socket(zmq.DEALER)\n socket.setsockopt(zmq.IDENTITY, self.identity)\n socket.connect('tcp://127...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Used to handle not responding zmq server
def raise_timeout(*args, **kwargs): raise ZMQNotResponding('ZMQ server is not responding')
[ "def fix_zmq_exit():\n import zmq\n ctx = zmq.Context.instance()\n ctx.term()", "def test_recv_nomsg(self):\n flag, msg_recv = self.recv_instance.recv(timeout=self.sleeptime)\n assert(not flag)\n nt.assert_equal(msg_recv, self.recv_instance.eof_msg)", "def checkConnection(self,msg)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates a draft with the given email data; the user id should be either 'me', 'users/email.com', or 'users/{AAD_userId}',
def create_draft(auth, subject, body, addresses, user_id, cc_addresses=[], attachments_list=None): data = {} data['Subject'] = subject data['Body'] = {} data['Body']['ContentType'] = 'HTML' data['Body']['Content'] = body data['ToRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in addresses] data['ccRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in cc_addresses] if attachments_list is not None: data['Attachments'] = attachments_list params = json.dumps(data).encode('utf8') url = "{api_url}/{user_id}/messages".format(api_url=API_URL, user_id=user_id) headers = { 'Content-Type': 'application/json', 'Authorization': 'Bearer {}'.format(auth.access_token) } req = urllib.request.Request(url, params, headers) try: resp = urllib.request.urlopen(req) resp_data = json.load(resp) logging.getLogger(__name__).info("Draft created") return resp_data['id'] except urllib.error.HTTPError as err: raise AzureError(err)
[ "def create_draft(self, subject=\"\", to=\"\", cc=\"\", bcc=\"\", content=\"\", draft_folder=None):\n \n new_message = self._create_message_wrapper(subject, to, cc, bcc, content)\n \n if not self.is_simulate:\n try:\n if draft_folder is not None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
iterator which goes through all the pages to find all the emails
def get_all_emails_it(auth, user_id, folder_id='AllItems', pages_limit=None, pages_size=50, **kwargs): i = 0 args_dict = dict(kwargs, top=pages_size, skip=pages_size * i) curr_emails = get_emails(auth, user_id, folder_id, **args_dict) while len(curr_emails) != 0: yield curr_emails if pages_limit is not None and i >= pages_limit: break i += 1 args_dict = dict(kwargs, top=pages_size, skip=pages_size * i) curr_emails = get_emails(auth, user_id, folder_id, **args_dict)
[ "def test_get_inbox_emails_paginated(self):\n pass", "def get_email_addresses(startdate, enddate, user, password):\n emails = []\n page = 1\n more_pages = True\n\n while more_pages:\n response = requests.get(\n 'https://restapi.surveygizmo.com/v2/survey/{survey}'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the masked ratio.
def get_masked_ratio(mask):
    """Return the fraction of pixels in *mask* with value 0.

    Relies on the PIL-style interface: ``histogram()`` bin 0 counts the
    zero-valued (masked) pixels, and ``mask.size`` is (width, height).
    """
    zero_count = mask.histogram()[0]
    total_pixels = np.prod(mask.size)
    return zero_count / total_pixels
[ "def maskedFraction(self):\n\n\t\tif not self._masked:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn self._masked_fraction", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def mask_percent(img):\n if (len(img.shape) == 3) and (i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a dictionary with domain architectures exclusive in a single pathogen type group.
def generateArchitectureDataStructure(db, collapse_pathogen_groups=False):
    """Create dictionaries of domain architectures exclusive to one pathogen-type group.

    Args:
        db: Database accessor exposing ``getNumSpeciesPathogen()`` and
            ``getArchitecturePathogenTypeIterator()``.
        collapse_pathogen_groups: When True, the paired pathogen groups
            {0, 1} and {3, 4} are each treated as a single group.

    Returns:
        Tuple ``(architecture_pathogen_dict, arch_strains_species_dict)``,
        both keyed by ``(architecture_id, architecture_acc)`` and restricted
        to architectures that are exclusive to one (possibly collapsed)
        pathogen group.
    """
    # Calculate total numbers of species and strains for each pathogen group
    counts_species_pathogen_dict = defaultdict(lambda: defaultdict(int))
    for row in db.getNumSpeciesPathogen():
        counts_species_pathogen_dict[row['pathogen_type']]['num_species'] = row['num_species']
        counts_species_pathogen_dict[row['pathogen_type']]['num_strains'] = row['num_strains']
    architecture_pathogen_dict = defaultdict(list)
    arch_strains_species_dict = defaultdict(lambda: defaultdict(list))
    for row in db.getArchitecturePathogenTypeIterator():
        strains = row['species']
        # Strain strings look like "Species name (strain info)"; keep the species part.
        species = str(strains).split(' (')[0]
        pathogen_type = row['pathogen_type']
        key = (row['architecture'], row['architecture_acc'])
        architecture_pathogen_dict[key].append(pathogen_type)
        arch_strains_species_dict[key]['species'].append(species)
        arch_strains_species_dict[key]['strains'].append(strains)
    # BUGFIX: iterate over a snapshot of the keys. Popping entries while
    # iterating the live .keys() view raises RuntimeError on Python 3.
    for architecture in list(architecture_pathogen_dict.keys()):
        # If an architecture is only present in proteins of a certain
        # pathogen_type, it should have only 1 pathogen_type.
        pathogen_groups_set = set(architecture_pathogen_dict[architecture])
        if not exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):
            architecture_pathogen_dict.pop(architecture)
            arch_strains_species_dict.pop(architecture)
        else:
            # Check if the architecture is present in all species and strains
            species_set = set(arch_strains_species_dict[architecture]['species'])
            strains_set = set(arch_strains_species_dict[architecture]['strains'])
            total_num_species, total_num_strains = get_number_ssp_stt_members(
                counts_species_pathogen_dict, pathogen_groups_set,
                collapse_pathogen_groups)
            arch_strains_species_dict[architecture]['total_num_species'] = total_num_species
            arch_strains_species_dict[architecture]['total_num_strains'] = total_num_strains
            if total_num_species == len(species_set):
                # NOTE(review): bare defaultdict access — it creates the
                # 'all_species' key with an empty list as a side effect.
                # Presumably this was meant to set a flag; preserved as-is.
                arch_strains_species_dict[architecture]['all_species']
            if total_num_strains == len(strains_set):
                arch_strains_species_dict[architecture]['all_strains']
    return architecture_pathogen_dict, arch_strains_species_dict
[ "def get_architectures() -> dict:\n archs = {}\n for arch in list(Architecture):\n archs[arch.name] = arch\n\n return archs", "def for_sim_type(sim_type):\n if sim_type not in cfg:\n return {}\n return pkcollections.map_to_dict(cfg[sim_type])", "def formDomain(self):\r\n doma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Boolean function to check if a given architecture is exclusive.
def exclusive_arch(pathogen_groups_set, collapse_pathogen_groups):
    """Return True when an architecture's pathogen groups count as exclusive.

    A single group is always exclusive. When *collapse_pathogen_groups* is
    set, the paired groups {0, 1} and {3, 4} are each treated as one
    collapsed group and therefore also count as exclusive.
    """
    if len(pathogen_groups_set) == 1:
        return True
    # Only check pathogen grouping when the flag is on.
    if not collapse_pathogen_groups:
        return False
    if len(pathogen_groups_set) > 2:
        return False
    if {0, 1} <= pathogen_groups_set:
        return True
    if {3, 4} <= pathogen_groups_set:
        return True
    return False
[ "def IsExclusive(self):\n return False", "def isCheckedOutExclusive(self) -> bool:\n ...", "def exclusive_state(self) -> bool:\n return pulumi.get(self, \"exclusive_state\")", "def is_skip_and_return_zero_patch_available(\n\t self, addr: int, arch: Optional['architecture.Architecture'] = N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the employee has rejoined, otherwise False.
def is_rejoinee(self):
    """Return True when the employee has rejoined (more than one start date)."""
    start_dates = self._start_date
    return len(start_dates) > 1
[ "def is_employee():\n return _is_member('uw_employee')", "def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return (self.date_joined + expiration_date <= datetime.datetime.now())", "def already_booked(slots, attendees, user_name):\n already...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process the Exit of employee
def process_employee_exit(self):
    """Record the current timestamp as the employee's exit date.

    Raises:
        RejoiningException: If the employee is not currently in service.
    """
    # Guard clause: exits can only be processed for serving employees.
    if not self.is_employee_serving():
        raise RejoiningException("Employee not in service. Cannot process exit.")
    self._end_date.append(datetime.now().isoformat())
    print(f"Successfully processed exit for employee {self.name} on"
          f"{self._end_date[-1]}\nWe wish {self.name} for future endeavours")
[ "def identify_result_exit(self, record):\n return [\"exit\"]", "def _handler_generic_exit(self):", "def _common_state_exit(self, *args, **kwargs):", "def _exit(n):\n pass", "def exit(self, status=0,message=None):\n\t\tpass", "def on_exit_step(self) -> Event:\n return self._on_exit_step", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a full media url from Bandwidth and extracts the media id
def get_media_id(media_url):
    """Extract the media id from a full Bandwidth media URL.

    Plain URLs (".../media/file.png") use the trailing segment as the id.
    Versioned URLs (".../media/abc/0/file.png") use the last three segments
    joined with the percent-encoded slash "%2F", because the SDK currently
    parses out literal "/" characters.
    """
    segments = media_url.split("/")
    if segments[-2] == "media":
        return segments[-1]
    return "%2F".join(segments[-3:])
[ "def extract_media_id(self, s):\n return int(re.findall(r\"_(\\d+).ts\", s)[0])", "def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id", "def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:\n try:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a full media url from Bandwidth and extracts the filename
def get_media_filename(media_url):
    """Return the filename portion (final path segment) of a media URL."""
    return media_url.rsplit("/", 1)[-1]
[ "def get_content_name(self, content_url):\n endpoint = content_url.split('/')[-1]\n return re.match(r'(.+\\.(?:jpg|mp4))', endpoint).group(0)", "def getImageFilename(url):\n head, filename = url.rsplit('/', 1)\n return filename", "def get_media_id(media_url):\n split_url = media_url.split...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a list of media urls and downloads the media into the temporary storage
def download_media_from_bandwidth(media_urls):
    """Download each media URL from Bandwidth into local (temporary) storage.

    Returns the list of local filenames written. Download errors are printed
    and the (possibly empty) file is still recorded in the result.
    """
    saved_filenames = []
    for url in media_urls:
        media_id = get_media_id(url)
        local_name = get_media_filename(url)
        with open(local_name, "wb") as out_file:
            try:
                media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)
                out_file.write(media.body)
            except Exception as exc:
                print(exc)
        saved_filenames.append(local_name)
    return saved_filenames
[ "def download_all_media():\n # download_path = '{}'.format(os.path.join(folder_name(conv_s()), \"media\"))\n # profile.set_preference(\"browser.download.dir\", download_path)\n open_media()\n download_media()\n while left_media():\n pass\n esc()\n esc()\n o(\"Finished downloading medi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a list of media files and uploads them to Bandwidth The media file names are used as the media id
def upload_media_to_bandwidth(media_files):
    """Upload each local file to Bandwidth, using the filename as the media id."""
    for filename in media_files:
        with open(filename, "rb") as source:
            payload = source.read()
        try:
            # Note: the filename doubles as the media id.
            response = messaging_client.upload_media(
                MESSAGING_ACCOUNT_ID, filename, str(len(payload)), body=payload)
        except Exception as exc:
            print(exc)
[ "def _upload(self, files, voice_clip=False):\n file_dict = {\"upload_{}\".format(i): f for i, f in enumerate(files)}\n\n data = {\"voice_clip\": voice_clip}\n\n j = self._payload_post(\n \"https://upload.facebook.com/ajax/mercury/upload.php\", data, files=file_dict\n )\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes all of the given files
def remove_files(files):
    """Delete every file named in *files* from disk."""
    for path in files:
        os.remove(path)
[ "def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)", "def remove_files(self):\n for ff in self._all_paths:\n try:\n os.remove(ff)\n except OSError:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes information from a Bandwidth inbound message callback that includes media and responds with a text message containing the same media sent through Bandwidth's media resource.
def handle_inbound_media_mms(to, from_, media):
    """Echo inbound MMS media back to the sender via Bandwidth's media resource.

    Downloads the attachments, re-uploads them under the same ids, removes
    the local copies, then replies with a message referencing the re-hosted
    media.
    """
    local_files = download_media_from_bandwidth(media)
    upload_media_to_bandwidth(local_files)
    remove_files(local_files)
    message = MessageRequest()
    message.application_id = MESSAGING_APPLICATION_ID
    message.to = [from_]
    message.mfrom = to
    message.text = "Rebound!"
    # Media ids double as filenames, so each public URL is simply the
    # Bandwidth media base endpoint plus the filename.
    message.media = [BANDWIDTH_MEDIA_BASE_ENDPOINT + name for name in local_files]
    try:
        messaging_client.create_message(MESSAGING_ACCOUNT_ID, message)
    except Exception as exc:
        print(exc)
    return None
[ "def send_callback(context):\n publish_next_media_to_channel(context, chat_id=context.job.context)", "def handle_inbound_message():\n data = json.loads(request.data)\n\n if data[0][\"type\"] == \"message-received\":\n if \"call me\" in data[0][\"message\"][\"text\"]:\n handle_inbound_sm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes information from a Bandwidth inbound message callback and initiates a call
def handle_inbound_sms_call_me(to, from_):
    """Kick off a phone call in response to an inbound "call me" SMS."""
    handle_call_me(to, from_)
[ "def incoming_call(self, call):\n self.active_call = call\n self.ringer.play_ringer()", "def _handleMessage(self):\r\n call = self._onBeforeCall()\r\n ## execute incoming RPC\r\n d = maybeDeferred(self._callProcedure, call)\r\n ## register callback and errback with extra argument...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A method for showing how to handle Bandwidth messaging callbacks. For an inbound SMS that contains the phrase "call me", a phone call is made and the user is asked to forward the call to another number. For an inbound SMS that doesn't contain the phrase "call me", the response is an SMS with the date and time. For an inbound MMS with a media attachment, the response is the same media attachment sent through Bandwidth's media resource. For all other events, the callback is logged to the console.
def handle_inbound_message():
    """Route a Bandwidth messaging callback.

    - Inbound SMS containing "call me": start an outbound call.
    - Inbound message with media: echo the media back through Bandwidth.
    - Any other inbound SMS: reply with the date and time.
    - All other event types: log the payload to the console.
    """
    data = json.loads(request.data)
    event = data[0]
    if event["type"] != "message-received":
        print(data)
        return ""
    message = event["message"]
    if "call me" in message["text"]:
        handle_inbound_sms_call_me(message["to"][0], message["from"])
    elif "media" in message:
        handle_inbound_media_mms(message["to"][0], message["from"], message["media"])
    else:
        handle_inbound_sms(message["to"][0], message["from"])
    return ""
[ "def handle_inbound_sms_call_me(to, from_):\n handle_call_me(to, from_)", "def handleSMS(self, callerID, message, node):", "def handle_inbound_media_mms(to, from_, media):\n downloaded_media_files = download_media_from_bandwidth(media)\n upload_media_to_bandwidth(downloaded_media_files)\n remove_fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats |record| with color.
def format(self, record):
    """Format *record*, wrapping the message in the level's ANSI color code
    when coloring is enabled and the level has a color mapping."""
    text = super(ColoredFormatter, self).format(record)
    color = self._COLOR_MAPPING.get(record.levelname)
    if color and self._use_colors:
        return '%s%s%s' % (color, text, self._RESET)
    return text
[ "def format(self, record):\n\t\tif self.color:\n\t\t\ttry:\n\t\t\t\tcat = getattr(record, self.CATEGORY, None)\n\t\t\t\tif not cat:\n\t\t\t\t\tif record.levelname == 'WARN': cat = LOG_WARN\n\t\t\t\t\telif record.levelname == 'ERROR': cat = LOG_ERROR\n\t\t\t\t\telif record.levelname == 'DEBUG': cat = LOG_DEBUG\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Always symlink |path| to a relativized |target|.
def symlink(target, path):
    """Replace *path* with a symlink to *target*, relativized against the
    (resolved) directory containing *path*."""
    unlink(path)
    real_path = os.path.realpath(path)
    rel_target = os.path.relpath(os.path.realpath(target),
                                 os.path.dirname(real_path))
    logging.info('Symlinking %s -> %s', real_path, rel_target)
    os.symlink(rel_target, real_path)
[ "def symlink(self, path, target, *args, **kwargs): # pragma: no cover", "def symlink_to(self, target, target_is_directory=False):\n if self._closed:\n self._raise_closed()\n self._accessor.symlink(target, self, target_is_directory)", "def symlink(source, link_name, target_is_directory=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return sha256 hex digest of |path|.
def sha256(path: Union[Path, str]) -> str:
    """Return the SHA-256 hex digest of the file at *path*."""
    # The file shouldn't be too big to load into memory, so be lazy.
    with open(path, 'rb') as fp:
        contents = fp.read()
    return hashlib.sha256(contents).hexdigest()
[ "def hex_hash(path):\n\n return hashlib.md5(path).hexdigest()[:2]", "def hexhash(path):\n\n m = hashlib.md5()\n with open(path) as handle:\n for line in handle:\n m.update(line)\n return m.hexdigest()", "def _checksum_sha256(file_path):\n sha256_hash = hashlib.sha256()\n chunk_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unpack |archive| into |cwd|.
def unpack(archive: Union[Path, str],
           cwd: Optional[Path] = None,
           files: Optional[List[Union[Path, str]]] = ()):
    """Extract *archive* into *cwd* (default: the current directory).

    When *files* is given, only those members are extracted; they are passed
    to tar after a "--" separator.
    """
    archive = Path(archive)
    if cwd is None:
        cwd = Path.cwd()
    member_args = ['--'] + list(files) if files else []
    # Ask MSYS to create native symlinks so extraction behaves on Windows.
    extra_env = {
        'MSYS': 'winsymlinks:nativestrict',
    }
    logging.info('Unpacking %s', archive.name)
    # relpath keeps colons out of the tar path on platforms (e.g. Windows)
    # where tar mis-parses them.  Build the full path first: a relative
    # archive would otherwise be resolved against os.getcwd() instead of
    # the explicit cwd.
    src = os.path.relpath(cwd / archive, cwd)
    run(['tar', '--no-same-owner', '-xf', src] + member_args,
        cwd=cwd, extra_env=extra_env)
[ "def unpack_archive(fname: Union[str, Path], tgt_dir: Union[str, Path]) -> None:\n shutil.unpack_archive(str(fname), str(tgt_dir))", "def _unzip(archive, dst):\n with zipfile.ZipFile(archive) as zf:\n for zi in zf.infolist():\n if zi.filename[-1] == '/': # skip dir\n contin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an |archive| with |paths| in |cwd|. The output will use XZ compression.
def pack(archive: Union[Path, str],
         paths: List[Union[Path, str]],
         cwd: Optional[Path] = None,
         exclude: Optional[List[Union[Path, str]]] = ()):
    """Create an |archive| with |paths| in |cwd|.  The output will use XZ compression.

    Args:
        archive: Output archive path; a trailing ".xz" suffix is stripped
            because xz re-adds it when compressing the tarball.
        paths: Paths (relative to *cwd*) to include in the archive.
        cwd: Directory the paths are relative to; defaults to the CWD.
        exclude: Patterns passed to tar as --exclude options.

    Raises:
        ValueError: If an input path is neither a file, dir, nor symlink.
    """
    archive = Path(archive)
    if cwd is None:
        cwd = Path.cwd()
    if archive.suffix == '.xz':
        archive = archive.with_suffix('')

    # Make sure all the paths have sane permissions.
    def walk(path):
        # Symlink permissions are irrelevant; leave them alone.
        if path.is_symlink():
            return
        elif path.is_dir():
            # All dirs should be 755.
            mode = path.stat().st_mode & 0o777
            if mode != 0o755:
                path.chmod(0o755)

            for subpath in path.glob('*'):
                walk(subpath)
        elif path.is_file():
            # All scripts should be 755 while other files should be 644.
            mode = path.stat().st_mode & 0o777
            if mode in (0o755, 0o644):
                return
            # Any execute bit set means it is a script -> 755; else 644.
            if mode & 0o111:
                path.chmod(0o755)
            else:
                path.chmod(0o644)
        else:
            raise ValueError(f'{path}: unknown file type')

    logging.info('Forcing sane permissions on inputs')
    for path in paths:
        walk(cwd / path)

    logging.info('Creating %s tarball', archive.name)
    # We use relpath here to help out tar on platforms where it doesn't like
    # paths with colons in them (e.g. Windows). We have to construct the full
    # before running through relpath as relative archives will implicitly be
    # checked against os.getcwd rather than the explicit cwd.
    tar = os.path.relpath(cwd / archive, cwd)
    # --owner/--group=0 make the archive reproducible regardless of builder.
    run(['tar', '--owner=0', '--group=0', '-cf', tar] +
        [f'--exclude={x}' for x in exclude] + ['--'] + paths, cwd=cwd)

    logging.info('Compressing tarball')
    # -T0 uses all cores; -9 is max compression; -f overwrites existing .xz.
    run(['xz', '-f', '-T0', '-9', tar], cwd=cwd)
[ "def make_archive():\n Utils.delete_if_exist(Path.get_zip_path(True))\n shutil.make_archive(Path.get_zip_path(), 'zip', Utils.reverse_path_if_windows(Path.get_dist_path()))", "def generate_archive_file(location, paths, environment=None, compression=None, archive_format=None):\n if archive_format == 'zip'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch |uri| and write the results to |output| (or return BytesIO).
def fetch_data(uri: str, output=None, verbose: bool = False, b64: bool = False):
    """Fetch |uri| and write the results to |output| (or return BytesIO).

    Args:
        uri: The URI to download.
        output: A writable file-like object; a fresh io.BytesIO when None.
        verbose: Print a simple one-line progress indicator while downloading.
        b64: Base64-decode each chunk before writing it.

    Returns:
        The *output* object (the new BytesIO when none was supplied).
    """
    # This is the timeout used on each blocking operation, not the entire
    # life of the connection. So it's used for initial urlopen and for each
    # read attempt (which may be partial reads). 5 minutes should be fine.
    TIMEOUT = 5 * 60

    if output is None:
        output = io.BytesIO()
    try:
        with urllib.request.urlopen(uri, timeout=TIMEOUT) as infp:
            downloaded = 0
            length = infp.length
            while True:
                data = infp.read(1024 * 1024)
                if not data:
                    break
                # BUGFIX: count actual bytes received.  read() may return
                # short (partial) chunks, so counting iterations overstated
                # both the MiB figure and the percentage.
                downloaded += len(data)
                # Show a simple progress bar if the user is interactive.
                if verbose:
                    print('~%i MiB downloaded' % (downloaded // (1024 * 1024),),
                          end='')
                    if length:
                        percent = downloaded * 100 / length
                        print(' (%.2f%%)' % (percent,), end='')
                    print('\r', end='', flush=True)
                if b64:
                    data = base64.b64decode(data)
                output.write(data)
    except urllib.error.HTTPError as e:
        logging.error('%s: %s', uri, e)
        sys.exit(1)
    return output
[ "def _fetch_file(self, location, output=None):\n\n self.log.debug(\"Fetching '%s' file...\" % location)\n\n if not output:\n output = tempfile.mktemp(\"-dogen\")\n\n self.log.debug(\"Fetched file will be saved as '%s'...\" % os.path.basename(output))\n\n r = requests.get(locat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download our copies of node & npm to our tree and updates env ($PATH).
def node_and_npm_setup():
    """Download our copies of node & npm to our tree and update env ($PATH).

    Module updates must run first: they would otherwise nuke the directory
    node itself lives under.
    """
    node.modules_update()
    node.update()
[ "def InstallNodeDependencies():\n logging.info('entering ...')\n # Install the project dependencies specified in package.json into\n # node_modules.\n logging.info('installing AMP Validator engine dependencies ...')\n subprocess.check_call(\n ['npm', 'install', '--userconfig', '../.npmrc'],\n stdout=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a module from the filesystem.
def load_module(name, path):
    """Load and execute the Python source file at *path* as module *name*.

    Returns the freshly created module object; it is NOT inserted into
    sys.modules.
    """
    source_loader = importlib.machinery.SourceFileLoader(name, path)
    mod = types.ModuleType(source_loader.name)
    source_loader.exec_module(mod)
    return mod
[ "def load_module(name, path):\n\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module", "def loadModule (\r\n \r\n self,\r\n path = None\r\n ) :\r\n\r\n if not ut...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load & cache the program module.
def _module(self):
    """Return the program module, loading it on first access and caching it."""
    cached = self._module_cache
    if cached is None:
        cached = load_module(self._name, self._path)
        self._module_cache = cached
    return cached
[ "def load(self):\n \"\"\"Load a program into memory.\"\"\"\n\n if len(sys.argv) != 2:\n print(\"format: ls8.py [filename]\")\n sys.exit(1)\n\n program = sys.argv[1]\n address = 0\n\n # For now, we've just hardcoded a program:\n\n # program = [\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the packet length.
def _set_packet_len(self, packet_len):
    """Set the packet length.

    Args:
        packet_len: New length value stored as ``_packet_len``; consumed by
            whatever serializes the header (not visible here).
    """
    self._packet_len = packet_len
[ "def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def setlen(self, length):\n self._len = length", "def set_length(self, length):\n self.length = length", "def set(self, length):\r\n self.length = length", "def setLength(self, new_length):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a XCP Ethernet frame
def create_message(self, packet):
    """Build an XCP-on-Ethernet frame for *packet* and return its bytes.

    Sets the header's packet length first, delegates frame assembly to the
    base transport, then bumps the header's control counter so the next
    frame is numbered correctly.
    """
    self._header.packet_len = len(bytes(packet))
    frame = super(EthernetTransport, self).create_message(packet)
    # Update control counter for next frame.
    self._header.update_control()
    return bytes(frame)
[ "def _create_frame(self, packets, type):\n fr = bytearray()\n fr += struct.pack('>H', type.value)\n fr += struct.pack('>H', len(packets))\n frameno = self.next_frameno()\n fr += struct.pack('>Q', frameno)\n for pkt in packets:\n fr += struct.pack('>H', len(pkt))\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the pickup_features feature group. To restrict features to a time range, pass in ts_column, start_date, and/or end_date as kwargs.
def pickup_features_fn(df, ts_column, start_date, end_date):
    """Compute the pickup_features feature group.

    To restrict features to a time range, pass in ts_column, start_date,
    and/or end_date.
    """
    df = filter_df_by_ts(df, ts_column, start_date, end_date)
    # 1 hour window, sliding every 15 minutes
    hourly_window = window("tpep_pickup_datetime", "1 hour", "15 minutes")
    aggregated = df.groupBy("pickup_zip", hourly_window).agg(
        mean("fare_amount").alias("mean_fare_window_1h_pickup_zip"),
        count("*").alias("count_trips_window_1h_pickup_zip"),
    )
    pickupzip_features = aggregated.select(
        col("pickup_zip").alias("zip"),
        unix_timestamp(col("window.end")).alias("ts").cast(IntegerType()),
        partition_id(to_timestamp(col("window.end"))).alias("yyyy_mm"),
        col("mean_fare_window_1h_pickup_zip").cast(FloatType()),
        col("count_trips_window_1h_pickup_zip").cast(IntegerType()),
    )
    return pickupzip_features
[ "def dropoff_features_fn(df, ts_column, start_date, end_date):\n df = filter_df_by_ts(\n df, ts_column, start_date, end_date\n )\n dropoffzip_features = (\n df.groupBy(\"dropoff_zip\", window(\"tpep_dropoff_datetime\", \"30 minute\"))\n .agg(count(\"*\").alias(\"count_trips_window_30m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the dropoff_features feature group. To restrict features to a time range, pass in ts_column, start_date, and/or end_date as kwargs.
def dropoff_features_fn(df, ts_column, start_date, end_date):
    """Compute the dropoff_features feature group.

    To restrict features to a time range, pass in ts_column, start_date,
    and/or end_date.
    """
    df = filter_df_by_ts(df, ts_column, start_date, end_date)
    # Tumbling 30-minute window per dropoff zip code.
    half_hour_window = window("tpep_dropoff_datetime", "30 minute")
    aggregated = df.groupBy("dropoff_zip", half_hour_window).agg(
        count("*").alias("count_trips_window_30m_dropoff_zip")
    )
    dropoffzip_features = aggregated.select(
        col("dropoff_zip").alias("zip"),
        unix_timestamp(col("window.end")).alias("ts").cast(IntegerType()),
        partition_id(to_timestamp(col("window.end"))).alias("yyyy_mm"),
        col("count_trips_window_30m_dropoff_zip").cast(IntegerType()),
        is_weekend(col("window.end")).alias("dropoff_is_weekend"),
    )
    return dropoffzip_features
[ "def pickup_features_fn(df, ts_column, start_date, end_date):\n df = filter_df_by_ts(\n df, ts_column, start_date, end_date\n )\n pickupzip_features = (\n df.groupBy(\n \"pickup_zip\", window(\"tpep_pickup_datetime\", \"1 hour\", \"15 minutes\")\n ) # 1 hour window, sliding...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ceilings datetime dt to interval num_minutes, then returns the unix timestamp.
def rounded_unix_timestamp(dt, num_minutes=15):
    """Ceiling *dt* to the next *num_minutes* boundary; return the unix timestamp.

    Inputs already on a boundary are returned unchanged.
    """
    interval = 60 * num_minutes
    secs_into_hour = dt.minute * 60 + dt.second + dt.microsecond * 1e-6
    pad = math.ceil(secs_into_hour / interval) * interval - secs_into_hour
    return int((dt + timedelta(seconds=pad)).timestamp())
[ "def dt_to_unix_time_ms(cls, dt):\n epoch = datetime.utcfromtimestamp(0)\n return int((dt - epoch).total_seconds() * 1000)", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def to_min(dt: datetime) -> int:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return current sample rate in Sa/s
def sample_rate(self):
    """Return the current sample rate in Sa/s, queried from the instrument."""
    query = 'ENTER Current Sample Rate (Sa/s)'
    return self.query_float(query)
[ "def sample_rate(self) -> float:\n return self._sample_rate", "def sample_rate(self):\r\n return self.config.sample_rate", "def get_sample_rate(self):\n time_diffs = np.diff(self.get_time()).mean()\n return 1000/time_diffs", "def sampling_rate(self):\n return self.librarycal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
assert unexpected_content has not been written to stdout
def assertStdoutDoesNotContain(self, unexpected_content):
    """Assert that none of *unexpected_content* was written to stdout.

    Args:
        unexpected_content: A string, or list of strings, that must not
            appear in captured stdout.  sys.stdout must support getvalue()
            (e.g. a StringIO swapped in by the test harness).
    """
    # BUGFIX: types.ListType is Python 2 only and raises AttributeError on
    # Python 3; isinstance() works on both.
    if not isinstance(unexpected_content, list):
        unexpected_content = [unexpected_content]
    stdout_message = sys.stdout.getvalue()
    for the_text in unexpected_content:
        self.assertNotIn(
            the_text, stdout_message,
            ('Stdout "%s" contains text "%s"' % (stdout_message, the_text)))
[ "def check_cot_output(self, expected):\n sys.stdout = StringIO.StringIO()\n output = None\n try:\n self.instance.run()\n except (TypeError, ValueError, SyntaxError, LookupError):\n self.fail(traceback.format_exc())\n finally:\n output = sys.stdout....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Render the image represented by (rgbobj) at dst_x, dst_y in the offscreen pixmap.
def render_image(self, rgbobj, dst_x, dst_y):
    """Render the image represented by *rgbobj* at (dst_x, dst_y) in the
    offscreen pixmap; does nothing when no pixmap has been created yet."""
    self.logger.debug("redraw pixmap=%s" % (self.pixmap))
    if self.pixmap is None:
        return
    self.logger.debug("drawing to pixmap")
    # Prepare array for rendering.
    arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8)
    height, width = arr.shape[:2]
    return self._render_offscreen(self.pixmap, arr, dst_x, dst_y,
                                  width, height)
[ "def draw(self, surface):\r\n surface.blit(self.image, self.rect)", "def blit(self):\n self.screen.blit(self.image, self.rect)", "def draw_image_processing(self, screen):\n screen.blit(self.get_image_processing(), self.get_image_processing_rect())", "def draw_inkblot(self):\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called when a mouse button is pressed in the widget. Adjust method signature as appropriate for callback.
def button_press_event(self, widget, event):
    """Handle a mouse button press in the widget.

    Records the press position, builds a (currently always-zero) button
    mask, and fires the 'button-press' UI callback with data coordinates.
    Adjust the signature as appropriate for the toolkit callback.
    """
    # x, y = coordinates where the button was pressed.
    x, y = event.x, event.y
    self.last_win_x, self.last_win_y = x, y
    # Prepare a button mask with bits set as follows:
    #   left button:   0x1
    #   middle button: 0x2
    #   right button:  0x4
    # Others can be added as appropriate.
    button = 0
    self.logger.debug("button down event at %dx%d, button=%x" % (x, y, button))
    data_x, data_y = self.check_cursor_location()
    return self.make_ui_callback('button-press', button, data_x, data_y)
[ "def on_mouse_press(self, x, y, button):\n\n pass", "def mouse_press(self, btn, x, y, modifiers):", "def on_mouse_press(self, x, y, button, modifiers):\n pass", "def _press(self, event):", "def on_mouse_release(self, x, y, button):\n pass", "def onMousePressed(self, event):\n (...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called when a drop (drag/drop) event happens in the widget. Adjust method signature as appropriate for callback.
def drop_event(self, widget, event):
    """Handle a drag-and-drop event in the widget.

    Not implemented for this backend.  A concrete implementation should
    extract the dropped filename URLs and fire the 'drag-drop' UI callback
    with the list of paths.
    """
    raise NotImplementedError
[ "def hook_drop(self):\n widget = self.widget\n widget.setAcceptDrops(True)\n widget.dragEnterEvent = self.dragEnterEvent\n widget.dragMoveEvent = self.dragMoveEvent\n widget.dragLeaveEvent = self.dragLeaveEvent\n widget.dropEvent = self.dropEvent", "def dropEvent(self, e)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets details on currently logged in athlete.
def get_athlete(token):
    """Fetch details on the currently logged-in athlete from the Strava API."""
    url = "https://www.strava.com/api/v3/athlete"
    query = {'access_token': token}
    return return_json(url, "GET", parameters=query, timeout=10)
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection.
def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp):  # pylint: disable=too-many-arguments
    """Store the athlete's id, name, weight and FTP in the strava_athlete
    KV Store collection via Splunk's batch_save endpoint."""
    url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Splunk {session_key}',
    }
    record = {
        "_key": athlete_id,
        "id": athlete_id,
        "firstname": firstname,
        "lastname": lastname,
        "fullname": firstname + " " + lastname,
        "weight": weight,
        "ftp": ftp,
    }
    helper.send_http_request(url, "POST", headers=headers, payload=[record],
                             verify=False, use_proxy=False)
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates dict with athlete details, including token expiry.
def set_athlete(response):
    """Build an athlete-details dict (including token expiry) from an OAuth
    token-exchange *response*; ts_activity starts at 0."""
    profile = response['athlete']
    full_name = profile['firstname'] + " " + profile['lastname']
    return {
        'id': profile['id'],
        'name': full_name,
        'access_token': response['access_token'],
        'refresh_token': response['refresh_token'],
        'expires_at': response['expires_at'],
        'ts_activity': 0,
    }
[ "def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def alpaca_create(self, keyname = \"ALPACA_API_KEY\", secret = \"ALPA...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }