query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Execute the sequence of SQL statements in {sql} as a single command
def execute(self, *sql): # assemble the command and pass it on to the connection return self.postgres.execute(self.connection, "\n".join(sql))
[ "def execute_sql_commands(sql, cur):\n for line in sql.split(\";\"):\n line = line.strip()\n line = line.replace(\"\\n\",\" \")\n if line == \"\":\n continue\n # lg.info(\"sql:: ::line %s\"%line)\n cur.execute(line)", "def execute(self, *sql):\n # assemble t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the project_name of this ShowProjectWorkHoursResponseBodyWorkHours. 项目名称
def project_name(self, project_name): self._project_name = project_name
[ "def set_project_name(self, name):\n self.project_tags[\"PROJECT_NAME\"] = name", "def project_name(self, project_name):\n \n self._project_name = project_name", "def project_name(self, project_name):\n\n self._project_name = project_name", "def evaluation_project_name(self, evaluation...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the nick_name of this ShowProjectWorkHoursResponseBodyWorkHours. 用户昵称
def nick_name(self): return self._nick_name
[ "def nick_name(self):\n if \"nickName\" in self._prop_dict:\n return self._prop_dict[\"nickName\"]\n else:\n return None", "def get_nick(self) -> str:\n return self.name", "def get_nickname(self):\n return self._nick", "def getNickname(self):\n return s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the nick_name of this ShowProjectWorkHoursResponseBodyWorkHours. 用户昵称
def nick_name(self, nick_name): self._nick_name = nick_name
[ "def nick_name(self):\n if \"nickName\" in self._prop_dict:\n return self._prop_dict[\"nickName\"]\n else:\n return None", "def nick_name(self):\n return self._nick_name", "async def _nick(self, nick: str) -> str:\n\n logger.debug(f\"Setting nick to {nick!r}\")\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_date of this ShowProjectWorkHoursResponseBodyWorkHours. 工时日期
def work_date(self): return self._work_date
[ "def work_hours_created_time(self):\n return self._work_hours_created_time", "def _get_workdate(self):\n return self.currentEnv.get('workdate') or datetime.today()", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def work_hours_num(self):\n return sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_date of this ShowProjectWorkHoursResponseBodyWorkHours. 工时日期
def work_date(self, work_date): self._work_date = work_date
[ "def _set_workdate(self, workdate):\n self.currentEnv['workdate'] = workdate", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def work_date(self):\n return self._work_date", "def work_hours_num(self, work_hours_num):\n self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_num of this ShowProjectWorkHoursResponseBodyWorkHours. 工时花费
def work_hours_num(self): return self._work_hours_num
[ "def actual_work_hours(self):\n return self._actual_work_hours", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def expected_work_hours(self):\n return self._expected...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_num of this ShowProjectWorkHoursResponseBodyWorkHours. 工时花费
def work_hours_num(self, work_hours_num): self._work_hours_num = work_hours_num
[ "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def work_hours(self, work_hours):\n if work_hours is not None and len(work_hours) > 1024:\n raise ValueError(\"Invalid value for `work_hours`, length must be less than or equal to `1024...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_type_name of this ShowProjectWorkHoursResponseBodyWorkHours. 工时类型
def work_hours_type_name(self): return self._work_hours_type_name
[ "def work_hours_type_name(self, work_hours_type_name):\n self._work_hours_type_name = work_hours_type_name", "def work_hours_num(self):\n return self._work_hours_num", "def workpiece_type(self):\n return self._workpiece_type", "def work_hours_created_time(self):\n return self._work...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_type_name of this ShowProjectWorkHoursResponseBodyWorkHours. 工时类型
def work_hours_type_name(self, work_hours_type_name): self._work_hours_type_name = work_hours_type_name
[ "def work_hours_type_name(self):\n return self._work_hours_type_name", "def work_hours_num(self, work_hours_num):\n self._work_hours_num = work_hours_num", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def work_hours_created_time(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the issue_id of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项id
def issue_id(self): return self._issue_id
[ "def issue_id(self) -> str:\n return self._yaml[\"commit\"][\"issue_id\"]", "def find_issue_id(self):", "def workitem_id(self):\n return self._workitem_id", "def get_issue(self):\n issue_id = self.kwargs['issue_id']\n try:\n issue = Issue.objects.get(pk=issue_id)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the issue_id of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项id
def issue_id(self, issue_id): self._issue_id = issue_id
[ "def issue_id(self):\n return self._issue_id", "def issue_id(self) -> str:\n return self._yaml[\"commit\"][\"issue_id\"]", "def workitem_id(self, workitem_id):\n self._workitem_id = workitem_id", "def SetIssue(self, issue=None):\n assert self.GetBranch()\n if issue:\n issue = i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the issue_type of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项类型
def issue_type(self): return self._issue_type
[ "def get_issue_type(issue):\n\tissue_type = ''\n\tif 'issuetype' in issue['fields'] and issue['fields']['issuetype'] is not None:\n\t\tissue_type = issue['fields']['issuetype'].get('name', '')\n\treturn issue_type", "def work_hours_type_name(self):\n return self._work_hours_type_name", "def type(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the issue_type of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项类型
def issue_type(self, issue_type): self._issue_type = issue_type
[ "def issue_set_type(self, issue, type):", "def workpiece_type(self, workpiece_type):\n self._workpiece_type = workpiece_type", "def problem_type(self, problem_type):\n if problem_type is None:\n raise ValueError(\"Invalid value for `problem_type`, must not be `None`\") # noqa: E501\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the closed_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项结束时间
def closed_time(self): return self._closed_time
[ "def actual_work_hours(self):\n return self._actual_work_hours", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def end_time(self):\n return self._end_time", "def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the closed_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工作项结束时间
def closed_time(self, closed_time): self._closed_time = closed_time
[ "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hours", "def closed_time(self):\n return self._closed_time", "def end_date_time...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_created_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工时创建时间
def work_hours_created_time(self): return self._work_hours_created_time
[ "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def created_time(self):\n return self._created_time", "def create_time(self):\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_created_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工时创建时间
def work_hours_created_time(self, work_hours_created_time): self._work_hours_created_time = work_hours_created_time
[ "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hour...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the work_hours_updated_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工时更新时间
def work_hours_updated_time(self): return self._work_hours_updated_time
[ "def work_hours_updated_time(self, work_hours_updated_time):\n self._work_hours_updated_time = work_hours_updated_time", "def work_hours_created_time(self):\n return self._work_hours_created_time", "def updated_time(self):\n return self._updated_time", "def update_time(self):\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the work_hours_updated_time of this ShowProjectWorkHoursResponseBodyWorkHours. 工时更新时间
def work_hours_updated_time(self, work_hours_updated_time): self._work_hours_updated_time = work_hours_updated_time
[ "def work_hours_updated_time(self):\n return self._work_hours_updated_time", "def work_hours_created_time(self, work_hours_created_time):\n self._work_hours_created_time = work_hours_created_time", "def actual_work_hours(self, actual_work_hours):\n self._actual_work_hours = actual_work_hour...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return default zoom setting.
def _defaultZoom(self): return (-1.0, 1.0, -1.0, 1.0)
[ "def _get_zoom(self) :\n \n # TODO : make it absolute zoom value : a zoom of 1 displays one data\n # pixel in one viewport pixel.\n \n return self._zoom", "def zoom(self) -> Zoom:\n return self.__zoom", "def zoom(self) -> float:\n return self._zoom", "def zoom(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Approximates root of this function using single iteration of Newton's method.
def newtonsMethod(self, x, a): return x - a * (self._f(x) / self._df(x))
[ "def inexact_newton(f,x0,delta = 1.0e-7, epsilon=1.0e-6, LOUD=False):\n x = x0\n if (LOUD):\n print(\"x0 =\",x0)\n iterations = 0\n while (np.fabs(f(x)) > epsilon):\n fx = f(x)\n fxdelta = f(x+delta)\n slope = (fxdelta - fx)/delta\n if (LOUD):\n print(\"x_\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the generated fractal into an RGB image array
def _toRgbImage(self, fractal, colors, color_offset): soln_real = adjustRange(fractal[0], 0, 127) soln_imag = adjustRange(fractal[1], 0, 127) iters = adjustRange(fractal[2], 0, 128) rgb_image = np.array([ soln_real + iters, soln_imag + iters, iters ] ).astype(dtype=np.uint8) return rgb_image.T
[ "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def rgb_image(self):\n z3 = self.z[:,:,newaxis]\n return z3 * self.c", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking return values for `start` and `end` when calling channel_messages for numbers not multiples of 50.
def test_channel_messages_unlimited_pagination(): clear() userOne = auth_register('firstuser@gmail.com', '123abc!@#', 'First', 'User') randChannel = channels_create(userOne['token'], 'randChannel', True) for _ in range(149): message_send(userOne['token'], randChannel['channel_id'], 'Hello') messages = channel_messages(userOne['token'], randChannel['channel_id'], 0) assert(messages['start'] == 0) assert(messages['end'] == 50) messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50) assert(messages2['start'] == 50) assert(messages2['end'] == 100) messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100) assert(messages3['start'] == 100) assert(messages3['end'] == -1) assert(len(messages3['messages']) == 49) # an error should be raised when start is beyond 149 messages with pytest.raises(InputError): channel_messages(userOne['token'], randChannel['channel_id'], 150)
[ "def channel_messages(token, channel_id, start):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # check if user is a member of channel with channel_ID and return AccessError if not\n if is_user_channel_member(channel_id, curr_id) is False:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if the user is already in the channel, raise accesserror if they are
def test_channel_join_already_in_channel(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) with pytest.raises(AccessError): channel_join(user['token'], userchannel_id['channel_id'])
[ "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = chan...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
if the channel is private, but no invite is given to the user, then the owner of flockr can join the channel
def test_channel_join_private_owner(): clear() joiner = auth_register('joiner@gmail.com', '123abc!@#', 'first', 'last') user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', False) channel_join(joiner['token'], userchannel_id['channel_id']) randChannel_details = channel_details(user['token'], userchannel_id['channel_id']) assert(randChannel_details['all_members'] == [ { 'u_id' : user['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' }, { 'u_id' : joiner['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' } ])
[ "async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if an inputerror is raised if attempting to add a user as an owner who is already an owner
def test_channel_addowner_already_an_owner(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) with pytest.raises(InputError): assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
[ "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if AccessError is returned as expected if the owner of flockr is not a member of the channel
def test_channel_addowner_owner_flockr_not_member(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if removing an owner with an invalid user ID raises an inputerror
def test_channel_removeowner_invalid_user_id(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') with pytest.raises(InputError): assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], "invalidemail@gmail.com")
[ "def verifyOwner(owner_id, this_user_id):\n if not (owner_id == this_user_id):\n raise ValueError(\n \"You don't have permission to edit this.\")", "def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if removing an owner without owner permissions raises an accesserror
def test_channel_removeowner_not_owner_permissions(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
[ "def test_cannot_remove_sole_owner(self):\n # Get the current sole owner for the project\n collaborator = self.project.collaborators.filter(role = Collaborator.Role.OWNER).first()\n # Authenticate as the owner from the discovered collaborator\n self.client.force_authenticate(user = colla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if able to remove an owner who is the last owner of the channel
def test_channel_removeowner_last_owner(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_join(register_first_result['token'], randChannel_id['channel_id']) #register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) # removing third user channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
[ "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if owner of the flockr who is not the channel owner can remove owner
def test_channel_removeowner_owner_flockr(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_join(register_first_result['token'], randChannel_id['channel_id']) channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checking if AccessError is returned as expected if the owner of flockr is not a member of the channel
def test_channel_removeowner_owner_flockr_not_member(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) with pytest.raises(AccessError): assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
[ "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets a system Hamiltonian to the Hubbard Hamiltonian. Does exactly this. If the system hamiltonian has some other terms on it, there are not touched. So be sure to use this function only in newly created `System` objects.
def set_hamiltonian(self, system): system.clear_hamiltonian() if 'bh' in system.left_block.operators.keys(): system.add_to_hamiltonian(left_block_op='bh') if 'bh' in system.right_block.operators.keys(): system.add_to_hamiltonian(right_block_op='bh') system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U)) # system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U) system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
[ "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n sys...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the block Hamiltonian to the Hubbard model block Hamiltonian.
def set_block_hamiltonian(self, tmp_matrix_for_bh, system): # If you have a block hamiltonian in your block, add it if 'bh' in system.growing_block.operators.keys(): system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id') system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U)) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. - self.U)) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
[ "def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_bloc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ExponentialFamily class initialization.
def test_exponential_family_init(): D = 4 N = 100 exp_fam = ExponentialFamily(D) assert exp_fam.D == D assert exp_fam.support_layer is None assert exp_fam.D_eta == D with raises(TypeError): exp_fam = ExponentialFamily('foo') with raises(ValueError): exp_fam = ExponentialFamily(0) with raises(TypeError): exp_fam = ExponentialFamily(4, int) with raises(NotImplementedError): exp_fam.sample_eta(N) mu = np.zeros((D,)) with raises(NotImplementedError): exp_fam.mu_to_eta(mu) eta = np.zeros((D,)) with raises(NotImplementedError): exp_fam.eta_to_mu(eta) z = np.zeros((D,)) with raises(NotImplementedError): exp_fam.T(z) return None
[ "def test_FGDA_init():\n FGDA(metric='riemann')", "def test_FgMDM_init():\n mdm = FgMDM(metric='riemann')", "def test_init(self):\n # Test simple initialization\n sm = self._mock_supermarket_instance()\n self.assertIs(type(sm),supermarket_register.SupermarketRegister)\n\n # Tes...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the printable length of the Entry's Text
def getTextLength(self): return 0
[ "def length(self, txt):\n return self.fontMetrics().width(self.__surrounding.format(txt))", "def __len__(self):\n return len(self.text)", "def width(self, text):\n return len(text) * (self.font_width + 1)", "def get_length(self) -> int:\n return sum([text.get_length() for text in self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to generate jitted lanczos function used in JaxBackend.eigsh_lanczos. The function `jax_lanczos` returned by this higherorder function has the following
def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable: @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6)) def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho): """ Jitted lanczos routine. Args: matvec: A callable implementing the matrix-vector product of a linear operator. arguments: Arguments to `matvec` additional to an input vector. `matvec` will be called as `matvec(init, *args)`. init: An initial input state to `matvec`. ncv: Number of krylov iterations (i.e. dimension of the Krylov space). neig: Number of eigenvalue-eigenvector pairs to be computed. landelta: Convergence parameter: if the norm of the current Lanczos vector falls below `landelta`, iteration is stopped. reortho: If `True`, reorthogonalize all krylov vectors at each step. This should be used if `neig>1`. Returns: jax.numpy.ndarray: Eigenvalues list: Eigenvectors """ def body_modified_gram_schmidt(i, vals): vector, krylov_vectors = vals v = krylov_vectors[i, :] vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors] def body_lanczos(vals): current_vector, krylov_vectors, vector_norms = vals[0:3] diagonal_elements, matvec, args, _ = vals[3:7] threshold, i, maxiteration = vals[7:] norm = jax.numpy.linalg.norm(current_vector) normalized_vector = current_vector / norm normalized_vector, krylov_vectors = jax.lax.cond( reortho, True, lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt, [normalized_vector, krylov_vectors]), False, lambda x: [normalized_vector, krylov_vectors]) Av = matvec(normalized_vector, *args) diag_element = jax.numpy.vdot(normalized_vector, Av) res = jax.numpy.reshape( jax.numpy.ravel(Av) - jax.numpy.ravel(normalized_vector) * diag_element - krylov_vectors[i - 1] * norm, Av.shape) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :], jax.numpy.ravel(normalized_vector)) vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1], norm) diagonal_elements = 
jax.ops.index_update(diagonal_elements, jax.ops.index[i - 1], diag_element) return [ res, krylov_vectors, vector_norms, diagonal_elements, matvec, args, norm, threshold, i + 1, maxiteration ] def cond_fun(vals): _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals def check_thresh(check_vals): val, thresh = check_vals return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x) return jax.lax.cond(iteration <= maxiteration, [norm, threshold], check_thresh, False, lambda x: x) numel = jax.numpy.prod(init.shape) krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype) norms = jax.numpy.zeros(ncv, dtype=init.dtype) diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype) norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0) norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0), dtype=init.dtype)).dtype initvals = [ init, krylov_vecs, norms, diag_elems, matvec, arguments, norms_dtype.type(1.0), landelta, 1, ncv ] output = jax.lax.while_loop(cond_fun, body_lanczos, initvals) final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :], jax.numpy.ravel(final_state)) A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag( norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1) eigvals, U = jax.numpy.linalg.eigh(A_tridiag) eigvals = eigvals.astype(A_tridiag.dtype) def body_vector(i, vals): krv, unitary, states = vals dim = unitary.shape[1] n, m = jax.numpy.divmod(i, dim) states = jax.ops.index_add(states, jax.ops.index[n, :], krv[m + 1, :] * unitary[m, n]) return [krv, unitary, states] state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype) _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1), body_vector, [krylov_vecs, U, state_vectors]) return jax.numpy.array(eigvals[0:neig]), [ jax.numpy.reshape(vectors[n, :], init.shape) / jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig) ] return jax_lanczos
[ "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to create a jitted arnoldi factorization. The function returns a function `_arnoldi_fact` which performs an mstep arnoldi factorization. `_arnoldi_fact` computes an mstep arnoldi factorization of an input callable `matvec`, with m = min(`it`,`num_krylov_vecs`). `_arnoldi_fact` will do at most `num_krylov_vecs` steps. `_arnoldi_fact` returns arrays `kv` and `H` which satisfy the Arnoldi recurrence relation ``` matrix @ Vm Vm @ Hm fm em = 0 ``` with `matrix` the matrix representation of `matvec` and
def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable: @jax.jit def modified_gram_schmidt_step_arnoldi(j, vals): """ Single step of a modified gram-schmidt orthogonalization. Args: j: Integer value denoting the vector to be orthogonalized. vals: A list of variables: `vector`: The current vector to be orthogonalized to all previous ones `krylov_vectors`: jax.array of collected krylov vectors `n`: integer denoting the column-position of the overlap <`krylov_vector`|`vector`> within `H`. Returns: updated vals. """ vector, krylov_vectors, n, H = vals v = krylov_vectors[j, :] h = jax.numpy.vdot(v, vector) H = jax.ops.index_update(H, jax.ops.index[j, n], h) vector = vector - h * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors, n, H] @functools.partial(jax.jit, static_argnums=(5, 6, 7)) def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs, eps): """ Compute an m-step arnoldi factorization of `matvec`, with m = min(`it`,`num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and `Vm = jax.numpy.transpose(kv[:it, :])`, `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1) and `em` a cartesian basis vector of shape `(1, kv.shape[1])` with `em[0, -1] == 1` and 0 elsewhere. Note that the caller is responsible for dtype consistency between the inputs, i.e. dtypes between all input arrays have to match. Args: matvec: The matrix vector product. args: List of arguments to `matvec`. v0: Initial state to `matvec`. krylov_vectors: An array for storing the krylov vectors. The individual vectors are stored as columns. The shape of `krylov_vecs` has to be (num_krylov_vecs + 1, np.ravel(v0).shape[0]). H: Matrix of overlaps. The shape has to be (num_krylov_vecs + 1,num_krylov_vecs + 1). 
start: Integer denoting the start position where the first produced krylov_vector should be inserted into `krylov_vectors` num_krylov_vecs: Number of krylov iterations, should be identical to `krylov_vectors.shape[0] + 1` eps: Convergence parameter. Iteration is terminated if the norm of a krylov-vector falls below `eps`. Returns: kv: An array of krylov vectors H: A matrix of overlaps it: The number of performed iterations. """ Z = jax.numpy.linalg.norm(v0) v = v0 / Z krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[start, :], jax.numpy.ravel(v)) H = jax.lax.cond( start > 0, start, lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None, lambda x: H) # body of the arnoldi iteration def body(vals): krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals Av = matvec(vector, *args) initial_vals = [Av, krylov_vectors, i, H] Av, krylov_vectors, _, H = jax.lax.fori_loop( 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals) norm = jax.numpy.linalg.norm(Av) Av /= norm H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i + 1, :], jax.numpy.ravel(Av)) return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter] def cond_fun(vals): # Continue loop while iteration < num_krylov_vecs and norm > eps _, _, _, _, norm, _, iteration, _ = vals counter_done = (iteration >= num_krylov_vecs) norm_not_too_small = norm > eps continue_iteration = jax.lax.cond(counter_done, _, lambda x: False, _, lambda x: norm_not_too_small) return continue_iteration initial_norm = v.real.dtype.type(1.0+eps) initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start, num_krylov_vecs] final_values = jax.lax.while_loop(cond_fun, body, initial_values) kvfinal, Hfinal, _, _, norm, _, it, _ = final_values return kvfinal, Hfinal, it, norm < eps return _arnoldi_fact
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute an mstep arnoldi factorization of `matvec`, with m = min(`it`,`num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm Vm @ Hm fm em = 0 ``` with `matrix` the matrix representation of `matvec` and
def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs, eps): Z = jax.numpy.linalg.norm(v0) v = v0 / Z krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[start, :], jax.numpy.ravel(v)) H = jax.lax.cond( start > 0, start, lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None, lambda x: H) # body of the arnoldi iteration def body(vals): krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals Av = matvec(vector, *args) initial_vals = [Av, krylov_vectors, i, H] Av, krylov_vectors, _, H = jax.lax.fori_loop( 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals) norm = jax.numpy.linalg.norm(Av) Av /= norm H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i + 1, :], jax.numpy.ravel(Av)) return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter] def cond_fun(vals): # Continue loop while iteration < num_krylov_vecs and norm > eps _, _, _, _, norm, _, iteration, _ = vals counter_done = (iteration >= num_krylov_vecs) norm_not_too_small = norm > eps continue_iteration = jax.lax.cond(counter_done, _, lambda x: False, _, lambda x: norm_not_too_small) return continue_iteration initial_norm = v.real.dtype.type(1.0+eps) initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start, num_krylov_vecs] final_values = jax.lax.while_loop(cond_fun, body, initial_values) kvfinal, Hfinal, _, _, norm, _, it, _ = final_values return kvfinal, Hfinal, it, norm < eps
[ "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implicitly restarted arnoldi factorization of `matvec`. The routine finds the lowest `numeig` eigenvectoreigenvalue pairs of `matvec` by alternating between compression and reexpansion of an initial `num_krylov_vecs`step Arnoldi factorization.
def implicitly_restarted_arnoldi_method( matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter, res_thresh) -> Tuple[List[Tensor], List[Tensor]]: N = np.prod(initial_state.shape) p = num_krylov_vecs - numeig num_krylov_vecs = np.min([num_krylov_vecs, N]) if (p <= 1) and (num_krylov_vecs < N): raise ValueError(f"`num_krylov_vecs` must be between `numeig` + 1 <" f" `num_krylov_vecs` <= N={N}," f" `num_krylov_vecs`={num_krylov_vecs}") dtype = initial_state.dtype # initialize arrays krylov_vectors = jax.numpy.zeros( (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]), dtype=dtype) H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype) # perform initial arnoldi factorization Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args, initial_state, krylov_vectors, H, 0, num_krylov_vecs, eps) # obtain an m-step arnoldi factorization Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits) it = 0 if which == 'LR': _which = 0 elif which == 'LM': _which = 1 else: raise ValueError(f"which = {which} not implemented") # make sure the dtypes are matching if maxiter > 0: if Vm.dtype == np.float64: dtype = np.complex128 elif Vm.dtype == np.float32: dtype = np.complex64 elif Vm.dtype == np.complex128: dtype = Vm.dtype elif Vm.dtype == np.complex64: dtype = Vm.dtype else: raise TypeError(f'dtype {Vm.dtype} not supported') Vm = Vm.astype(dtype) Hm = Hm.astype(dtype) fm = fm.astype(dtype) while (it < maxiter) and (not converged): evals, _ = jax.numpy.linalg.eig(Hm) krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig, p, _which, res_thresh) if converged: break v0 = jax.numpy.reshape(fk, initial_state.shape) # restart Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0, krylov_vectors, H, numeig, num_krylov_vecs, eps) Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs) it += 1 ev_, U_ = np.linalg.eig(np.array(Hm)) eigvals = jax.numpy.array(ev_) U = jax.numpy.array(U_) _, inds = LR_sort(eigvals, _which) 
vectors = get_vectors(Vm, U, inds, numeig) return eigvals[inds[0:numeig]], [ jax.numpy.reshape(vectors[n, :], initial_state.shape) for n in range(numeig) ]
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solve A x = b for x using the mrestarted GMRES method. This is intended to be called via jax_backend.gmres. Given a linear mapping with (n x n) matrix representation A = A_mv(A_args) gmres_m solves Ax = b (1) where x and b are lengthn vectors, using the method of Generalized Minimum RESiduals with M iterations per restart (GMRES_M).
def gmres_m(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x0: jax.ShapedArray, tol: float, atol: float, num_krylov_vectors: int, maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]: num_krylov_vectors = min(num_krylov_vectors, b.size) x = x0 b_norm = jnp.linalg.norm(b) tol = max(tol * b_norm, atol) for n_iter in range(maxiter): done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol, b_norm) if done: break return x, beta, n_iter, done
[ "def solve_gmres(matvec: Callable,\n b: Any,\n ridge: Optional[float] = None,\n tol: float = 1e-5,\n **kwargs) -> Any:\n if ridge is not None:\n matvec = _make_ridge_matvec(matvec, ridge=ridge)\n return jax.scipy.sparse.linalg.gmres(matvec, b, tol=tol...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the residual vector r and its norm, beta, which is minimized by GMRES.
def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]: r = b - A_mv(x, *A_args) beta = jnp.linalg.norm(r) return r, beta
[ "def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R", "def residualNorm2(self):\n r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.A...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a single iteration of gmres_krylov. See that function for a more detailed description.
def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType: gmres_variables, gmres_constants = gmres_carry k, V, R, beta_vec, err, givens = gmres_variables tol, A_mv, A_args, b_norm, _ = gmres_constants V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol) R_col, givens = apply_givens_rotation(H[:, k], givens, k) R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:]) # Update the residual vector. cs, sn = givens[:, k] * beta_vec[k] beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs) beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn) err = jnp.abs(sn) / b_norm gmres_variables = (k + 1, V, R, beta_vec, err, givens) return (gmres_variables, gmres_constants)
[ "def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n num_krylov_vectors = min(num_krylov_vectors, b.size)\n x = x0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Successively applies each of the rotations stored in givens to H_col.
def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> jax.ShapedArray: rotation_carry = (H_col, 0, k, givens) def loop_condition(carry): i = carry[1] k = carry[2] return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0) def apply_ith_rotation(carry): H_col, i, k, givens = carry cs = givens[0, i] sn = givens[1, i] H_i = cs * H_col[i] - sn * H_col[i + 1] H_ip1 = sn * H_col[i] + cs * H_col[i + 1] H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i) H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1) return (H_col, i + 1, k, givens) rotation_carry = jax.lax.while_loop(loop_condition, apply_ith_rotation, rotation_carry) H_col = rotation_carry[0] return H_col
[ "def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies the Givens rotations stored in the vectors cs and sn to the vector H_col. Then constructs a new Givens rotation that eliminates H_col's k'th element, yielding the corresponding column of the R in H's QR decomposition. Returns the new column of R along with the new Givens factors.
def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]: # This call successively applies each of the # Givens rotations stored in givens[:, :k] to H_col. H_col = apply_rotations(H_col, givens, k) cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1]) givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k) givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k) r_k = cs_k * H_col[k] - sn_k * H_col[k + 1] R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k) R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.) return R_col, givens
[ "def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if quote already exists in Nostalgiabot's memory for this Person.
def has_said(self, quote: str) -> bool: return any(q for q in self.quotes if q.content.lower() == quote.lower())
[ "def check_penseive_quote_exists(self, pitem):\n calais_quotes = pitem.quoteitem_set.all().filter(entity__type__source='C')\n if calais_quotes.count() > 0:\n return True\n return False", "def _is_term_exist(self, term):\r\n return term in self.postingDict", "def _is_term_e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the stay time of visit
def update_stay_time(self): # It would not be better to simply self.stay_time = self.get_length() ?? self.stay_time = self.get_length()
[ "def _update_time(self):\n self.prev_time = time.time()", "def _RecordVisitTime(self, mr, now=None):\n now = now or int(time.time())\n if not settings.read_only and mr.auth.user_id:\n user_pb = mr.auth.user_pb\n if (user_pb.last_visit_timestamp <\n now - framework_constants.VISIT_R...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Repeats a message multiple times.
async def repeat(self,ctx, times: int, content='repeating...'): for i in range(times): await ctx.send(content)
[ "async def repeat(ctx, times: int, *, message):\n for i in range(times):\n await ctx.send(message)", "async def repeat(times : int, content='repeating...'):\r\n for i in range(times):\r\n await bot.say(content)", "async def repeat(ctx, times: int, content='repeating...'):\n for i in range...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create data that use Choice model
def create_choices(question_model, text="text", total_votes = 0): return Choice.objects.create(question=question_model, text=text, total_votes=total_votes)
[ "def test_choices_can_be_added():\n decision = Decision()\n decision.add_choice(\"Truck\")\n assert decision.choices == {\"Truck\": []}\n decision.add_choice(\"Van\")\n assert decision.choices == {\"Truck\": [], \"Van\": []}", "def create_choice(question, choice_text, votes=0):\n return question...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create data that use Answer model
def create_answer(question, user): return Answer.objects.create(question=question,answered_by=user)
[ "def __create_answering_data(self, answer):\n return {\"is_same\": answer, \"is_skipped\": False, \"_input_value_names\": \"\"}", "def create_answer(self, answer_form):\n return # osid.assessment.Answer", "def generate_questions(self):", "def create_quiz():\n user = User.objects.create(userna...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
same as create_user but using user manager
def create_user_using_manager(username,password): manager = UserManager() return manager.create_user(username=username, password=password)
[ "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_normal_user(self, **kwargs):\n return self.UserModel.objects.create_user(\n **kwargs\n )", "def create_user(user):\n create_edx_user(user)\n create_edx_auth_token(user)",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
populate question object with random string and user
def populate_poll(user="",total=10): user_list = None #create random user only when user argument empty if user == "": create_random_user(20) user_list = User.objects.all() for i in range(total): Question.objects.create( created_by=random.choice(user_list) if user_list is not None else user, title=create_random_string(seed_random(10)), text=create_random_string(seed_random(300)), slug=create_random_string(seed_random(100)) )
[ "def create_random_question(username):\n global questions\n user = users[username]\n available_questions_ids = list(questions.keys() - user[\"questions_asked\"])\n if len(questions) == 0:\n return None\n question_id = random.choice(available_questions_ids)\n question = questions[question_id...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create CreatePollQuestion dummy form
def create_dummy_form(title,text,fill_choice=[],choice_length=[]): # fill it with blank for dummy choices count=0 choices=[] while count < 8: choices.append(None) count+=1 # fill choices based on value on fill_choice for i in fill_choice: try : length = choice_length[i] except IndexError : length = 10 choices[i] = create_random_string(length) dummy_form=CreatePollQuestion( {"question_title":title, "question_text" :text, "choice_1":choices[0], "choice_2":choices[1], "choice_3":choices[2], "choice_4":choices[3], "choice_5":choices[4], "choice_6":choices[5], "choice_7":choices[6], "choice_8":choices[7], }) return dummy_form
[ "def create_question(self, question_form):\n return # osid.assessment.Question", "def testPollAddQuestion(self):\n pass", "def __init__(self, radio_poll, *args, **kwargs):\n super(RadioPollChoiceForm, self).__init__(*args, **kwargs)\n choices = (((None, '----'),) +\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts an array with WCS to altitude and azimuth coordinates
def getAltAz(arr,header,time,location): soln = wcs.WCS(header) coords = cartesian([arange(arr.shape[1]),arange(arr.shape[0])]) world = soln.wcs_pix2world(coords,0) radec = SkyCoord(ra=world[:,0],dec=world[:,1],frame='icrs',unit='deg') altaz = radec.transform_to(AltAz(obstime=time,location=telescope)) return altaz.alt.deg,altaz.az.deg,coords[:,0],coords[:,1]
[ "def getAltAzArray(ra, dec, timeArr):\n altitude = []\n azimuth = []\n for tt in timeArr:\n alt, az = findAltAz(ra, dec, jdT = tt) \n altitude.append(alt)\n azimuth.append(az)\n return np.array(altitude), np.array(azimuth)", "def AltAziConv(self): # Originally in CORRECT.PAS\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rotates the ADP of 'atom' to match the orientation of 'source_atom.
def rotate_3D(atom, source_atom): from lauescript.cryst.match import get_transform lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]] lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]] matrix = get_transform(lst1, lst2, matrix=True) adp = source_atom.adp['cart_int'] atom.adp['cart_int'] = rotate_adp(adp, matrix)
[ "def orient_to_source(self):\n self.heading = self.start_horizontal.az - (90 * degree)\n self.heading = self.heading.to('degree')", "def assign_rotating_atoms(atom1,atom2,atoms): \n atomsToRotate = [atom2]\n for atom in atomsToRotate:\n atom.rotate = True\n for connectedAtom...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the measured ADP from the xd.res file. The parameters are stored in atom.adp['frac_meas'] and atom.adp['cart_meas']
def read_meas_adp(data, path='xd.res', use='meas'): use2 = 'frac_' + use switch = False filepointer = open(path, 'r') atomname = None for line in filepointer: if switch: split = [i for i in line.split(' ') if len(i) > 0] if not len(split) == 6: print('WARNING!!! Inconsistend number of floats while\ reading measured ADP.') data['exp'][atomname].adp[use2] = split switch = False if '(' in line: split = [i for i in line.split(' ') if len(i) > 0] if split[0][-1] == ')': switch = True atomname = split[0] use = 'cart_' + use for atom in data['exp'].atoms: # if use == 'cart_neut': print(atom) atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.frac2cartmatrix, atom.molecule.cell) return data
[ "def test_readPDAS(self):\n st = readPDAS(self.testfile)\n self.assertTrue(isinstance(st, Stream))\n self.assertTrue(len(st) == 1)\n tr = st[0]\n expected = [('COMMENT', 'GAINRANGED'),\n ('DATASET', 'P1246001108'),\n ('FILE_TYPE', 'LONG'),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the ADP after reflection on the plane defined by its normal vector 'planev'.
def reflect_adp(adp, planev): M = np.identity(4) M[:3, :3] -= 2.0 * np.outer(planev, planev) M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev return rotate_adp(adp, M[:3, :3])
[ "def GetPlane(plane):\r\n pass", "def get_adp_from_calc(vx, vy, vz):\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates an ADP in its matrix representation from the three principle axis representing the displacement ellipsoid. The three principle axis of the ellipsoid are needed as arguments. A Matrix representation of the ADP is returned.
def get_adp_from_calc(vx, vy, vz): ## lx=np.linalg.norm(vx) ## ly=np.linalg.norm(vy) ## lz=np.linalg.norm(vz) lx = vx ly = vy lz = vz L = np.matrix([[lx, 0, 0], [0, ly, 0], [0, 0, lz]]) ## Vx=vx/lx ## Vy=vy/ly ## Vz=vz/lz Vx = np.array([1, 0, 0]) Vy = np.array([0, 1, 0]) Vz = np.array([0, 0, 1]) V = np.matrix([[Vx[0], Vy[0], Vz[0]], [Vx[1], Vy[1], Vz[1]], [Vx[2], Vy[2], Vz[2]]]) Vinv = np.linalg.inv(V) #print V,Vinv M = np.dot(np.dot(Vinv, L), V) #print M return M
[ "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines the the quaternion representing the best possible transformation of two coordinate systems into each other using a least sqare approach. This function is used by the get_refined_rotation() function.
def get_best_quaternion(coordlist1, coordlist2): M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) if len(coordlist1) <= len(coordlist2): number = len(coordlist1) else: number = len(coordlist2) for i in xrange(number): aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i])) M = M + aaa N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2]) N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2]) N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2]) N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2]) N12 = float(M[1][:, 2] - M[2][:, 1]) N13 = float(M[2][:, 0] - M[0][:, 2]) N14 = float(M[0][:, 1] - M[1][:, 0]) N21 = float(N12) N23 = float(M[0][:, 1] + M[1][:, 0]) N24 = float(M[2][:, 0] + M[0][:, 2]) N31 = float(N13) N32 = float(N23) N34 = float(M[1][:, 2] + M[2][:, 1]) N41 = float(N14) N42 = float(N24) N43 = float(N34) N = np.matrix([[N11, N12, N13, N14], [N21, N22, N23, N24], [N31, N32, N33, N34], [N41, N42, N43, N44]]) values, vectors = np.linalg.eig(N) w = list(values) quat = vectors[:, w.index(max(w))] quat = np.array(quat).reshape(-1, ).tolist() return quat, max(w)
[ "def find_best_rotation(q1, q2, allow_reflection = False, only_xy = False):\n if q1.ndim != 2 or q2.ndim != 2:\n raise Exception(\"This only supports curves of shape (N,M) for N dimensions and M samples\")\n\n n = q1.shape[0]\n\n # if only_xy, strip everything but the x and y coordinates of q1 and q...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the rotation matrix equivalent of the given quaternion. This function is used by the get_refined_rotation() function.
def get_rotation_matrix_from_quaternion(q): R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])], [2 * (q[2] * q[1] + q[0] * q[3]), q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 2 * (q[2] * q[3] - q[0] * q[1])], [2 * (q[3] * q[1] - q[0] * q[2]), 2 * (q[3] * q[2] + q[0] * q[1]), q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]]) return R
[ "def quaternion_to_rotation_matrix(q):\n\n # Original C++ method ('SetQuaternionRotation()') is defined in\n # pba/src/pba/DataInterface.h.\n # Parallel bundle adjustment (pba) code (used by visualsfm) is provided\n # here: http://grail.cs.washington.edu/projects/mcba/\n qq = math...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the geometrical center of a set of points.
def get_geom_center(coordlist):
    """Return the geometrical (arithmetic) center of the points in
    coordlist."""
    point_count = len(coordlist)
    return sum(coordlist) / point_count
[ "def get_center(points):\r\n tot_x = 0\r\n tot_y = 0\r\n for point in points:\r\n tot_x += point[0]\r\n tot_y += point[1]\r\n\r\n x = int(tot_x / len(points))\r\n y = int(tot_y / len(points))\r\n return x, y", "def center(self):\n center_lat = (max((x[0] for x in self._point...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves the geometrical center of the atoms in atomlist to the given point.
def move_center_to_point(atomlist, point):
    """Translate all coordinates in atomlist so that *point* becomes the
    origin. Mutates atomlist in place and returns it."""
    for index, coords in enumerate(atomlist):
        atomlist[index] = coords - point
    return atomlist
[ "def recenter(self, point=(0, 0)):\n self.center = Point(*point)", "def centerOn(self, point):\n rect = self.rect()\n x = point.x() - rect.width() / 2.0\n y = point.y() - rect.height() / 2.0\n \n self.setPos(x, y)", "def set_frame_center(self, point):\n self.fram...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rotates the adp with its corresponding rotation matrix.
def rotate_adp(adp, rotmat):
    """Rotate an ADP tensor with its corresponding rotation matrix.

    The tensor is given and returned in 6-component form; the components
    are placed as [[a0, a3, a4], [a3, a1, a5], [a4, a5, a2]] in the full
    symmetric 3x3 matrix.
    """
    a0, a1, a2, a3, a4, a5 = [float(component) for component in adp]
    tensor = np.matrix([[a0, a3, a4],
                        [a3, a1, a5],
                        [a4, a5, a2]])
    # R^T * U * R
    rotated = np.dot(np.dot(np.transpose(rotmat), tensor), rotmat)
    flat = np.array(rotated).flatten().tolist()
    return [flat[0], flat[4], flat[8], flat[1], flat[2], flat[5]]
[ "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the normal vector of a plane defined by the points p1,p2 and p3.
def get_normal_vector_of_plane(p1, p2, p3):
    """Return the unit normal vector of the plane defined by the points
    p1, p2 and p3."""
    edge_a = np.array(p1) - np.array(p2)
    edge_b = np.array(p1) - np.array(p3)
    normal = np.cross(edge_a, edge_b)
    return normal / np.linalg.norm(normal)
[ "def normal_vector_3p(a: Vector, b: Vector, c: Vector) -> Vector:\n return (b - a).cross(c - a).normalize()", "def normal(plane):\n return plane[:3].copy()", "def surface_normal(points, normalize=True):\n p1 = points[..., 0, :]\n p2 = points[..., 1, :]\n p3 = points[..., 2, :]\n normal = np.cr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list where every element is a list of three atomnames. The second and third names are the closest neighbours of the first names. The argument is a list as returned by frac_to_cart and the number of neighbours to be returned.
def get_closest_neighbours(atomlist, neighbours=2):
    """Return, for every atom, its name followed by its closest neighbours.

    Args:
        atomlist: list of [(name, ...), coordinates] entries as returned
            by frac_to_cart.
        neighbours: maximum number of neighbour names to report per atom.

    Returns:
        A list with one entry per atom: [own_name, neighbour1, ...],
        keeping only partners closer than the 2.5 cutoff (presumably
        Angstroms -- confirm against the coordinate units).
    """
    neighbourlist = []
    for atom in atomlist:
        # Pair each partner with its distance, excluding the atom itself by
        # identity. Sorting (distance, index) pairs fixes two defects of
        # the old min()/list.index() pairing: equal distances no longer
        # resolve to the wrong partner, and short lists no longer raise on
        # min() of an emptied list.
        ranked = sorted(
            (np.linalg.norm(atom[1] - partner[1]), idx)
            for idx, partner in enumerate(atomlist)
            if partner is not atom
        )
        listline = [atom[0][0]]
        for dist, idx in ranked[:neighbours]:
            if dist < 2.5:  # distance cutoff for a bonded neighbour
                listline.append(atomlist[idx][0][0])
        neighbourlist.append(listline)
    return neighbourlist
[ "def get_neighbors(a):\n\t\t\tif a == 0: neighbors = [a+1]\n\t\t\telif a == N-1: neighbors = [a-1]\n\t\t\telse: neighbors = [a-1, a+1]\n\t\t\treturn neighbors", "def find_C_with_N_terminals(atoms):\n CNH3_list = []\n for _ in range(len(atoms)):\n name = atoms[_].get_atom_name()\n if name != 'C...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates for every atom the distances to all other atoms in atomlist. Returns a list where every element is a list of all distances.
def calculate_distance_matrix(atomlist):
    """Calculate, for every atom, the distances to all other atoms.

    Distances are grouped by the partner's numeric code (partner[0][1]);
    each group is kept sorted in ascending order.

    Returns:
        One dict per atom mapping str(code) -> sorted list of distances.
    """
    distlist = []
    for atom in atomlist:
        atomdict = {}
        for partner in atomlist:
            group = str(int(partner[0][1]))
            atomdict.setdefault(group, []).append(
                np.linalg.norm(atom[1] - partner[1]))
            atomdict[group].sort()
        distlist.append(atomdict)
    return distlist
[ "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a dictionary where every returnvalue of frac_to_cart is keyed to its fragment name.
def read_multiple_coordinates(fragmentnames):
    """Read the coordinates of every fragment in *fragmentnames*.

    Calls read_coordinates() with path='<name>/' and frac_to_cart() for
    each fragment and keys the resulting cartesian atom list by fragment
    name.

    NOTE(review): read_coordinates() elsewhere in this file returns a
    3-tuple (cell, positions, sortkeylist); the 2-value unpacking below
    would raise ValueError against that version -- confirm which
    read_coordinates() is actually in scope.

    Returns:
        dict: fragment name -> atomlist as produced by frac_to_cart().
    """
    fragdict = {}
    for name in fragmentnames:
        path = name + '/'
        cell, pos = read_coordinates(path)
        atomlist = frac_to_cart(cell, pos)
        atomdict = {}
        # atomdict (name -> coordinates) is rebuilt per fragment but never
        # stored; only the full atomlist is kept in the result.
        for atom in atomlist:
            atomdict[atom[0][0]] = atom[1]
        fragdict[name] = atomlist
    return fragdict
[ "def load_info(self):\n info = dict()\n\n coordinates = {\n 'coordinates': open(self.temp_prefix, 'r'),\n }\n\n nb_files = {\n 'np': open(self.temp_prefix + 'np.vol', 'r'),\n 'fp': open(self.temp_prefix + 'pxpy.vol', 'r'),\n 'px': open(self.tem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the compound name and the cell parameters from a xd.mas style file specified by 'path'.
def read_xd_master_file(path, errorpointer):
    """Return (compound_name, cell) parsed from an xd.mas style file.

    Args:
        path: path of the xd.mas style file.
        errorpointer: writable file-like object; *path* is logged there
            when either field cannot be found.

    Returns:
        (compound_name, cell): the TITLE comment text and the list of
        floats on the CELL line, or (None, None) on failure.
    """
    compound_name = None
    cell = None
    # 'with' guarantees the file is closed even if parsing raises; the
    # original left the handle open on any error path.
    with open(path, 'r') as filepointer:
        for line in filepointer:
            if 'TITLE' in line:
                # Compound name is everything after the '!' comment marker.
                compound_name = line.partition('!')[2].strip()
            if 'CELL' in line:
                cell = [float(i) for i in line.split(" ") if '.' in i]
                break
    if compound_name is not None and cell is not None:
        return compound_name, cell
    # Best-effort contract: log the failing path instead of raising
    # (replaces the original bare except around unbound names).
    errorpointer.write(path + '\n')
    return None, None
[ "def parse_stellar_parameters(path):\n\n basename = os.path.basename(path).split(\"_\")[-1]\n parent_folder = path.split(\"/\")[-2]\n \n teff, logg, mh = (float(each) for each in \\\n (basename[1:5], basename[6:10], basename[11:16].rstrip(\"x\")))\n alpha_mh = 0.4 if \"alpha04\" in parent_fol...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the cell parameters from a 'xd.mas' file and the atomic positions from a 'xd.res' file. The function returns a list with the cell parameters and an dictionary which keys the atom name to its fractional coordinates.
def read_coordinates(path='', sort=True):
    """Read cell parameters from '<path>xd.mas' and atomic positions from
    '<path>xd.res'.

    Args:
        path: directory prefix (including trailing separator) of the files.
        sort: if True, order the returned key list by the number inside
            each atom name (uses get_number()).

    Returns:
        (cell, positions, sortkeylist): cell parameters as a list of
        floats (None when no CELL line is found), a dict mapping atom
        name -> fractional coordinates, and the atom names in (optionally
        sorted) input order.
    """
    cell = None
    positions = {}
    keylist = []  # keeps input order; important for the frequency reader
    # 'with' closes both files; the original leaked the handles.
    with open(path + 'xd.mas', 'r') as maspointer:
        for line in maspointer:
            if 'CELL ' in line:
                cell = [float(i) for i in line.split(" ") if '.' in i]
                break
    with open(path + 'xd.res', 'r') as respointer:
        for line in respointer:
            # Atom lines contain '('; '!' marks comment lines.
            if '(' in line and not '!' in line:
                coords = [float(i) for i in line.split(" ") if '.' in i]
                coords = coords[:-1]  # trailing float is not a coordinate
                key = line.split(" ")[0]
                keylist.append(key)
                positions[key] = coords
    if sort:
        # Reorder keys by the numeric part of the atom name: X(1), X(2), ...
        sortkeylist = []
        for i in range(len(keylist)):  # range, not Python-2-only xrange
            j = i + 1
            for key in keylist:
                number = get_number(key)
                if j == int(number):
                    sortkeylist.append(key)
    else:
        sortkeylist = keylist
    return cell, positions, sortkeylist
[ "def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number in the brackets of an atomname.
def get_number(atomname):
    """Return the characters inside the brackets of an atom name,
    e.g. 'C(12)' -> '12'."""
    collected = []
    inside = False
    for character in atomname:
        if character == ')':
            inside = False
        elif inside:
            collected.append(character)
        if character == '(':
            inside = True
    return ''.join(collected)
[ "def int_atom(atom):\n global __ATOM_LIST__\n #print(atom)\n atom = atom.lower()\n return __ATOM_LIST__.index(atom) + 1", "def get_number(name):\n try:\n num = int(re.findall(\"[0-9]+\", name)[0])\n except:\n num = -1\n return num", "def get_tag_number(element):\n reg =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and init a conv1d layer with spectral normalization
def _conv1d_spect(ni, no, ks=1, stride=1, padding=0, bias=False): conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias) nn.init.kaiming_normal_(conv.weight) if bias: conv.bias.data.zero_() return spectral_norm(conv)
[ "def __conv_initialize(layer: nn.Module) -> None:\n if type(layer) == nn.Conv2d:\n nn.init.kaiming_normal_(layer.weight)", "def conv_init(conv, act='linear'):\r\n n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels\r\n conv.weight.data.normal_(0, math.sqrt(2. / n))", "def t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function that returns the dedicated directory for Post media. This organizes user-uploaded Post content and is used by `ministry.models.Post.attachment` to save uploaded content. Arguments ========= instance: the Post object the upload belongs to. filename: name of the uploaded file. prepend: path prefix for the media root (defaults to `settings.MEDIA_ROOT`).
def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT):
    """Return the dedicated directory for Post media uploads.

    Used by `ministry.models.Post.attachment` to place user content under
    '<ministry media dir>/post_media/<filename>'.

    Raises:
        AttributeError: when the post belongs to neither a ministry nor a
            campaign.
    """
    if instance.ministry:
        owner = instance.ministry
    elif instance.campaign:
        owner = instance.campaign.ministry
    else:
        raise AttributeError(
            'There was an unknown error finding a dir for %s' % instance.title)
    return path.join(generic_media_dir(owner, prepend=prepend),
                     'post_media', filename)
[ "def upload(self, post):\n # TODO: handle filename conflicts\n directory = \".\".join(self.filename.split(\".\")[:-1])\n\n self.abspath = os.path.join(self.root_dir, directory)\n self.localpath = os.path.join(\"/static/gallery\", directory)\n if not os.path.exists(self.abspath):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility function that creates a dedicated directory for Post media. Arguments ========= instance: the Post object the directory belongs to. prepend: path prefix for the media root (defaults to `settings.MEDIA_ROOT`).
def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT):
    """Create the dedicated directory for Post media.

    Derives the directory from post_media_dir() and creates it together
    with any missing parent directories.

    Args:
        instance: the Post object the directory belongs to.
        prepend: path prefix for the media root (defaults to MEDIA_ROOT).
    """
    from os import makedirs  # local import keeps this fix self-contained

    for _dir_func in (post_media_dir,):
        _path = path.split(_dir_func(instance, "", prepend=prepend))[0]
        # makedirs(exist_ok=True) covers both the FileExistsError case and
        # the missing-parent FileNotFoundError case; the original handled
        # the latter by recursing with the same arguments, which its own
        # comment noted was infinitely recursive when prepend was wrong.
        makedirs(_path, exist_ok=True)
[ "def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT):\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _ministry = instance.campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.title\n rai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decrypts input ciphertext using a symmetric CryptoKey.
def decrypt_symmetric(self, ciphertext): from google.cloud import kms_v1 # Creates an API client for the KMS API. client = kms_v1.KeyManagementServiceClient() # The resource name of the CryptoKey. name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id, self.crypto_key_id) # Use the KMS API to decrypt the data. response = client.decrypt(name, ciphertext) return response.plaintext
[ "def decrypt_symmetric(secret_key, ciphertext, ttl=None):\n f = Fernet(secret_key)\n # fernet requires the ciphertext to be bytes, it will raise an exception\n # if it is a string\n return f.decrypt(bytes(ciphertext), ttl)", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that decrypts a file using the decrypt_symmetric method and writes the output of this decryption to a file named gcpkey.json
def decrypt_from_file(self, file_path):
    """Decrypt the file at *file_path* with decrypt_symmetric() and write
    the resulting text to downloaded-key/gcp-key.json.

    Args:
        file_path: path of the encrypted input file.
    """
    # Read and decrypt the ciphertext; 'with' closes the handle the
    # original left open.
    with open(file_path, "rb") as handle:
        decrypted = self.decrypt_symmetric(handle.read())
    json_string = decrypted.decode("utf-8")

    # Write the plaintext, creating the destination folder first (the
    # original touched the file twice but never created the folder, so it
    # failed when 'downloaded-key/' did not exist).
    destination_file_name = Path("downloaded-key/gcp-key.json")
    destination_file_name.parent.mkdir(parents=True, exist_ok=True)
    destination_file_name.touch(exist_ok=True)  # create file if missing
    destination_file_name.write_text(json_string)
[ "def decrypt_symmetric(filename, password):\n built = read_bytes_in_image(filename)\n key = keygen(password)\n return read_stream(key, built)", "def decrypt(directory, keyfile):\n credentials = service_account.Credentials.from_service_account_file(keyfile)\n # Creates an API client for the KMS API....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads key for configured service account and stores it in the folder generatedkey/
def download_key_from_blob(self):
    """Download the encrypted key for the configured service account from
    GCS into the downloaded-key/ folder.

    Returns:
        Path: the local file the blob was written to (named after the
        service account email).
    """
    source_blob_name = "generated-keys/{}".format(self.service_account_email)
    destination_name = self.service_account_email

    # Generate destination folder and file if they do not yet exist.
    folder = Path("downloaded-key/")
    folder.mkdir(parents=True, exist_ok=True)
    destination_file_name = folder / "{}".format(destination_name)  # named after service-account
    destination_file_name.touch(exist_ok=True)

    # Download the file and store it locally.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(self.bucket_name)
    blob = bucket.blob(source_blob_name)
    blob.download_to_filename(destination_file_name)

    # Print source and destination indicating successful download. The
    # original format string had one placeholder for two arguments, so the
    # destination was silently dropped.
    print('Encrypted key {} downloaded to -----> \n {}.'.format(
        source_blob_name, destination_file_name))
    return destination_file_name
[ "def download_default_key_pair():\n pass", "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)", "def downloadauthkey():\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate overlap among trajectories
def trajectory_overlap(gt_trajs, pred_traj):
    """Calculate the overlap between *pred_traj* and each ground-truth
    trajectory and return the best match.

    A pair's overlap is the smaller of its subject and object vIoU
    (viou_sx), so both participants must match.

    Returns:
        (max_overlap, max_index): the best overlap and the index of the
        matching ground-truth trajectory (0 when nothing overlaps).
    """
    max_overlap, max_index = 0, 0
    for index, gt_traj in enumerate(gt_trajs):
        subject_viou = viou_sx(gt_traj['sub_traj'], gt_traj['duration'],
                               pred_traj['sub_traj'], pred_traj['duration'])
        object_viou = viou_sx(gt_traj['obj_traj'], gt_traj['duration'],
                              pred_traj['obj_traj'], pred_traj['duration'])
        pair_viou = min(subject_viou, object_viou)
        if pair_viou > max_overlap:
            max_overlap, max_index = pair_viou, index
    return max_overlap, max_index
[ "def overlap_cost(track_a, track_b):\n return 1 - overlap(track_a.bbox, track_b.bbox)", "def poverlap(t1, t2, size1, size2):\n x0 = t1[0]\n y0 = t1[1]\n x1 = t1[0] + size1[0]\n y1 = t1[1] + size1[1]\n\n x2 = t2[0]\n y2 = t2[1]\n x3 = t2[0] + size2[0]\n y3 = t2[1] + size2[1]\n \n o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes a single string to a list of strings.
def decode(self, s):
    """Decode a single encoded string back to a list of strings:
    'null' represents the empty list, otherwise fields are separated by
    chr(257)."""
    return [] if s == "null" else s.split(chr(257))
[ "def decode(self, s):\n lststr = s.split(',')\n if s=='': return []\n rst = []\n for i in range(len(lststr)):\n rst.append(lststr[i])\n return rst", "def decode(self, s: str) -> [str]:", "def convert2list(string):\n if isinstance(string, list):\n return st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sort list of objects randomly then update everything in this world
def update(self, dt):
    """Advance the world by dt: shuffle the object list into a random
    order, then update every object."""
    random.shuffle(self.gameObjects)
    for game_object in self.gameObjects:
        game_object.update(dt)
[ "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def sort(self):\n self.red_bucket = (140, 0)\n self.green_bucket = (230, 0)\n self.blue_bucket = (-230, 0)\n self.rest();\n while l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add this to the world
def add_to_world(self, thing):
    """Register *thing* with this world: hand it the next id, append it to
    the object list and advance the id counter."""
    assigned_id = self.current_id
    thing.set_world_info(assigned_id, self)
    self.gameObjects.append(thing)
    self.current_id = assigned_id + 1
[ "def create_the_world(cls):\n from muddery.server.mappings.element_set import ELEMENT\n world = ELEMENT(\"WORLD\")()\n world.setup_element(\"\")\n cls._world_data = world", "def create_world(self, parent):\n raise NotImplementedError", "def world(self, value):\n self.wo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge data from an apdex metric object.
def merge_apdex_metric(self, metric):
    """Merge data from an apdex metric object into this stats row.

    Slots 0-2 accumulate the satisfying / tolerating / frustrating counts;
    slot 3 tracks the minimum apdex_t and slot 4 the maximum apdex_t.
    """
    self[0] += metric.satisfying
    self[1] += metric.tolerating
    self[2] += metric.frustrating

    # Preserve the original and/or short-circuit: while all counts are
    # zero the stored minimum is ignored and apdex_t is taken outright
    # (and a falsy 0.0 minimum also falls through to apdex_t).
    has_counts = self[0] or self[1] or self[2]
    self[3] = (has_counts and min(self[3], metric.apdex_t)) or metric.apdex_t
    self[4] = max(self[4], metric.apdex_t)
[ "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge data from a time metric object.
def merge_time_metric(self, metric):
    """Merge data from a time metric object by folding its duration and
    exclusive time into the raw-time accumulator."""
    self.merge_raw_time_metric(metric.duration, metric.exclusive)
[ "def _add_to_prometheus_metrics(self, scope, data):\n\n try: created = parse(data.get(\"created_at\")).timestamp()\n except TypeError: created = 0\n try: finished = parse(data.get(\"finished_at\")).timestamp()\n except TypeError: finished = 0\n try: started = parse(data.get(\"star...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record a single value metric, merging the data with any data from prior value metrics with the same name.
def record_custom_metric(self, name, value):
    """Record a single value metric, merging the data with any data from
    prior value metrics with the same name.

    Args:
        name: metric name used as the stats-table key.
        value: either a plain number, or a dict -- {'count': n} yields a
            count-only metric, any other dict is passed as keyword data
            to c2t() to build a TimeStats entry.
    """
    if isinstance(value, dict):
        if len(value) == 1 and 'count' in value:
            new_stats = CountStats(call_count=value['count'])
        else:
            new_stats = TimeStats(*c2t(**value))
    else:
        # Scalar value: a single call with identical total/min/max times
        # and the squared value as the sum of squares.
        new_stats = TimeStats(1, value, value, value, value, value**2)

    stats = self.__stats_table.get(name)

    if stats is None:
        self.__stats_table[name] = new_stats
    else:
        stats.merge_stats(new_stats)
[ "def _add_single_metric(self, timestamp, metric_name, value):\n # note that this method is built this way to make it possible to\n # support live-refreshing charts in Bokeh at some point in the future.\n self._data[\"timestamp\"].append(timestamp)\n self._data[\"metric_name\"].append(met...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an iterator over the set of value metrics. The items returned are a tuple consisting of the metric name and accumulated stats for the metric.
def metrics(self):
    """Return an iterator over the recorded metrics as
    (metric_name, accumulated_stats) tuples."""
    return six.iteritems(self.__stats_table)
[ "def values(self):\r\n return MetricaValues(self)", "def values(self) -> tuple:\n return tuple(self.__metrics__.values())", "def values(self):\n if not self.is_value:\n raise GrizzlyError(\"GrizzlySeries is not evaluated and does not have values. Try calling 'evaluate()' first.\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge data from a slow sql node object.
def merge_slow_sql_node(self, node):
    """Merge data from a slow sql node object into this stats row.

    Slot layout: [0] call count, [1] total duration, [2] min duration,
    [3] max duration, [4] the node of the slowest call seen.
    """
    duration = node.duration

    self[1] += duration
    # and/or idiom: before the first call (self[0] == 0) the stored
    # minimum is meaningless, so the incoming duration is taken outright.
    self[2] = self[0] and min(self[2], duration) or duration
    self[3] = max(self[3], duration)

    # The slowest call's node is retained alongside its duration.
    if self[3] == duration:
        self[4] = node

    # Must update the call count last as update of the
    # minimum call time is dependent on initial value.
    self[0] += 1
[ "def concatenate_data():", "def merge(self, obj, **kwargs):\r\n raise NotImplementedError\r\n # if type(obj) == StreamFork:\r\n # node = obj.node\r\n # else:\r\n # node = obj\r\n #\r\n # self.stream.append(node)\r\n #\r\n # merge = MergeNode(*...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a count of the number of unique metrics currently recorded for apdex, time and value metrics.
def metrics_count(self):
    """Return the number of unique metrics currently recorded (apdex,
    time and value metrics share the one stats table)."""
    return len(self.__stats_table)
[ "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def count_metrics(self):\n\n return set(self._counts.ke...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record a single apdex metric, merging the data with any data from prior apdex metrics with the same name.
def record_apdex_metric(self, metric):
    """Record a single apdex metric, merging the data with any data from
    prior apdex metrics with the same name.

    Returns:
        The (name, '') stats-table key, or None when no settings are
        available.
    """
    if not self.__settings:
        return

    # Note that because we are using a scope here of an empty string
    # we can potentially clash with an unscoped metric. Using None,
    # although it may help to keep them separate in the agent will
    # not make a difference to the data collector which treats None
    # as an empty string anyway.

    key = (metric.name, '')
    stats = self.__stats_table.get(key)
    if stats is None:
        stats = ApdexStats(apdex_t=metric.apdex_t)
        self.__stats_table[key] = stats
    stats.merge_apdex_metric(metric)

    return key
[ "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)", "def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] +...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record the apdex metrics supplied by the iterable for a single transaction, merging the data with any data from prior apdex metrics with the same name.
def record_apdex_metrics(self, metrics):
    """Record every apdex metric in *metrics* for a single transaction,
    merging each with prior data of the same name; no-op when no settings
    are available."""
    if self.__settings:
        for single_metric in metrics:
            self.record_apdex_metric(single_metric)
[ "def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record a single time metric, merging the data with any data from prior time metrics with the same name and scope.
def record_time_metric(self, metric):
    """Record a single time metric, merging the data with any data from
    prior time metrics with the same name and scope.

    Returns:
        The (name, scope) stats-table key, or None when no settings are
        available.
    """
    if not self.__settings:
        return

    # Scope is forced to be empty string if None as
    # scope of None is reserved for apdex metrics.

    key = (metric.name, metric.scope or '')
    stats = self.__stats_table.get(key)
    if stats is None:
        # First occurrence: seed the stats from this single call.
        stats = TimeStats(call_count=1,
                total_call_time=metric.duration,
                total_exclusive_call_time=metric.exclusive,
                min_call_time=metric.duration,
                max_call_time=metric.duration,
                sum_of_squares=metric.duration ** 2)
        self.__stats_table[key] = stats
    else:
        stats.merge_time_metric(metric)

    return key
[ "def _add_to_prometheus_metrics(self, scope, data):\n\n try: created = parse(data.get(\"created_at\")).timestamp()\n except TypeError: created = 0\n try: finished = parse(data.get(\"finished_at\")).timestamp()\n except TypeError: finished = 0\n try: started = parse(data.get(\"star...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record the time metrics supplied by the iterable for a single transaction, merging the data with any data from prior time metrics with the same name and scope.
def record_time_metrics(self, metrics):
    """Record every time metric in *metrics* for a single transaction,
    merging each with prior data of the same name and scope; no-op when
    no settings are available."""
    if self.__settings:
        for single_metric in metrics:
            self.record_time_metric(single_metric)
[ "def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if transaction is the slowest transaction and update accordingly.
def _update_slow_transaction(self, transaction):
    """Check if *transaction* is the slowest seen and update accordingly.

    The candidate must beat both the current slowest transaction and the
    historical per-name record in __slow_transaction_map.
    """
    slowest = 0
    name = transaction.path

    if self.__slow_transaction:
        slowest = self.__slow_transaction.duration
    if name in self.__slow_transaction_map:
        slowest = max(self.__slow_transaction_map[name], slowest)

    if transaction.duration > slowest:
        # We are going to replace the prior slow transaction.
        # We need to be a bit tricky here. If we are overriding
        # an existing slow transaction for a different name,
        # then we need to restore in the transaction map what
        # the previous slowest duration was for that, or remove
        # it if there wasn't one. This is so we do not incorrectly
        # suppress it given that it was never actually reported
        # as the slowest transaction.

        if self.__slow_transaction:
            if self.__slow_transaction.path != name:
                if self.__slow_transaction_old_duration:
                    self.__slow_transaction_map[
                            self.__slow_transaction.path] = (
                            self.__slow_transaction_old_duration)
                else:
                    del self.__slow_transaction_map[
                            self.__slow_transaction.path]

        # Remember the value we are about to overwrite so it can be
        # restored if this slot is itself replaced later.
        if name in self.__slow_transaction_map:
            self.__slow_transaction_old_duration = (
                    self.__slow_transaction_map[name])
        else:
            self.__slow_transaction_old_duration = None

        self.__slow_transaction = transaction
        self.__slow_transaction_map[name] = transaction.duration
[ "def LastTransaction(self) -> bool:", "def merge_slow_sql_node(self, node):\n\n duration = node.duration\n\n self[1] += duration\n self[2] = self[0] and min(self[2], duration) or duration\n self[3] = max(self[3], duration)\n\n if self[3] == duration:\n self[4] = node\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if transaction is a synthetics trace and save it to __synthetics_transactions.
def _update_synthetics_transaction(self, transaction): settings = self.__settings if not transaction.synthetics_resource_id: return maximum = settings.agent_limits.synthetics_transactions if len(self.__synthetics_transactions) < maximum: self.__synthetics_transactions.append(transaction)
[ "def isSStx(tx):\n try:\n checkSStx(tx)\n\n except Exception as e:\n log.debug(\"isSStx: {}\".format(e))\n\n else:\n return True", "def save(self, trade: Trade) -> Trade:\n\n pass # pragma: no cover", "def isTx(self):\n\t\treturn self.extension == '.tx'", "def trace2Snaps...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record any apdex and time metrics for the transaction as well as any errors which occurred for the transaction. If the transaction qualifies to become the slow transaction remember it for later.
def record_transaction(self, transaction):
    """Record all data generated by one finished transaction.

    Records apdex/custom/time metrics, captures errors and error events,
    records slow SQL nodes, remembers the transaction as the slowest or as
    a Synthetics trace where applicable, and files transaction, custom and
    span events into their buckets. No-op when no settings are available.
    """
    if not self.__settings:
        return

    settings = self.__settings

    # Record the apdex, value and time metrics generated from the
    # transaction. Whether time metrics are reported as distinct
    # metrics or into a rollup is in part controlled via settings
    # for minimum number of unique metrics to be reported and thence
    # whether over a time threshold calculated as percentage of
    # overall request time, up to a maximum number of unique
    # metrics. This is intended to limit how many metrics are
    # reported for each transaction and try and cut down on an
    # explosion of unique metric names. The limits and thresholds
    # are applied after the metrics are reverse sorted based on
    # exclusive times for each metric. This ensures that the metrics
    # with greatest exclusive time are retained over those with
    # lesser time. Such metrics get reported into the performance
    # breakdown tab for specific web transactions.

    self.record_apdex_metrics(transaction.apdex_metrics(self))

    self.merge_custom_metrics(transaction.custom_metrics.metrics())

    self.record_time_metrics(transaction.time_metrics(self))

    # Capture any errors if error collection is enabled.
    # Only retain maximum number allowed per harvest.

    error_collector = settings.error_collector

    if (error_collector.enabled and settings.collect_errors and
            len(self.__transaction_errors) <
            settings.agent_limits.errors_per_harvest):
        self.__transaction_errors.extend(transaction.error_details())

        self.__transaction_errors = self.__transaction_errors[:
                settings.agent_limits.errors_per_harvest]

    if (error_collector.capture_events and
            error_collector.enabled and
            settings.collect_error_events):
        events = transaction.error_events(self.__stats_table)
        for event in events:
            self._error_events.add(event, priority=transaction.priority)

    # Capture any sql traces if transaction tracer enabled.

    if settings.slow_sql.enabled and settings.collect_traces:
        for node in transaction.slow_sql_nodes(self):
            self.record_slow_sql_node(node)

    # Remember as slowest transaction if transaction tracer
    # is enabled, it is over the threshold and slower than
    # any existing transaction seen for this period and in
    # the historical snapshot of slow transactions, plus
    # recording of transaction trace for this transaction
    # has not been suppressed.

    transaction_tracer = settings.transaction_tracer

    if (not transaction.suppress_transaction_trace and
            transaction_tracer.enabled and settings.collect_traces):

        # Transactions saved for Synthetics transactions
        # do not depend on the transaction threshold.

        self._update_synthetics_transaction(transaction)

        threshold = transaction_tracer.transaction_threshold
        if threshold is None:
            # Default threshold: four times the apdex target.
            threshold = transaction.apdex_t * 4

        if transaction.duration >= threshold:
            self._update_slow_transaction(transaction)

    # Create the transaction event and add it to the
    # appropriate "bucket." Synthetic requests are saved in one,
    # while transactions from regular requests are saved in another.

    if transaction.synthetics_resource_id:
        event = transaction.transaction_event(self.__stats_table)
        self._synthetics_events.add(event)

    elif (settings.collect_analytics_events and
            settings.transaction_events.enabled):

        event = transaction.transaction_event(self.__stats_table)
        self._transaction_events.add(event, priority=transaction.priority)

    # Merge in custom events

    if (settings.collect_custom_events and
            settings.custom_insights_events.enabled):
        self.custom_events.merge(transaction.custom_events)

    # Merge in span events

    if (settings.distributed_tracing.enabled and
            settings.span_events.enabled and
            settings.collect_span_events):
        if settings.infinite_tracing.enabled:
            for event in transaction.span_protos(settings):
                self._span_stream.put(event)
        elif transaction.sampled:
            for event in transaction.span_events(self.__settings):
                self._span_events.add(event, priority=transaction.priority)
[ "def _update_slow_transaction(self, transaction):\n\n slowest = 0\n name = transaction.path\n\n if self.__slow_transaction:\n slowest = self.__slow_transaction.duration\n if name in self.__slow_transaction_map:\n slowest = max(self.__slow_transaction_map[name], slow...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a count of the number of unique metrics.
def metric_data_count(self):
    """Return the number of unique metrics recorded, or 0 when no
    settings are available."""
    return len(self.__stats_table) if self.__settings else 0
[ "def count_metrics(self):\n\n return set(self._counts.keys())", "def metrics_count(self):\n\n return len(self.__stats_table)", "def getUnseenCount():", "def unique_counts(self) -> Self:\n return self._from_pyexpr(self._pyexpr.unique_counts())", "def get_number_of_metrics(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of slow transaction data collected during the reporting period.
def transaction_trace_data(self, connections):
    """Return a list of slow transaction data collected during the
    reporting period, encoded for transmission.

    Parameters:
        connections: database connections object handed through to
            each trace's transaction_trace() call (used when
            generating explain plans).

    Returns:
        A list of per-trace records, each of the form
        [start_time, duration, path, request_uri, packed_payload,
        guid, None, force_persist, None, synthetics_resource_id],
        where packed_payload is the zlib-compressed, base64-encoded
        JSON encoding of the trace. Returns [] when the engine has
        no settings or no traces were captured.
    """

    _logger.debug('Generating transaction trace data.')

    if not self.__settings:
        return []

    # Create a set 'traces' that is a union of slow transaction,
    # and Synthetics transactions. This ensures we don't send
    # duplicates of a transaction.

    traces = set()

    if self.__slow_transaction:
        traces.add(self.__slow_transaction)

    traces.update(self.__synthetics_transactions)

    # Return an empty list if no transactions were captured.

    if not traces:
        return []

    # We want to limit the number of explain plans we do across
    # these. So work out what were the slowest and tag them.
    # Later the explain plan will only be run on those which are
    # tagged.

    agent_limits = self.__settings.agent_limits
    explain_plan_limit = agent_limits.sql_explain_plans_per_harvest
    maximum_nodes = agent_limits.transaction_traces_nodes

    database_nodes = []

    if explain_plan_limit != 0:
        for trace in traces:
            for node in trace.slow_sql:
                # Make sure we clear any flag for explain plans on
                # the nodes in case a transaction trace was merged
                # in from previous harvest period.

                node.generate_explain_plan = False

                # Node should be excluded if not for an operation
                # that we can't do an explain plan on. Also should
                # not be one which would not be included in the
                # transaction trace because limit was reached.

                if (node.node_count < maximum_nodes and
                        node.connect_params and
                        node.statement.operation in
                        node.statement.database.explain_stmts):
                    database_nodes.append(node)

        # Only the slowest 'explain_plan_limit' database nodes get
        # tagged for explain-plan generation.

        database_nodes = sorted(database_nodes,
                key=lambda x: x.duration)[-explain_plan_limit:]

        for node in database_nodes:
            node.generate_explain_plan = True

    else:
        # No limit configured: tag every database node.

        for trace in traces:
            for node in trace.slow_sql:
                node.generate_explain_plan = True
                database_nodes.append(node)

    # Now generate the transaction traces. We need to cap the
    # number of nodes captured to the specified limit.

    trace_data = []

    for trace in traces:
        transaction_trace = trace.transaction_trace(
                self, maximum_nodes, connections)

        data = [transaction_trace,
                list(trace.string_table.values())]

        if self.__settings.debug.log_transaction_trace_payload:
            _logger.debug('Encoding slow transaction data where '
                    'payload=%r.', data)

        # The payload is JSON-encoded, zlib-compressed and then
        # base64-encoded for transport.

        json_data = json_encode(data)

        level = self.__settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        zlib_data = zlib.compress(six.b(json_data), level)

        pack_data = base64.standard_b64encode(zlib_data)

        # On Python 3 the base64 result is bytes; decode to str so
        # the payload can be embedded in the JSON sent upstream.

        if six.PY3:
            pack_data = pack_data.decode('Latin-1')

        root = transaction_trace.root

        if trace.record_tt:
            force_persist = True
        else:
            force_persist = False

        if trace.include_transaction_trace_request_uri:
            request_uri = trace.request_uri
        else:
            request_uri = None

        trace_data.append([transaction_trace.start_time,
                root.end_time - root.start_time,
                trace.path,
                request_uri,
                pack_data,
                trace.guid,
                None,
                force_persist,
                None,
                trace.synthetics_resource_id,
                ])

    return trace_data
[ "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__setti...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list containing any slow transaction data collected during the reporting period. NOTE: Currently only the slowest transaction for the reporting period is retained.
def slow_transaction_data(self):
    """Return a list holding the encoded trace of the slowest
    transaction seen this reporting period, or [] if there is none.

    NOTE: Only the single slowest transaction for the period is
    retained, so the result has at most one entry.
    """

    # XXX This method no longer appears to be used. Being replaced
    # by the transaction_trace_data() method.

    settings = self.__settings
    if not settings:
        return []

    slow = self.__slow_transaction
    if not slow:
        return []

    maximum = settings.agent_limits.transaction_traces_nodes
    transaction_trace = slow.transaction_trace(self, maximum)
    data = [transaction_trace, list(slow.string_table.values())]

    if settings.debug.log_transaction_trace_payload:
        _logger.debug('Encoding slow transaction data where payload=%r.',
                data)

    # JSON-encode, compress and base64-encode the payload.

    json_data = json_encode(data)

    compression = settings.agent_limits.data_compression_level
    compression = compression or zlib.Z_DEFAULT_COMPRESSION

    zlib_data = zlib.compress(six.b(json_data), compression)
    pack_data = base64.standard_b64encode(zlib_data)

    if six.PY3:
        pack_data = pack_data.decode('Latin-1')

    root = transaction_trace.root

    return [[root.start_time,
             root.end_time - root.start_time,
             slow.path,
             slow.request_uri,
             pack_data]]
[ "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the accumulated statistics back to initial state for metric data.
def reset_metric_stats(self):
    """Discard all accumulated metric stats, restoring the empty
    initial state of the metric table."""
    self.__stats_table = dict()
[ "def reset_statistics(self):\n self._statistics = {}", "def _reset_stats(self):\n self.confusion_matrix = None", "def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"rows\"] = 0\n STATS[\"correct_format\"] = 0\n STATS[\"incorrect_format\"] = 0\n STATS[\"first_val\"] = 0", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges data from a single transaction. Snapshot is an instance of StatsEngine that contains stats for the single transaction.
def merge(self, snapshot):
    """Merge data from a single transaction into this stats engine.

    The snapshot is an instance of StatsEngine containing the stats
    collected for just that one transaction.
    """

    # Nothing to merge until the engine has settings.

    if not self.__settings:
        return

    self.merge_metric_stats(snapshot)

    # Each step merges one category of collected data; the sequence
    # below preserves the original call order.

    for merge_one in (self._merge_transaction_events,
                      self._merge_synthetics_events,
                      self._merge_error_events,
                      self._merge_error_traces,
                      self._merge_custom_events,
                      self._merge_span_events,
                      self._merge_sql,
                      self._merge_traces):
        merge_one(snapshot)
[ "def merge_transaction(self, transaction):\n self.inputs += transaction.inputs\n self.outputs += transaction.outputs\n self.shuffle()\n self.update_totals()\n self.sign_and_update()", "def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a "rollback" merge after a failed harvest. Snapshot is a copy of the main StatsEngine data that we attempted to harvest, but failed. Not all types of data get merged during a rollback.
def rollback(self, snapshot):
    """Perform a "rollback" merge after a failed harvest.

    Parameters:
        snapshot: a copy of the main StatsEngine data that we
            attempted to harvest, but failed.

    Not all types of data get merged during a rollback: metric
    stats, transaction events, synthetics events, error events,
    custom events and span events are preserved into the next
    harvest period; traces and SQL are not.
    """

    if not self.__settings:
        return

    # Fix: the original message concatenated adjacent string
    # literals without a separating space, producing the word
    # "eventswill"; a trailing space has been added.
    _logger.debug('Performing rollback of data into '
            'subsequent harvest period. Metric data and transaction events '
            'will be preserved and rolled into next harvest')

    self.merge_metric_stats(snapshot)
    # Event reservoirs are merged in rollback mode so sampling
    # priorities are respected when combining with new data.
    self._merge_transaction_events(snapshot, rollback=True)
    self._merge_synthetics_events(snapshot, rollback=True)
    self._merge_error_events(snapshot)
    self._merge_custom_events(snapshot, rollback=True)
    self._merge_span_events(snapshot, rollback=True)
[ "def rollback(self, stage, enodes, exception):", "def revert_to_snapshot(self, context, share, snapshot):\n\n reservations = self._handle_revert_to_snapshot_quotas(\n context, share, snapshot)\n\n try:\n if share.get('has_replicas'):\n self._revert_to_replicated_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }