| query (string, 9 – 9.05k chars) | document (string, 10 – 222k chars) | negatives (list, 19–20 items) | metadata (dict) |
|---|---|---|---|
View all the images in the dataset on a 3-by-X grid, using the given figure size.
|
import numpy as np
import matplotlib.pyplot as plt


def view_images(dataset, size):
    # dataset is an (images, labels) pair; size is the matplotlib figure size
    images, labels = dataset
    assert images.shape[0] == labels.shape[0]
    num_images = images.shape[0]
    num_cols = 3
    num_rows = np.ceil(num_images / num_cols).astype("int")
    plt.figure(figsize=size)
    for i in range(num_images):
        image = images[i]
        label = labels[i]
        ax = plt.subplot(num_rows, num_cols, i + 1)
        plt.imshow(np.array(image, dtype="float"))
        plt.title("Number: " + str(label))
        plt.axis("off")
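

# Hypothetical usage sketch (not part of the original snippet): show the first
# 12 MNIST digits on the 3-column grid. Assumes tensorflow/keras is installed;
# the dataset choice and figure size are illustrative only.
if __name__ == "__main__":
    from tensorflow.keras.datasets import mnist

    (train_images, train_labels), _ = mnist.load_data()
    view_images((train_images[:12], train_labels[:12]), size=(9, 12))
    plt.show()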
|
[
"def display_three_train_images(train_dataset):\r\n plt.figure(figsize=(10, 14))\r\n for input_images, _ in train_dataset.take(1):\r\n for i in range(3):\r\n plt.subplot(3, 1, i+1)\r\n plt.imshow(np.squeeze(input_images[i]), cmap='gray')\r\n plt.show()",
"def draw_dataset(dataset, num_images, num_col, figsize=(15, 40)):\n fig = plt.figure(figsize=figsize)\n for idx, image in enumerate(dataset):\n plt.subplot(math.ceil(num_images / num_col), num_col, idx + 1)\n plt.imshow(image)\n plt.title(idx)\n if idx + 1 == num_images:\n break\n plt.show()\n return fig",
"def display_sample_images(self):\n if self.train_dataset is None:\n self.init_datasets()\n\n images, labels = next(self.train_dataset)\n plt.figure(figsize=(5,5))\n for n in range(min(25, images.shape[0])):\n ax = plt.subplot(5,5,n+1)\n plt.imshow(images[n])\n if len(labels.shape) == 1:\n plt.title(self.class_names[int(labels[n])].title())\n else:\n m = np.argmax(labels[n])\n plt.title(self.class_names[int(labels[n, m])].title())\n plt.axis('off')\n\n plt.tight_layout()\n plt.show()",
"def display_images(imbatch):\n n_im = imbatch.shape[0]\n n_rows = n_im // 4 + 1\n _, axes = plt.subplots(n_rows, 4, squeeze=False, figsize=(4 * 6.4, n_rows * 4.8))\n for r in range(n_rows):\n for c in range(4):\n axes[r][c].axis(\"off\")\n for i in range(imbatch.shape[0]):\n display_image(imbatch[i], axes[i // 4][i % 4])",
"def show_imagegrid_dataset(dataset,\n num=10,\n shuffle=True,\n classes='auto',\n figsize=None,\n fontsize=20,\n image_attr={'cmap': plt.cm.Greys_r}):\n sample = dataset[0]\n if isinstance(sample, tuple) and len(sample) == 2:\n images_per_class = get_labeled_imagegrid(dataset,\n num=num,\n shuffle=shuffle,\n classes=classes)\n num = min(num, max(map(len, images_per_class.values())))\n classes = list(images_per_class.keys())\n\n if figsize is None:\n figsize = (2 * num, 2 * len(classes))\n fig, axs = plt.subplots(figsize=figsize, nrows=len(classes), ncols=num)\n if len(classes) == 1:\n axs = np.expand_dims(axs, 0)\n if num == 1:\n axs = np.expand_dims(axs, -1)\n for i, (class_name, class_images) in enumerate(images_per_class.items()):\n for j, img in enumerate(class_images):\n show_image(img, axs[i][j], image_attr)\n axs[i][0].set_ylabel(str(class_name), fontsize=fontsize)\n elif isinstance(sample, (Image, torch.Tensor, np.ndarray)):\n image_list = get_imagegrid(dataset,\n num=num,\n shuffle=shuffle)\n num = min(len(image_list), num)\n nrows = math.ceil(math.sqrt(num))\n ncols = math.ceil(num / nrows)\n if figsize is None:\n figsize = (2 * nrows, 2 * ncols)\n fig, axs = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols)\n axs = axs.flatten()\n for i, img in enumerate(image_list):\n show_image(img, axs[i], image_attr)",
"def show_images(self, split):\n\n # get split\n data_split = getattr(self, split, None)\n if data_split is None:\n raise ValueError('Invalid data split')\n\n # display images\n util.disp_imdata(data_split.x, self.image_size, [6, 10])\n\n plt.show()",
"def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)",
"def use_dataset_imgs(self):\n self.cam = DatasetCamera()\n idx = '00000{}'.format(self.viewset.numViews)\n im = cv2.imread('tests/image_0/{}.png'.format(idx[-6:]))\n return im",
"def show_images(self):\n\t\tself.im.show()\n\t\tself.kmeans_colorset_im.show()",
"def visulize_5(X):\n fig, axes1 = plt.subplots(5,5,figsize=(3,3))\n for j in range(5):\n for k in range(5):\n i = np.random.choice(range(len(X)))\n axes1[j][k].set_axis_off()\n axes1[j][k].imshow(X[:,i].reshape(32, 32, 3))\n plt.show()",
"def display_image_samples(dataset):\r\n\tsamples = sample_images(dataset.feature, dataset.label)\r\n\tsubtitles = load_names()\r\n\tdisplay_image_grid_alt(samples, title=\"Image samples\", subtitle=subtitles)",
"def display_grid(samples, titles, rows, cols,figsize=(12, 6)):\n \n #Aqui en lugar de un array con los arrays de imagenes se pasan las rutas de las imagenes\n assert len(samples)==(rows*cols), 'Mismatch between df length and input sizes'\n fig, ax = plt.subplots(rows, cols, figsize=figsize)\n i = 0\n for r in range(rows):\n for c in range(cols):\n print(samples[i])\n ax[r, c].imshow(plt.imread(f\"train_images/{samples[i]}\"), cmap='gray')\n ax[r, c].set_title(titles[i])\n ax[r, c].set_xticklabels([])\n ax[r, c].set_yticklabels([])\n i += 1\n fig.tight_layout()\n plt.show()",
"def imshow_grid(images, shape=[2, 8]):\n fig = plt.figure(1)\n grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)\n\n size = shape[0] * shape[1]\n for i in range(size):\n grid[i].axis('off')\n grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.\n\n plt.show()",
"def show_images(self, idxs, title):\n fig = plt.figure(figsize=(15, 3))\n \n fig_i = 1\n for i in idxs:\n img, label = self.image_dataset[i]\n img = img.numpy()\n ax = fig.add_subplot(1, len(idxs), fig_i)\n ax.axis(\"off\")\n fig.suptitle(title)\n imshow(img)\n fig_i += 1\n \n return fig",
"def show_train_images(train_data, train_labels):\n plt.figure(1, figsize=(8, 8))\n n = 0\n\n for i in range(16):\n n += 1\n # each time random images are loaded\n # r = np.random.randint(0, train_data.shape[0], 1)\n plt.subplot(4, 4, n)\n plt.subplots_adjust(hspace=0.5, wspace=0.5)\n plt.imshow(train_data[i] / 255.)\n plt.title('{}'.format(train_labels[i]))\n plt.xticks([]), plt.yticks([])\n plt.show()",
"def Mkgrid():\n\tinputs, classes = next(iter(dataset_loader_t['train']))\n\tout = torchvision.utils.make_grid(inputs)\n\thalf = round(len(classes)/2) -1\n\tprint(half)\n\ttitles = [class_names[x].split(\"_\")[1][0:3] for x in classes]\n\ttitles[half] = \"\".join([titles[half], \"\\n\"])\n\ttitle_joined = \", \".join(titles)\n\t\n\timshow(out, title=title_joined)\n\tplt.savefig(\"grid.jpg\")",
"def show_images(images, db):\n images = [int(image) for image in images]\n files = get_img_files(images, db)\n show_files(files)",
"def show_image_grid(imgs):\n grd = make_grid(imgs)\n npimg = grd.numpy()\n plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')\n plt.ion()\n plt.show()",
"def plot_n_image(X, n):\n pic_size = int(np.sqrt(X.shape[1]))\n grid_size = int(np.sqrt(n))\n\n first_n_images = X[:n, :]\n\n fig, ax_array = plt.subplots(nrows=grid_size, ncols=grid_size,sharey=True, sharex=True, figsize=(8, 8))\n\n for r in range(grid_size):\n for c in range(grid_size):\n ax_array[r, c].imshow(first_n_images[grid_size * r + c].reshape((pic_size, pic_size)))\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Normalises and reshapes the images in the dataset.
|
import numpy as np


def normalise(dataset):
    # Scale images to the [0, 1] range
    dataset = dataset.astype("float32") / 255
    # Make sure images have shape (28, 28, 1)
    return np.expand_dims(dataset, -1)
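

# Hypothetical usage sketch (not in the original): normalise a small batch of
# fake 28x28 uint8 images and check the resulting shape and value range.
example = np.random.randint(0, 256, size=(8, 28, 28), dtype="uint8")
normalised = normalise(example)
assert normalised.shape == (8, 28, 28, 1)
assert 0.0 <= normalised.min() and normalised.max() <= 1.0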
|
[
"def normalize_dataset(self):",
"def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_train'] = normalize_other_inputs(data['X_train'], Args)\n data['X_val'] = normalize_other_inputs(data['X_val'], Args)\n for key in data['Y_train'].keys():\n data['Y_train'][key] = (data['Y_train'][key] - mean) / std\n data['Y_val'][key] = (data['Y_val'][key] - mean) / std\n blend_cat['std'] = std\n blend_cat['mean'] = mean\n return data",
"def normalize_images(images):\n # normalize using imagenet mean and std\n mean = torch.zeros(images.data.size()).type(dtype)\n std = torch.zeros(images.data.size()).type(dtype)\n for i in range(3):\n mean[:, i, :, :] = MEAN_IMAGE[i]\n std[:, i, :, :] = STD_IMAGE[i]\n return (images - Variable(mean, requires_grad=False)) / Variable(std, requires_grad=False)",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def normalize(images):\r\n # normalize images to range of zero mean, unit variance\r\n for i in range(len(images)):\r\n im = images[i]\r\n im = (im - np.mean(im)) / np.std(im)\r\n images[i] = im\r\n\r\n return images",
"def _reshape_and_normalize_img(self, image):\n\t\timage = skimage.transform.resize(image, (self.height, self.width, self.channels), mode='reflect', preserve_range=True)\n\t\timage = np.reshape(image, (1, self.height, self.width, self.channels))\n\t\timage = image - VGG_MEANS\n\t\treturn image",
"def normalise(image):",
"def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images",
"def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs",
"def scale_images(X_data):\n\n scaler = StandardScaler()\n return scaler.fit_transform(X_data)",
"def normalise(self):\n self.img = self.img.astype(np.float32) / 255.0\n self.img -= self.img.mean()",
"def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1",
"def _normalization(self, test_all_descriptors, test_crop_descriptors):\n test_all_descriptors = np.vstack(test_all_descriptors)\n test_crop_descriptors = np.vstack(test_crop_descriptors)\n\n # sklearn.preprocessing.normalize(test_all_descriptors, copy=False)\n # sklearn.preprocessing.normalize(test_crop_descriptors, copy=False)\n\n test_all_descriptors = self._pca_model.transform(test_all_descriptors)\n test_crop_descriptors = self._pca_model.transform(test_crop_descriptors)\n\n # sklearn.preprocessing.normalize(test_all_descriptors, copy=False)\n # sklearn.preprocessing.normalize(test_crop_descriptors, copy=False)\n return test_all_descriptors, test_crop_descriptors",
"def reshape_dataset(self, dataset, params):\n assert hasattr(params, \"vectorize_data\"), (\n \"Model params must set vectorize_data.\")\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n dataset[key].images = dp.reshape_data(dataset[key].images, params.vectorize_data)[0]\n dataset[key].shape = dataset[key].images.shape\n return dataset",
"def data_normalization(data):\n data_mean = data.mean(axis=1)\n data_mean = data_mean.reshape(data_mean.shape[0], 1)\n data_std = data.std(axis=1)\n data_std = data_std.reshape(data_std.shape[0], 1)\n return (data - data_mean) / data_std",
"def normalize_images(imgs, mu=None, sigma=None, eps=1e-9):\n if mu is None:\n if len(imgs.shape) == 4:\n chans = imgs.shape[1]\n mu = np.asarray(\n [np.mean(imgs[:, i, :, :]) for i in range(chans)]\n ).reshape(1, -1, 1, 1)\n elif len(imgs.shape) == 5: # glimpses\n chans = imgs.shape[2]\n mu = np.asarray(\n [np.mean(imgs[:, :, i, :, :]) for i in range(chans)]\n ).reshape(1, 1, -1, 1, 1)\n sigma = np.asarray(\n [np.std(imgs[:, :, i, :, :]) for i in range(chans)]\n ).reshape(1, 1, -1, 1, 1)\n else:\n raise Exception(\"unknown number of dims for normalization\")\n\n if sigma is None:\n if len(imgs.shape) == 4:\n chans = imgs.shape[1]\n sigma = np.asarray(\n [np.std(imgs[:, i, :, :]) for i in range(chans)]\n ).reshape(1, -1, 1, 1)\n elif len(imgs.shape) == 5: # glimpses\n chans = imgs.shape[2]\n sigma = np.asarray(\n [np.std(imgs[:, :, i, :, :]) for i in range(chans)]\n ).reshape(1, 1, -1, 1, 1)\n else:\n raise Exception(\"unknown number of dims for normalization\")\n\n return (imgs - mu) / (sigma + eps), [mu, sigma]",
"def _normalize_and_scale(self, delta_im, mean, std):\n delta_im.data += 1 # now 0..2\n delta_im.data *= 0.5 # now 0..1\n\n # normalize image color channels\n for c in range(self.ncInput):\n delta_im.data[:,c,:,:] = (delta_im.data[:,c,:,:] - mean[c]) / std[c]\n\n # threshold each channel of each image in deltaIm according to inf norm\n # do on a per image basis as the inf norm of each image could be different\n bs = delta_im.size(0)\n for i in range(bs):\n # do per channel l_inf normalization\n for ci in range(self.ncInput):\n l_inf_channel = delta_im[i,ci,:,:].detach().cpu().abs().max()\n mag_in_scaled_c = self.mag_in/(255.0*std[ci])\n delta_im[i,ci,:,:].data *= torch.tensor(np.minimum(1.0, mag_in_scaled_c / l_inf_channel)).float().cuda()\n\n return delta_im",
"def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8",
"def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks the date and time, and then decides if a shift from master to slave (or vice versa) is needed. If necessary, makes the shift.
|
import time


def main():
    date = time.gmtime().tm_mday
    if date == 1 or date == 2:  # in case it missed once
        # shift from slave to master, checking to ensure it hasn't already happened
        status = check_status()
        if status == 'slave':
            slave_to_master()
        elif status == 'master':
            print("Shift has probably already happened")
        else:
            print("In a forbidden state:", status)
    elif date == 22 or date == 23:  # in case it missed once
        # shift from master to slave, checking to ensure it hasn't already happened
        status = check_status()
        if status == 'master':
            master_to_slave()
        elif status == 'slave':
            print("Shift has probably already happened")
        else:
            print("In a forbidden state:", status)
    else:
        pass
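

# Assumed entry point (not in the original): the script appears designed to run
# once per day (e.g. from a scheduler), so main() only acts on the 1st/2nd and
# 22nd/23rd of each month.
if __name__ == "__main__":
    main()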
|
[
"def check_shift(self, shift):\n start_time = shift.start_time.astimezone(tz=shift.user.userprofile.timezone)\n end_time = shift.end_time.astimezone(tz=shift.user.userprofile.timezone)\n # shift_date = start_time.date() if start_time.date() == end_time.date() else None\n\n # The code below works for both cases, this commented out code works if the start_time and end_time\n # are on the same day.\n # if shift_date:\n # # hurray the shift is on a single date from the users perspective and we only need to reference\n # # a single availability record\n # av = self.get_availability_for_date(user=shift.user, av_date=shift_date)\n #\n # # if no availability record is present, we return True and assume the user is availabile\n # if av is None:\n # return True\n # if av.datetime_is_in_range(start_time) and av.datetime_is_in_range(end_time):\n # # if the shift start_time and end_time is within the Availability period or\n # # no Availability record exists\n # return True\n # return False\n # else:\n # The shift spans more than one day (from the perspective of the user's timezone) so\n # we need to reference two potential availability records (they could be the same)\n\n av1 = self.get_availability_for_date(user=shift.user, av_date=start_time.date())\n av2 = self.get_availability_for_date(user=shift.user, av_date=end_time.date())\n\n # if both None then we assume the user is available so we return True\n if av1 is None and av2 is None:\n return True\n\n if av1:\n if av1.timeoffrequest_set.count() > 0:\n # If a TimeOffRecord exists then the user has approved time off and is not available for scheduling\n return False\n\n # if two availability records\n if av1 and av2:\n if av1.datetime_is_in_range(start_time) and av2.datetime_is_in_range(end_time):\n return True\n elif av1:\n # if just av1 is present then we're only bound by the single Availability record\n if av1.datetime_is_in_range(start_time):\n return True\n elif av2:\n # if just av2 is present then we're only bound by the single Availability record\n if av2.datetime_is_in_range(end_time):\n return True\n return False",
"def register_for_shift(self, shift_id, staff_id):\n try:\n result = False\n conflict = False\n\n date = datetime.now()\n mysql_date = f'{date.year}-{date.month}-{date.day} {date.hour}:{date.minute}:00'\n\n shifts_registered_on = self.db_handler.get_staff_registered_shifts_by_id(staff_id)\n shift_pretending = self.db_handler.get_shift_extended_info_by_id(shift_id)\n\n if shifts_registered_on.__len__() > 0:\n for shift in shifts_registered_on:\n diff = shift[1] - shift_pretending[8]\n\n if diff > timedelta(minutes=0): # shift is later\n interval = (diff - (shift_pretending[9] - shift_pretending[8])).seconds / 3600\n if interval >= int(self.get_config_value('HOURS_BETWEEN_SHIFTS')):\n conflict = False\n else:\n conflict = True\n break\n else: # pretending is later\n diff = shift_pretending[8] - shift[1]\n interval = (diff.days * 24) + (diff - (shift[2] - shift[1])).seconds / 3600\n if interval >= int(self.get_config_value('HOURS_BETWEEN_SHIFTS')):\n conflict = False\n else:\n conflict = True\n break\n\n if conflict:\n result = False\n else:\n if self.db_handler.get_shift_registration_by_staff_id_and_shift_id(staff_id, shift_id) is not None:\n self.db_handler.reregister_staff_to_shift(shift_id, staff_id, mysql_date)\n else:\n self.db_handler.register_staff_to_shift(shift_id, staff_id, mysql_date)\n self.logger.write_to_log('staff registered to shift', 'model')\n result = True\n\n return result\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def check_manual_circuit(*args):\n if args[0] == 0:\n setTimeRegime(1)\n elif args[0] == 1:\n setTimeRegime(0)",
"def check_start_time(self):\n \n # make sure the time is in gps time\n zen_start = self.get_UTC_date_time(self.header.gpsweek,\n self.gps_stamps['time'][0]+\\\n self._leap_seconds)\n # set the zen schedule to the first gps stamp\n self.zen_schedule = zen_start\n zen_time = time.strptime(zen_start, datetime_fmt)\n \n # calculate the scheduled start time\n s_start = '{0},{1}'.format(self.schedule.Date, self.schedule.Time)\n schedule_time = time.strptime(s_start, datetime_fmt)\n \n # reset the data and time in the schedule meta data so there is no\n # confusion on when the time series starts\n self.schedule.Date = zen_start.split(',')[0]\n self.schedule.Time = zen_start.split(',')[1]\n \n # estimate the time difference between the two \n time_diff = time.mktime(zen_time)-time.mktime(schedule_time)\n print(' Scheduled time was {0} (GPS time)'.format(s_start))\n print(' 1st good stamp was {0} (GPS time)'.format(zen_start))\n print(' difference of {0:.2f} seconds'.format(time_diff))",
"def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n 
self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]",
"def test_when_checking_the_same_panel(\n self, convention1_uid, room1_uid, panel_time, panel1_uid,\n ):\n assert (\n is_time_frame_avalible(\n convention1_uid,\n room1_uid,\n panel_time[0],\n panel_time[1],\n panel1_uid\n )\n is True\n )",
"def checkShiftAssignments(roster):\r\n\r\n # for each employee i\r\n for i in roster.employees:\r\n ename = i.fName + \" \" + i.lName\r\n onList, offList = roster.getOnOffDays(ename)\r\n\r\n # for each day he/she neither has an offday or vacation day\r\n for j in onList:\r\n shiftassignments = 0 # counts how many shifts have been assigned to one employee per day\r\n\r\n # for each shift\r\n for k in roster.shiftTypes:\r\n\r\n # check if employee i works shift k on day j\r\n # make a list with working employees for the day and shift\r\n EmployeesWorkingShiftThatDay = roster.getWorkingEmployees(j, k['name'])\r\n\r\n # check if current employee i is in list\r\n for e in EmployeesWorkingShiftThatDay:\r\n if (e == ename):\r\n shiftassignments = shiftassignments + 1\r\n\r\n # if one employee has more than one shift per day assigned, return false\r\n if (shiftassignments > 1):\r\n return False\r\n return True",
"def check_date(self):\n if self.get_createdate() != '':\n return\n\n jobstartdate = self.get_jobstartdate()\n if jobstartdate != '':\n self.set_createdate(jobstartdate)",
"def checkMinMaxConsec(roster):\r\n currentTime = datetime.strptime(roster.start, '%Y-%m-%d')\r\n\r\n start_date = date(currentTime.year, currentTime.month, currentTime.day)\r\n end_period = currentTime + timedelta(days=roster.cntDays)\r\n end_date = date(end_period.year, end_period.month, end_period.day)\r\n\r\n # for each day\r\n for single_date in daterange(start_date, end_date): # checkt er hier auch letzten Tag?\r\n sdDay = single_date.strftime(\"%Y-%m-%d\")\r\n if (sdDay != roster.start): # for all days not being the first day in the period\r\n sdDayBefore = (single_date - timedelta(days=1)).strftime(\"%Y-%m-%d\")\r\n # for each employee\r\n for i in roster.employees:\r\n if (i.lName != \"Nurse\"): # for all nurses except Leih Nurses\r\n startDay = getConsecStartDay(sdDay, roster, i)\r\n eWorks = getShiftByEmployeeByDate01(roster, sdDay, i)\r\n eWorksY = getShiftByEmployeeByDate01(roster, sdDayBefore, i)\r\n\r\n if ((eWorks - eWorksY - startDay > 0)\r\n or (eWorks - startDay < 0)\r\n or (startDay + eWorksY > 1)):\r\n return False\r\n else: # for first day in period\r\n # for each employee\r\n for i in roster.employees:\r\n if (i.lName != \"Nurse\"):\r\n startDay = getConsecStartDay(sdDay, roster, i)\r\n oldPeriod = 0\r\n if (i.history[\"lastAssignedShiftType\"] == \"Spaet\" or i.history[\"lastAssignedShiftType\"] == \"Frueh\"):\r\n oldPeriod = 1\r\n eWorks = getShiftByEmployeeByDate01(roster, sdDay, i)\r\n if ((eWorks - oldPeriod - startDay > 0)\r\n or (eWorks - startDay < 0)\r\n or (startDay + oldPeriod > 1)):\r\n return False\r\n return True",
"def checkUpstreamScheduler():",
"def check_date(self):\n\n date_now = str(datetime.datetime.today().date())\n if date_now in self.schedule.schedule.keys():\n return self\n return False",
"def set_system_time(self):\n self.switch_to_GPS()\n # if self.is_fix():\n # settime = Popen(\"sudo date -u\", shell=True).wait()\n # return True\n return False",
"def assert_clock_sync(self):\n log.error(\"====== Sung ====== assert_clock_sync\")\n dt = self.assert_get(WorkhorseParameter.TIME)\n lt = time.strftime(\"%Y/%m/%d,%H:%M:%S\", time.gmtime(time.mktime(time.localtime())))\n self.assertTrue(lt[:13].upper() in dt.upper())",
"def schedule_monitor(schedule):\n if schedule[\"state\"] == \"stopped\":\n if (\n date.today()\n - datetime.strptime(schedule[\"lastStateChange\"], \"%Y-%d-%m\").date()\n ).days >= 7 - int(schedule[\"schedule\"]):\n schedule[\"state\"] = \"started\"\n schedule[\"lastStateChange\"] = str(date.today())\n elif schedule[\"state\"] == \"started\":\n if (\n date.today()\n - datetime.strptime(schedule[\"lastStateChange\"], \"%Y-%d-%m\").date()\n ).days >= int(schedule[\"schedule\"]):\n schedule[\"state\"] = \"stopped\"\n schedule[\"lastStateChange\"] = str(date.today())\n else:\n return schedule, False\n\n return schedule, True",
"def could_do_this_job(s,e): #shift,start time,employee\n if(s.job_id not in e.jobs): #in array check employee tru and false\n return(False)\n for t in e.availability:\n if in_time(s.time, t):\n return(True)\n return(False)",
"def check(self):\n\t\tfails = 0\n\t\tworktime_month = timedelta(hours=0)\n\t\tworktime_homeoffice = timedelta(hours=0)\n\t\tfor num in self.workdays:\n\t\t\tday = self.workdays[num]\n\t\t\tif day.daytype == DayType.work:\t\n\t\t\t\tfails += day.check(num)\n\t\t\t\tworktime = day.getWorkingTime()\n\t\t\t\tworktime_month += worktime\n\t\t\t\thotime = day.getHomeofficeTime()\n\t\t\t\tworktime_homeoffice += hotime\t\t\t\t\n\t\tif (worktime_homeoffice > timedelta(days=10)):\n\t\t\tprRed('! {:02d}. max. mtl. Heimarbeit überschritten ({} <= 10days)'.format(num, worktime))\n\t\t\tfails += 1\n\t\tprint('----------------')\n\t\tif fails == 0:\n\t\t\tprGreen('Keine Verstöße erkannt')\n\t\telse:\n\t\t\tprRed('{0} Verstöße erkannt'.format(fails))",
"def check_availability(car):\n plate_num = int(car.plate[-1]) # Get the last number of the plate\n date = car.date # Get the date \n weekday = (date.weekday() + 1)*2 # Get the number of the week day\n time = date.time() # Get the time \n restricted = [(weekday-1) , weekday % 10] # Create an interval of restrictions\n check_time = (time <= morning_end.time() and time >= morning_in.time()) or \\\n (time <= afternoon_end.time() and time >= afternoon_in.time())\n # Boolean that verify the time \n if check_time and plate_num in restricted:\n car.availability = False\n else:\n car.availability = True",
"def check_study_time(self) -> bool:\n\n if self._current_time.weekday() != 6 \\\n and self._lesson_calls[0]['start']['h'] <= self._current_time.hour <= self._lesson_calls[5]['end']['h']:\n if self._current_time.hour == self._lesson_calls[5]['end']['h'] \\\n and self._current_time.minute > self._lesson_calls[5]['end']['m']:\n return False\n\n # if smoke break\n return not self.check_coffee_break()\n return False",
"def _check_board(self):\n winner = self._check_winner()\n if winner:\n self.winner = winner\n self.turn = None\n else:\n if self._board_is_full():\n self.stalemate = True\n self.turn = None\n self.save()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check the status of the application, i.e., whether it is running on the master or the slave. Also check for problem states, such as the web dyno running on the slave, or both workers running at once.
|
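# A minimal sketch of the module-level setup check_status() appears to assume.
# The "dyno"/"formation" wording suggests the Heroku Platform API; the app
# names and API key below are placeholders, not values from the source.
import requests as req

HEROKU_API_KEY = "<heroku-api-key>"
MASTER_API_URL = "https://api.heroku.com/apps/<master-app-name>"
SLAVE_API_URL = "https://api.heroku.com/apps/<slave-app-name>"
MASTER_API_HEADERS = SLAVE_API_HEADERS = {
    "Accept": "application/vnd.heroku+json; version=3",
    "Authorization": f"Bearer {HEROKU_API_KEY}",
}
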
def check_status():
# assume no web dynos on master - there should never be a web dyno on master
r = req.get(f"{MASTER_API_URL}/formation/worker", headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Couldn't get master worker formation")
print(r.status_code, ":", r.text)
return 'unknown:1'
master_worker = r.json()['quantity'] # this is guaranteed to work i think
r = req.get(f"{SLAVE_API_URL}/formation/worker", headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Couldn't get slave worker formation")
print(r.status_code, ":", r.text)
return 'unknown:2'
slave_worker = r.json()['quantity']
r = req.get(f"{SLAVE_API_URL}/formation/web", headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Couldn't get slave web formation")
print(r.status_code, ":", r.text)
return 'unknown:3'
slave_web = r.json()['quantity']
# all done
if slave_web != 0:
return 'forbidden-web'
elif master_worker != 0 and slave_worker != 0:
return 'both'
elif master_worker != 0:
return 'master'
elif slave_worker != 0:
return 'slave'
else:
return 'none'
|
[
"def check_master_vpn_worker(self):\n LOG.info(\"Checking master/vpn\")\n success = True\n if not self.configurations[0].get(\"masterInstance\") or self.configurations[0].get(\"vpnInstance\"):\n success = False\n for configuration in self.configurations[1:]:\n if not configuration.get(\"vpnInstance\") or configuration.get(\"masterInstance\"):\n success = False\n return success",
"def check_configuration_server(self) -> bool:\n return (\n self.container is not None\n and self.container.exec_run(\n \"bash -c 'curl -s --head http://localhost:19071/ApplicationStatus'\"\n )\n .output.decode(\"utf-8\")\n .split(\"\\r\\n\")[0]\n == \"HTTP/1.1 200 OK\"\n )",
"def check_status():\n status = \"\"\n if os.path.exists(current_app.config[\"ACI_STARTED_FILE\"]):\n logger.debug(\"application started flag is set\")\n # check mongo connection \n try:\n from . utils import get_db\n assert len(get_db().collection_names()) >= 0\n except Exception as e:\n logger.debug(\"failed to connect to mongo db: %s\", e)\n return (False, \"failed to connect to mongo database\")\n # check redis connection\n try:\n from . utils import get_redis\n assert get_redis().dbsize() >= 0\n except Exception as e:\n logger.debug(\"failed to connect to redis db: %s\", e)\n return (False, \"failed to connect to redis database\")\n # started flag and successfully connected to mongo and redis\n return (True, \"started\")\n\n logger.debug(\"application started flag not found, checking for status\")\n if os.path.exists(current_app.config[\"ACI_STATUS_FILE\"]):\n try:\n with open(current_app.config[\"ACI_STATUS_FILE\"], \"r\") as f:\n status = f.read()\n logger.debug(\"application status: %s\" % status)\n except Exception as e:\n logger.debug(\"failed to open status file: %s\" % e)\n else:\n logger.debug(\"application status flag not found\")\n status = \"not-ready\"\n return (False, status)",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)",
"def readiness():\n return run_health_check()",
"def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1",
"def is_spark_ready(master_node):\n result = subprocess.check_output(\n f'ssh {master_node}'\n f' /home/ddps2107/Soft/miniconda3/bin/python '\n f' {project_dir}/system_under_test/spark/deployment/is_cluster_live.py',\n shell=True\n )\n\n print(str(result))\n if 'live' in str(result):\n return True\n else:\n return False",
"def _check_running(self):\n if self.kvm_short is not None:\n cmd = (\"sudo virsh list |grep %s\" % self.kvm_short)\n\n response, err = self._exec_ssh_cmd(self.compute_hostname, cmd)\n logging.info((\"running:\\n%s\\non: %s\\nresponse:\\n%s\") %\n (cmd, self.compute_hostname, response))\n\n if 'exception' in response:\n logging.error((\"Exception in _check_running:\\n\"\n \"response:%s\\nerr:%s\\n\") % (response, err))\n self.exceptions += 1\n self._check_exit_condition()\n elif 'running' in response:\n self.instance_running = True\n self._get_virsh_short(response)\n if self.virsh_short is not None:\n self._get_virsh_xml()\n return",
"def available(cls):\n return 0 == shell('{} juju status'.format(cls.cmd_prefix)).code",
"def check_status():\n\n try:\n pid = os.fork()\n except Exception as e:\n raise (e)\n\n if (pid == 0):\n url = \"http://localhost:{}/\".format(K8S_PORT) + \\\n \"api/v1/namespaces/default/pods\"\n resp = requests.get(url)\n\n if resp.status_code != 200:\n # This means something went wrong.\n raise Exception(\"Error with code \" +\n str(resp.status_code))\n else:\n print(\"Success with status code 200, \\\n parsing response...\")\n\n parse_status_json(resp.json())\n\n else:\n os.waitpid(pid, 0)",
"def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")",
"def is_running(self):\n cmd = [\"machinectl\", \"--no-pager\", \"status\", self.name]\n try:\n subprocess.check_call(cmd)\n return True\n except subprocess.CalledProcessError as ex:\n logger.info(\"nspawn container %s is not running probably: %s\",\n self.name, ex.output)\n return False",
"def isRunning(self):\n exec_filename = self.getExec()\n log_func.info(u'Checking a running OLAP server using <%s>' % exec_filename)\n return sys_func.isActiveProcess(exec_filename)",
"def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )",
"def isMainRunning():\n result = False\n if checkCmd(CHECK_LOCAL_TFVERSION, TFVERSION_SUCCESS):\n result = True\n print(\"test of \", CHECK_LOCAL_TFVERSION, \" : SUCCESS\")\n\n else:\n print(\"test of \", CHECK_LOCAL_TFVERSION, \" : FAILURE\")\n\n return result",
"def check_running(self):\n\t\t\n\t\treturn_to_os=False \n\t\ttry:\n\t\t\tfile_pid=open(self.pid_file,'r')\n\t\t\tpid=file_pid.read()\n\t\t\tfile_pid.close()\n\t\t\tos.kill(int(pid),0)\n\t\t\tprint(\"replica process already running with pid %s\" % (pid, ))\n\t\t\treturn_to_os=True\n\t\t\tif self.global_config.log_dest=='file':\n\t\t\t\tos.remove(self.global_config.log_file)\n\t\texcept:\n\t\t\tpid=os.getpid()\n\t\t\tfile_pid=open(self.pid_file,'w')\n\t\t\tfile_pid.write(str(pid))\n\t\t\tfile_pid.close()\n\t\t\treturn_to_os=False\n\t\treturn return_to_os",
"def is_running(self):\n\n pid_files = {\n 'solr': os.path.join(self.params.INSTALLATION_DIRECTORY, \"shared/tmp/pids/solr-production.pid\"),\n 'nginx': os.path.join(self.params.INSTALLATION_DIRECTORY, \"shared/tmp/pids/nginx.pid\"),\n 'jetty': os.path.join(self.params.INSTALLATION_DIRECTORY, \"shared/tmp/pids/jetty.pid\"),\n 'scheduler': os.path.join(self.params.INSTALLATION_DIRECTORY, \"shared/tmp/pids/scheduler.production.pid\"),\n 'worker': os.path.join(self.params.INSTALLATION_DIRECTORY, \"shared/tmp/pids/worker.production.pid\"),\n 'postgres': os.path.join(self.params.DATA_DIRECTORY, \"db/postmaster.pid\")\n }\n\n not_running = []\n\n for process, pidFile in pid_files.iteritems():\n pid = None\n\n if process == 'postgres':\n with open(pidFile, 'r') as filehandle:\n pid = int(filehandle.readlines()[0])\n\n if not utilities.is_process_running(pidFile, pid):\n not_running.append(process)\n\n if len(not_running) > 0:\n return not_running\n else:\n return True",
"def check_running(self, fail_on_error=True):\n status = True\n state = self.check_mount_state(self.running_hosts)\n if state[\"unmounted\"] or state[\"nodirectory\"]:\n self.log.error(\n \"Error: dfuse not running on %s\",\n str(state[\"unmounted\"].union(state[\"nodirectory\"])))\n status = False\n if fail_on_error:\n raise CommandFailure(\"dfuse not running\")\n return status"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Shift the process from master to slave, shifting data as needed.
|
def master_to_slave():
print("Shifting from master to slave")
stop_master_worker()
setup_slave_web()
prepare_push()
push_to_slave()
stop_slave_web()
start_slave_worker()
print("DONE!")
|
[
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def slave_operation():\n # receive data from master node\n x = comm.recv(source=0, tag=1)\n y = comm.recv(source=0, tag=2)\n\n # multiply the received matrix and send the result back to master\n z = multiply_matrix(x, y)\n comm.send(z, dest=0, tag=rank)",
"def become_master(slave_process, old_name):\n s = socket.socket()\n s.bind((\"\", 23456))\n print(\"[*] Listening for command from orchestrator to become master ...\", file=sys.stdout)\n s.listen(2)\n c, address = s.accept()\n print(\"Received command from orchestrator to become master: \" + c.recv(1024).decode(), file=sys.stdout)\n slave_process.terminate()\n\n os.environ[\"WORKER_TYPE\"] = \"master\"\n os.environ[\"NODE_NAME\"] = \"master\"\n\n client = docker.DockerClient(base_url=\"tcp://172.17.0.1:4444\")\n cnt = client.containers.get(old_name)\n cnt.rename(\"master\")\n\n logging.basicConfig()\n zk = KazooClient(hosts=zookeeper_hostname)\n zk.start()\n\n node_name = \"/worker/\" + os.environ[\"NODE_NAME\"]\n if not zk.exists(node_name):\n msg = \"Creating node: \" + node_name\n print(msg, file=sys.stdout)\n db_name = os.environ[\"DB_HOSTNAME\"]\n zk.create(node_name, db_name.encode(), ephemeral=True)\n\n time.sleep(3)\n zk.delete(\"/worker/\" + old_name)\n\n rpc_server = RpcServer(queue_name='writeQ', func=writedb, is_master=True)\n rpc_server.start()",
"def bring_slave_back(self):\n self.containers.start_container('pg_slave_1')\n time.sleep(10)",
"def master_operation():\n distribute_matrix_data()\n assemble_matrix_data()",
"def slave_run(self):\n\n # Initialize PDS data store here because only slaves need to do it.\n self.pds_store = {}\n\n while True:\n data = self.comm.bcast(None, root=0)\n\n op = data[0]\n if op == self.OP_PARALLELIZE:\n pds_id = data[1]\n self.__rec_pds_id = pds_id\n pds = self.parallelize([])\n self.pds_store[pds.pds_id] = pds\n\n\n elif op == self.OP_MAP:\n pds_id, pds_id_result, function_packed = data[1:]\n self.__rec_pds_id, self.__rec_pds_id_result = pds_id, pds_id_result\n\n #Use cloudpickle to convert back function string to a function\n func = cloudpickle.loads(function_packed)\n #Set the function's backend to current class\n #so it can access bds_store properly\n # func.backend = self\n\n\n # Access an existing PDS\n pds = self.pds_store[pds_id]\n pds_res = self.map(func, pds)\n\n # Store the result in a newly gnerated PDS pds_id\n self.pds_store[pds_res.pds_id] = pds_res\n\n elif op == self.OP_BROADCAST:\n self.__bds_id = data[1]\n self.broadcast(None)\n\n elif op == self.OP_COLLECT:\n pds_id = data[1]\n\n # Access an existing PDS from data store\n pds = self.pds_store[pds_id]\n\n self.collect(pds)\n\n elif op == self.OP_DELETEPDS:\n pds_id = data[1]\n del self.pds_store[pds_id]\n\n elif op == self.OP_DELETEBDS:\n bds_id = data[1]\n del self.bds_store[bds_id]\n\n elif op == self.OP_FINISH:\n quit()\n else:\n raise Exception(\"Slave recieved unknown command code\")",
"def move_memory_slave(self, from_index, to_index, debug=False):\n s_count = self.get_number_of_memory_slaves()\n if to_index >= s_count:\n to_index = s_count - 1\n\n if from_index == to_index:\n return\n\n graph_dict = self.get_nodes_dict()\n\n # Find the slave at the from_index.\n from_node = None\n for key in graph_dict.keys():\n if graph_dict[key].node_type != NodeType.SLAVE or \\\n graph_dict[key].slave_type != SlaveType.MEMORY or \\\n graph_dict[key].slave_index != from_index:\n continue\n from_node = graph_dict[key]\n break\n\n if from_node is None:\n raise SlaveError(\"Slave with from index %d not found\" % (from_index))\n\n # Find the slave at the to_index.\n to_node = None\n for key in graph_dict.keys():\n if graph_dict[key].node_type != NodeType.SLAVE or \\\n graph_dict[key].slave_type != SlaveType.MEMORY or \\\n graph_dict[key].slave_index != to_index:\n continue\n to_node = graph_dict[key]\n break\n\n if to_node is None:\n raise SlaveError(\"Slave with to index %d not found\" % (to_index))\n\n if debug:\n print \"before move:\"\n print \"\\tslave %s at position %d with name: %s\" % \\\n (from_node.name, from_node.slave_index, from_node.unique_name)\n print \"\\tslave %s at position %d with name: %s\" % \\\n (to_node.name, to_node.slave_index, to_node.unique_name)\n\n from_node.slave_index = to_index\n from_unique = get_unique_name(from_node.name,\n from_node.node_type,\n from_node.slave_type,\n from_node.slave_index)\n\n mapping = {from_node.unique_name : from_unique}\n\n if debug:\n print \"from.unique_name: \" + from_node.unique_name\n print \"from_unique: \" + from_unique\n\n print \"keys\"\n for name in graph_dict.keys():\n print \"key: \" + name\n\n self.graph = nx.relabel_nodes(self.graph,\n {from_node.unique_name : from_unique})\n from_node = self.get_node(from_unique)\n from_node.slave_index = to_index\n from_node.unique_name = from_unique\n\n to_node.slave_index = from_index\n to_unique = get_unique_name(to_node.name,\n to_node.node_type,\n to_node.slave_type,\n to_node.slave_index)\n self.graph = nx.relabel_nodes(self.graph, {to_node.unique_name:to_unique})\n\n to_node = self.get_node(to_unique)\n to_node.slave_index = from_index\n to_node.unique_name = to_unique\n\n if debug:\n print \"after move:\"\n print \"\\tslave %s at position %d with name: %s\" % \\\n (from_node.name, from_node.slave_index, from_node.unique_name)\n print \"\\tslave %s at position %d with name: %s\" % (\\\n to_node.name, to_node.slave_index, to_node.unique_name)\n\n graph_dict = self.get_nodes_dict()\n print \"keys\"\n for name in graph_dict.keys():\n print \"key: \" + name",
"def swap_rebalance_master(self):\n task = self.__async_swap_rebalance(master=True)\n task.result()",
"def onSlave(self):",
"def __initiate_slave_node(self):\n master_cmd = self.__spark_installation_path + '/sbin/start-slave.sh spark://' + self.__host_ip + ':7077'\n print(master_cmd)\n #os.chdir(self.__home_dir)\n output = os.system(master_cmd)\n if output != 0:\n raise Exception(\"Terminating process!\")\n self.__logger.log('Enabled slave node..')",
"def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))",
"def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")",
"def move(self, *args):\n if self._cluster:\n return self.execute(u'MOVE', *args, shard_key=args[0])\n return self.execute(u'MOVE', *args)",
"def replicate_slave_from_master(master):\n if len(env.hosts) > 1:\n exit('This job is currently only setup to run against one slave at a time')\n\n with settings(host_string=master):\n # `--single-transaction` in conjunction with `--master-data` avoids\n # locking tables for any significant length of time. See\n # https://web.archive.org/web/20160308163516/https://dev.mysql.com/doc/refman/5.5/en/mysqldump.html#option_mysqldump_single-transaction\n run('sudo -i mysqldump -u root --all-databases --master-data --single-transaction --quick --add-drop-database > dump.sql')\n\n with settings(host_string=master, forward_agent=True):\n run('scp dump.sql {0}:~'.format(env.hosts[0]))\n\n with settings(host_string=master):\n run('rm dump.sql')\n\n run_mysql_command(\"STOP SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=OFF\")\n\n with hide('running', 'stdout'):\n database_file_size = run(\"stat --format='%s' dump.sql\")\n\n print('Importing MySQL database which is {0}GB, this might take a while...'.format(round(int(database_file_size) / (1024 * 1024 * 1024 * 1.0), 1)))\n run('sudo -i mysql -uroot < dump.sql')\n\n run('rm dump.sql')\n\n run_mysql_command(\"START SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=ON\")\n\n slave_status()",
"def transform_to_master(self, transform_to_master):\n\n self._transform_to_master = transform_to_master",
"def _runMaster(run, comm, logger):\n\n from mpi4py import MPI\n stat = MPI.Status()\n rank = comm.rank\n size = comm.size\n\n logger.info(\"BHSnapshotData._runMaster()\")\n logger.debug(\"Rank %d/%d\" % (rank, size))\n\n # Make sure output directory exists\n fname = _GET_BH_SINGLE_SNAPSHOT_FILENAME(run, 0)\n zio.check_path(fname)\n\n # Load BH Mergers\n logger.info(\"Loading BH Mergers\")\n mrgs = mergers.load_fixed_mergers(run, loadsave=True, verbose=False)\n numMergers = mrgs[MERGERS.NUM]\n logger.debug(\"- Loaded %d mrgs\" % (numMergers))\n\n # Init status file\n statFileName = bh_constants._GET_STATUS_FILENAME(__file__, run=run, version=_VERSION)\n statFile = open(statFileName, 'w')\n logger.debug(\"Opened status file '%s'\" % (statFileName))\n statFile.write('%s\\n' % (str(datetime.now())))\n beg = datetime.now()\n\n num_pos = 0\n num_neg = 0\n num_new = 0\n countDone = 0\n count = 0\n times = np.zeros(NUM_SNAPS-1)\n\n # Iterate Over Snapshots\n # ----------------------\n # Go over snapshots in random order to get a better estimate of ETA/duration\n snapList = np.arange(NUM_SNAPS-1)\n np.random.shuffle(snapList)\n logger.info(\"Iterating over snapshots\")\n pbar = zio.getProgressBar(NUM_SNAPS-1)\n for snapNum in snapList:\n logger.debug(\"- Snap %d, count %d, done %d\" % (snapNum, count, countDone))\n\n # Get Mergers occuring just after Snapshot `snapNum`\n mrgs = mrgs[MERGERS.MAP_STOM][snapNum+1]\n nums = len(mrgs)\n targetIDs = mrgs[MERGERS.IDS][mrgs]\n logger.debug(\"- %d Mergers from snapshot %d\" % (nums, snapNum+1))\n\n # Look for available slave process\n data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=stat)\n src = stat.Get_source()\n tag = stat.Get_tag()\n logger.debug(\"- Received signal from %d\" % (src))\n\n # Track number of completed profiles\n if (tag == MPI_TAGS.DONE):\n durat, pos, neg, new = data\n logger.debug(\"- - Done after %s, pos %d, neg %d, new %d\" % (durat, pos, neg, new))\n\n times[countDone] = durat\n num_pos += pos\n num_neg += neg\n num_new += new\n countDone += 1\n\n # Distribute tasks\n logger.debug(\"- Sending new task to %d\" % (src))\n comm.send([snapNum, mrgs, targetIDs, numMergers], dest=src, tag=MPI_TAGS.START)\n logger.debug(\"- New task sent\")\n\n # Write status to file and log\n dur = (datetime.now()-beg)\n fracDone = 1.0*countDone/(NUM_SNAPS-1)\n statStr = 'Snap %3d (rank %03d) %8d/%8d = %.4f in %s %8d pos %8d neg %3d new\\n' % \\\n (snapNum, src, countDone, NUM_SNAPS-1, fracDone, str(dur), num_pos, num_neg, num_new)\n statFile.write(statStr)\n statFile.flush()\n logger.debug(statStr)\n count += 1\n pbar.update(count)\n\n statFile.write('\\n\\nDone after %s' % (str(datetime.now()-beg)))\n statFile.close()\n pbar.finish()\n\n # Close out all Processes\n # -----------------------\n numActive = size-1\n logger.info(\"Exiting %d active processes\" % (numActive))\n while(numActive > 0):\n # Find available slave process\n data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=stat)\n src = stat.Get_source()\n tag = stat.Get_tag()\n logger.debug(\"- Received signal from %d\" % (src))\n\n # If we're recieving exit confirmation, count it\n if (tag == MPI_TAGS.EXIT): numActive -= 1\n else:\n # If a process just completed, count it\n if (tag == MPI_TAGS.DONE):\n durat, pos, neg, new = data\n logger.debug(\"- - %d Done after %s, pos %d, neg %d, new %d\" %\n (src, durat, pos, neg, new))\n times[countDone] = durat\n countDone += 1\n num_pos += pos\n num_neg += neg\n num_new += new\n\n # Send exit command\n logger.debug(\"Sending 
exit to %d. %d Active.\" % (src, numActive))\n comm.send(None, dest=src, tag=MPI_TAGS.EXIT)\n\n fracDone = 1.0*countDone/(NUM_SNAPS-1)\n logger.debug(\"%d/%d = %.4f Completed tasks!\" % (countDone, NUM_SNAPS-1, fracDone))\n logger.debug(\"Average time %.4f +- %.4f\" % (np.average(times), np.std(times)))\n logger.info(\"Totals: pos = %5d neg = %5d new = %3d\" % (num_pos, num_neg, num_new))\n\n return",
"def distribute_matrix_data():\n def split_matrix(seq, p):\n \"\"\"\n Split matrix into small parts according to the no of workers. These\n parts will be send to slaves by master node\n \"\"\"\n rows = []\n n = int(len(seq) / p)\n r = len(seq) % p\n b, e = 0, n + min(1, r)\n for i in range(p):\n print(b, \" =============================\", e)\n rows.append(seq[b:e])\n r = max(0, r - 1)\n b, e = e, e + n + min(1, r) \n\n return rows\n\n rows = split_matrix(mtrx1, workers)\n\n pid = 1\n for row in rows:\n comm.send(row, dest=pid, tag=1)\n comm.send(mtrx2, dest=pid, tag=2)\n pid = pid + 1",
"def slave_backup(self):\r\n self.assert_is_master(False)\r\n if self.slave_lock_backups() != 0:\r\n self.log.error(\"Cannot obtain backup lock.\")\r\n sys.exit(1)\r\n\r\n try:\r\n self.slave_pause(waitcomplete=1)\r\n\r\n try:\r\n self.slave_rotate_backups()\r\n src = self.cf.getfile(\"slave_data\")\r\n dst = self.cf.getfile(\"full_backup\")\r\n\r\n start_time = time.localtime()\r\n cmdline = [\"cp\", \"-a\", src, dst ]\r\n self.log.info(\"Executing %s\", \" \".join(cmdline))\r\n if not self.not_really:\r\n self.exec_cmd(cmdline)\r\n stop_time = time.localtime()\r\n\r\n # Obtain the last restart point information\r\n ctl = PgControlData(self.cf.getfile(\"slave_bin\", \"\"), dst, True)\r\n\r\n # TODO: The newly created backup directory probably still contains\r\n # backup_label.old and recovery.conf files. Remove these.\r\n\r\n if not ctl.is_valid:\r\n self.log.warning(\"Unable to determine last restart point, backup_label not created.\")\r\n else:\r\n # Write backup label and history file\r\n\r\n backup_label = \\\r\n\"\"\"START WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s)\r\nCHECKPOINT LOCATION: %(xlogid)X/%(xrecoff)X\r\nSTART TIME: %(start_time)s\r\nLABEL: SlaveBackup\"\r\n\"\"\"\r\n backup_history = \\\r\n\"\"\"START WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s)\r\nSTOP WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s)\r\nCHECKPOINT LOCATION: %(xlogid)X/%(xrecoff)X\r\nSTART TIME: %(start_time)s\r\nLABEL: SlaveBackup\"\r\nSTOP TIME: %(stop_time)s\r\n\"\"\"\r\n\r\n label_params = {\r\n \"xlogid\": ctl.xlogid,\r\n \"xrecoff\": ctl.xrecoff,\r\n \"wal_name\": ctl.wal_name,\r\n \"start_time\": time.strftime(\"%Y-%m-%d %H:%M:%S %Z\", start_time),\r\n \"stop_time\": time.strftime(\"%Y-%m-%d %H:%M:%S %Z\", stop_time),\r\n }\r\n\r\n # Write the label\r\n filename = os.path.join(dst, \"backup_label\")\r\n if self.not_really:\r\n self.log.info(\"Writing backup label to %s\", filename)\r\n else:\r\n lf = open(filename, \"w\")\r\n lf.write(backup_label % label_params)\r\n lf.close()\r\n\r\n # Now the history\r\n histfile = \"%s.%08X.backup\" % (ctl.wal_name, ctl.xrecoff % ctl.wal_size)\r\n completed_wals = self.cf.getfile(\"completed_wals\")\r\n filename = os.path.join(completed_wals, histfile)\r\n if os.path.exists(filename):\r\n self.log.warning(\"%s: already exists, refusing to overwrite.\", filename)\r\n else:\r\n if self.not_really:\r\n self.log.info(\"Writing backup history to %s\", filename)\r\n else:\r\n lf = open(filename, \"w\")\r\n lf.write(backup_history % label_params)\r\n lf.close()\r\n\r\n self.slave_purge_wals()\r\n finally:\r\n self.slave_continue()\r\n finally:\r\n self.slave_resume_backups()",
"def master_sync(self, daemon_mode=False):\r\n\r\n self.assert_is_master(True)\r\n\r\n use_xlog_functions = self.cf.getint(\"use_xlog_functions\", False)\r\n data_dir = self.cf.getfile(\"master_data\")\r\n xlog_dir = os.path.join(data_dir, \"pg_xlog\")\r\n master_bin = self.cf.getfile(\"master_bin\", \"\")\r\n\r\n dst_loc = os.path.join(self.cf.getfile(\"partial_wals\"), \"\")\r\n\r\n db = None\r\n if use_xlog_functions:\r\n try:\r\n db = self.get_database(\"master_db\", autocommit=1)\r\n except:\r\n self.log.warning(\"Database unavailable, record based log shipping not possible.\")\r\n if daemon_mode:\r\n return\r\n\r\n if db:\r\n cur = db.cursor()\r\n cur.execute(\"select file_name, file_offset from pg_xlogfile_name_offset(pg_current_xlog_location())\")\r\n (file_name, file_offs) = cur.fetchone()\r\n\r\n if not self.walchunk or self.walchunk.filename != file_name:\r\n # Switched to new WAL segment. Don't bother to copy the last bits - it\r\n # will be obsoleted by the archive_command.\r\n if self.walchunk and self.walchunk.sync_count > 0:\r\n self.log.info(\"Switched in %d seconds, %f sec in %d interim syncs, avg %f\",\r\n time.time() - self.walchunk.start_time,\r\n self.walchunk.sync_time,\r\n self.walchunk.sync_count,\r\n self.walchunk.sync_time / self.walchunk.sync_count)\r\n self.walchunk = WalChunk(file_name, 0, file_offs)\r\n else:\r\n self.walchunk.bytes = file_offs - self.walchunk.pos\r\n\r\n if self.walchunk.bytes > 0:\r\n self.master_send_partial(xlog_dir, self.walchunk, daemon_mode)\r\n else:\r\n files = os.listdir(xlog_dir)\r\n files.sort()\r\n\r\n last = self.get_last_complete()\r\n if last:\r\n self.log.info(\"%s: last complete\", last)\r\n else:\r\n self.log.info(\"last complete not found, copying all\")\r\n\r\n # obtain the last checkpoint wal name, this can be used for\r\n # limiting the amount of WAL files to copy if the database\r\n # has been cleanly shut down\r\n ctl = PgControlData(master_bin, data_dir, False)\r\n checkpoint_wal = None\r\n if ctl.is_valid:\r\n if not ctl.is_shutdown:\r\n # cannot rely on the checkpoint wal, should use some other method\r\n self.log.info(\"Database state is not 'shut down', copying all\")\r\n else:\r\n # ok, the database is shut down, we can use last checkpoint wal\r\n checkpoint_wal = ctl.wal_name\r\n self.log.info(\"last checkpoint wal: %s\", checkpoint_wal)\r\n else:\r\n self.log.info(\"Unable to obtain control file information, copying all\")\r\n\r\n for fn in files:\r\n # check if interesting file\r\n if len(fn) < 10:\r\n continue\r\n if fn[0] < \"0\" or fn[0] > '9':\r\n continue\r\n if fn.find(\".\") > 0:\r\n continue\r\n # check if too old\r\n if last:\r\n dot = last.find(\".\")\r\n if dot > 0:\r\n xlast = last[:dot]\r\n if fn < xlast:\r\n continue\r\n else:\r\n if fn <= last:\r\n continue\r\n # check if too new\r\n if checkpoint_wal and fn > checkpoint_wal:\r\n continue\r\n\r\n # got interesting WAL\r\n xlog = os.path.join(xlog_dir, fn)\r\n # copy data\r\n self.log.info('Syncing %s', xlog)\r\n if self.exec_rsync([xlog, dst_loc], not daemon_mode) != 0:\r\n self.log.error('Cannot sync %s', xlog)\r\n break\r\n else:\r\n self.log.info(\"Partial copy done\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Shift the process from slave to master, moving data as needed.
|
def slave_to_master():
print("Shifting from slave to master")
stop_slave_worker()
setup_slave_web()
pull_from_slave()
commit_pull_to_db()
stop_slave_web()
start_master_worker()
print("DONE!")
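
The helpers called here, and in the records that follow, rely on a handful of module-level names that never appear in the snippets themselves: req (the requests module), the per-app API URLs, the request headers, and the scale payloads. A minimal sketch of those assumptions is shown below; the names are taken from the snippets, while every value is a placeholder and not part of the original source.

# Hypothetical module-level setup assumed by the failover helpers in this
# and the following records. URLs, tokens and payload contents are
# placeholders, not values recovered from the original code.
import time
import requests as req

MASTER_API_URL = "https://api.example.com/apps/master-app"  # placeholder
SLAVE_API_URL = "https://api.example.com/apps/slave-app"    # placeholder
SLAVE_URL = "https://slave-app.example.com/"                # placeholder

MASTER_API_HEADERS = {"Authorization": "Bearer <master-token>",
                      "Content-Type": "application/json"}
SLAVE_API_HEADERS = {"Authorization": "Bearer <slave-token>",
                     "Content-Type": "application/json"}

# Assumed meaning of the scale payloads: zero instances vs. one instance
# of the targeted process type.
API_PAYLOAD_0 = {"quantity": 0}
API_PAYLOAD_1 = {"quantity": 1}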
|
[
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def become_master(slave_process, old_name):\n s = socket.socket()\n s.bind((\"\", 23456))\n print(\"[*] Listening for command from orchestrator to become master ...\", file=sys.stdout)\n s.listen(2)\n c, address = s.accept()\n print(\"Received command from orchestrator to become master: \" + c.recv(1024).decode(), file=sys.stdout)\n slave_process.terminate()\n\n os.environ[\"WORKER_TYPE\"] = \"master\"\n os.environ[\"NODE_NAME\"] = \"master\"\n\n client = docker.DockerClient(base_url=\"tcp://172.17.0.1:4444\")\n cnt = client.containers.get(old_name)\n cnt.rename(\"master\")\n\n logging.basicConfig()\n zk = KazooClient(hosts=zookeeper_hostname)\n zk.start()\n\n node_name = \"/worker/\" + os.environ[\"NODE_NAME\"]\n if not zk.exists(node_name):\n msg = \"Creating node: \" + node_name\n print(msg, file=sys.stdout)\n db_name = os.environ[\"DB_HOSTNAME\"]\n zk.create(node_name, db_name.encode(), ephemeral=True)\n\n time.sleep(3)\n zk.delete(\"/worker/\" + old_name)\n\n rpc_server = RpcServer(queue_name='writeQ', func=writedb, is_master=True)\n rpc_server.start()",
"def slave_operation():\n # receive data from master node\n x = comm.recv(source=0, tag=1)\n y = comm.recv(source=0, tag=2)\n\n # multiply the received matrix and send the result back to master\n z = multiply_matrix(x, y)\n comm.send(z, dest=0, tag=rank)",
"def bring_slave_back(self):\n self.containers.start_container('pg_slave_1')\n time.sleep(10)",
"def master_operation():\n distribute_matrix_data()\n assemble_matrix_data()",
"def swap_rebalance_master(self):\n task = self.__async_swap_rebalance(master=True)\n task.result()",
"def slave_run(self):\n\n # Initialize PDS data store here because only slaves need to do it.\n self.pds_store = {}\n\n while True:\n data = self.comm.bcast(None, root=0)\n\n op = data[0]\n if op == self.OP_PARALLELIZE:\n pds_id = data[1]\n self.__rec_pds_id = pds_id\n pds = self.parallelize([])\n self.pds_store[pds.pds_id] = pds\n\n\n elif op == self.OP_MAP:\n pds_id, pds_id_result, function_packed = data[1:]\n self.__rec_pds_id, self.__rec_pds_id_result = pds_id, pds_id_result\n\n #Use cloudpickle to convert back function string to a function\n func = cloudpickle.loads(function_packed)\n #Set the function's backend to current class\n #so it can access bds_store properly\n # func.backend = self\n\n\n # Access an existing PDS\n pds = self.pds_store[pds_id]\n pds_res = self.map(func, pds)\n\n # Store the result in a newly gnerated PDS pds_id\n self.pds_store[pds_res.pds_id] = pds_res\n\n elif op == self.OP_BROADCAST:\n self.__bds_id = data[1]\n self.broadcast(None)\n\n elif op == self.OP_COLLECT:\n pds_id = data[1]\n\n # Access an existing PDS from data store\n pds = self.pds_store[pds_id]\n\n self.collect(pds)\n\n elif op == self.OP_DELETEPDS:\n pds_id = data[1]\n del self.pds_store[pds_id]\n\n elif op == self.OP_DELETEBDS:\n bds_id = data[1]\n del self.bds_store[bds_id]\n\n elif op == self.OP_FINISH:\n quit()\n else:\n raise Exception(\"Slave recieved unknown command code\")",
"def move_memory_slave(self, from_index, to_index, debug=False):\n s_count = self.get_number_of_memory_slaves()\n if to_index >= s_count:\n to_index = s_count - 1\n\n if from_index == to_index:\n return\n\n graph_dict = self.get_nodes_dict()\n\n # Find the slave at the from_index.\n from_node = None\n for key in graph_dict.keys():\n if graph_dict[key].node_type != NodeType.SLAVE or \\\n graph_dict[key].slave_type != SlaveType.MEMORY or \\\n graph_dict[key].slave_index != from_index:\n continue\n from_node = graph_dict[key]\n break\n\n if from_node is None:\n raise SlaveError(\"Slave with from index %d not found\" % (from_index))\n\n # Find the slave at the to_index.\n to_node = None\n for key in graph_dict.keys():\n if graph_dict[key].node_type != NodeType.SLAVE or \\\n graph_dict[key].slave_type != SlaveType.MEMORY or \\\n graph_dict[key].slave_index != to_index:\n continue\n to_node = graph_dict[key]\n break\n\n if to_node is None:\n raise SlaveError(\"Slave with to index %d not found\" % (to_index))\n\n if debug:\n print \"before move:\"\n print \"\\tslave %s at position %d with name: %s\" % \\\n (from_node.name, from_node.slave_index, from_node.unique_name)\n print \"\\tslave %s at position %d with name: %s\" % \\\n (to_node.name, to_node.slave_index, to_node.unique_name)\n\n from_node.slave_index = to_index\n from_unique = get_unique_name(from_node.name,\n from_node.node_type,\n from_node.slave_type,\n from_node.slave_index)\n\n mapping = {from_node.unique_name : from_unique}\n\n if debug:\n print \"from.unique_name: \" + from_node.unique_name\n print \"from_unique: \" + from_unique\n\n print \"keys\"\n for name in graph_dict.keys():\n print \"key: \" + name\n\n self.graph = nx.relabel_nodes(self.graph,\n {from_node.unique_name : from_unique})\n from_node = self.get_node(from_unique)\n from_node.slave_index = to_index\n from_node.unique_name = from_unique\n\n to_node.slave_index = from_index\n to_unique = get_unique_name(to_node.name,\n to_node.node_type,\n to_node.slave_type,\n to_node.slave_index)\n self.graph = nx.relabel_nodes(self.graph, {to_node.unique_name:to_unique})\n\n to_node = self.get_node(to_unique)\n to_node.slave_index = from_index\n to_node.unique_name = to_unique\n\n if debug:\n print \"after move:\"\n print \"\\tslave %s at position %d with name: %s\" % \\\n (from_node.name, from_node.slave_index, from_node.unique_name)\n print \"\\tslave %s at position %d with name: %s\" % (\\\n to_node.name, to_node.slave_index, to_node.unique_name)\n\n graph_dict = self.get_nodes_dict()\n print \"keys\"\n for name in graph_dict.keys():\n print \"key: \" + name",
"def onSlave(self):",
"def move(self, *args):\n if self._cluster:\n return self.execute(u'MOVE', *args, shard_key=args[0])\n return self.execute(u'MOVE', *args)",
"def transform_to_master(self, transform_to_master):\n\n self._transform_to_master = transform_to_master",
"def __initiate_slave_node(self):\n master_cmd = self.__spark_installation_path + '/sbin/start-slave.sh spark://' + self.__host_ip + ':7077'\n print(master_cmd)\n #os.chdir(self.__home_dir)\n output = os.system(master_cmd)\n if output != 0:\n raise Exception(\"Terminating process!\")\n self.__logger.log('Enabled slave node..')",
"def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")",
"def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))",
"def master_sync(self, daemon_mode=False):\r\n\r\n self.assert_is_master(True)\r\n\r\n use_xlog_functions = self.cf.getint(\"use_xlog_functions\", False)\r\n data_dir = self.cf.getfile(\"master_data\")\r\n xlog_dir = os.path.join(data_dir, \"pg_xlog\")\r\n master_bin = self.cf.getfile(\"master_bin\", \"\")\r\n\r\n dst_loc = os.path.join(self.cf.getfile(\"partial_wals\"), \"\")\r\n\r\n db = None\r\n if use_xlog_functions:\r\n try:\r\n db = self.get_database(\"master_db\", autocommit=1)\r\n except:\r\n self.log.warning(\"Database unavailable, record based log shipping not possible.\")\r\n if daemon_mode:\r\n return\r\n\r\n if db:\r\n cur = db.cursor()\r\n cur.execute(\"select file_name, file_offset from pg_xlogfile_name_offset(pg_current_xlog_location())\")\r\n (file_name, file_offs) = cur.fetchone()\r\n\r\n if not self.walchunk or self.walchunk.filename != file_name:\r\n # Switched to new WAL segment. Don't bother to copy the last bits - it\r\n # will be obsoleted by the archive_command.\r\n if self.walchunk and self.walchunk.sync_count > 0:\r\n self.log.info(\"Switched in %d seconds, %f sec in %d interim syncs, avg %f\",\r\n time.time() - self.walchunk.start_time,\r\n self.walchunk.sync_time,\r\n self.walchunk.sync_count,\r\n self.walchunk.sync_time / self.walchunk.sync_count)\r\n self.walchunk = WalChunk(file_name, 0, file_offs)\r\n else:\r\n self.walchunk.bytes = file_offs - self.walchunk.pos\r\n\r\n if self.walchunk.bytes > 0:\r\n self.master_send_partial(xlog_dir, self.walchunk, daemon_mode)\r\n else:\r\n files = os.listdir(xlog_dir)\r\n files.sort()\r\n\r\n last = self.get_last_complete()\r\n if last:\r\n self.log.info(\"%s: last complete\", last)\r\n else:\r\n self.log.info(\"last complete not found, copying all\")\r\n\r\n # obtain the last checkpoint wal name, this can be used for\r\n # limiting the amount of WAL files to copy if the database\r\n # has been cleanly shut down\r\n ctl = PgControlData(master_bin, data_dir, False)\r\n checkpoint_wal = None\r\n if ctl.is_valid:\r\n if not ctl.is_shutdown:\r\n # cannot rely on the checkpoint wal, should use some other method\r\n self.log.info(\"Database state is not 'shut down', copying all\")\r\n else:\r\n # ok, the database is shut down, we can use last checkpoint wal\r\n checkpoint_wal = ctl.wal_name\r\n self.log.info(\"last checkpoint wal: %s\", checkpoint_wal)\r\n else:\r\n self.log.info(\"Unable to obtain control file information, copying all\")\r\n\r\n for fn in files:\r\n # check if interesting file\r\n if len(fn) < 10:\r\n continue\r\n if fn[0] < \"0\" or fn[0] > '9':\r\n continue\r\n if fn.find(\".\") > 0:\r\n continue\r\n # check if too old\r\n if last:\r\n dot = last.find(\".\")\r\n if dot > 0:\r\n xlast = last[:dot]\r\n if fn < xlast:\r\n continue\r\n else:\r\n if fn <= last:\r\n continue\r\n # check if too new\r\n if checkpoint_wal and fn > checkpoint_wal:\r\n continue\r\n\r\n # got interesting WAL\r\n xlog = os.path.join(xlog_dir, fn)\r\n # copy data\r\n self.log.info('Syncing %s', xlog)\r\n if self.exec_rsync([xlog, dst_loc], not daemon_mode) != 0:\r\n self.log.error('Cannot sync %s', xlog)\r\n break\r\n else:\r\n self.log.info(\"Partial copy done\")",
"def _runMaster(run, comm, logger):\n\n from mpi4py import MPI\n stat = MPI.Status()\n rank = comm.rank\n size = comm.size\n\n logger.info(\"BHSnapshotData._runMaster()\")\n logger.debug(\"Rank %d/%d\" % (rank, size))\n\n # Make sure output directory exists\n fname = _GET_BH_SINGLE_SNAPSHOT_FILENAME(run, 0)\n zio.check_path(fname)\n\n # Load BH Mergers\n logger.info(\"Loading BH Mergers\")\n mrgs = mergers.load_fixed_mergers(run, loadsave=True, verbose=False)\n numMergers = mrgs[MERGERS.NUM]\n logger.debug(\"- Loaded %d mrgs\" % (numMergers))\n\n # Init status file\n statFileName = bh_constants._GET_STATUS_FILENAME(__file__, run=run, version=_VERSION)\n statFile = open(statFileName, 'w')\n logger.debug(\"Opened status file '%s'\" % (statFileName))\n statFile.write('%s\\n' % (str(datetime.now())))\n beg = datetime.now()\n\n num_pos = 0\n num_neg = 0\n num_new = 0\n countDone = 0\n count = 0\n times = np.zeros(NUM_SNAPS-1)\n\n # Iterate Over Snapshots\n # ----------------------\n # Go over snapshots in random order to get a better estimate of ETA/duration\n snapList = np.arange(NUM_SNAPS-1)\n np.random.shuffle(snapList)\n logger.info(\"Iterating over snapshots\")\n pbar = zio.getProgressBar(NUM_SNAPS-1)\n for snapNum in snapList:\n logger.debug(\"- Snap %d, count %d, done %d\" % (snapNum, count, countDone))\n\n # Get Mergers occuring just after Snapshot `snapNum`\n mrgs = mrgs[MERGERS.MAP_STOM][snapNum+1]\n nums = len(mrgs)\n targetIDs = mrgs[MERGERS.IDS][mrgs]\n logger.debug(\"- %d Mergers from snapshot %d\" % (nums, snapNum+1))\n\n # Look for available slave process\n data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=stat)\n src = stat.Get_source()\n tag = stat.Get_tag()\n logger.debug(\"- Received signal from %d\" % (src))\n\n # Track number of completed profiles\n if (tag == MPI_TAGS.DONE):\n durat, pos, neg, new = data\n logger.debug(\"- - Done after %s, pos %d, neg %d, new %d\" % (durat, pos, neg, new))\n\n times[countDone] = durat\n num_pos += pos\n num_neg += neg\n num_new += new\n countDone += 1\n\n # Distribute tasks\n logger.debug(\"- Sending new task to %d\" % (src))\n comm.send([snapNum, mrgs, targetIDs, numMergers], dest=src, tag=MPI_TAGS.START)\n logger.debug(\"- New task sent\")\n\n # Write status to file and log\n dur = (datetime.now()-beg)\n fracDone = 1.0*countDone/(NUM_SNAPS-1)\n statStr = 'Snap %3d (rank %03d) %8d/%8d = %.4f in %s %8d pos %8d neg %3d new\\n' % \\\n (snapNum, src, countDone, NUM_SNAPS-1, fracDone, str(dur), num_pos, num_neg, num_new)\n statFile.write(statStr)\n statFile.flush()\n logger.debug(statStr)\n count += 1\n pbar.update(count)\n\n statFile.write('\\n\\nDone after %s' % (str(datetime.now()-beg)))\n statFile.close()\n pbar.finish()\n\n # Close out all Processes\n # -----------------------\n numActive = size-1\n logger.info(\"Exiting %d active processes\" % (numActive))\n while(numActive > 0):\n # Find available slave process\n data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=stat)\n src = stat.Get_source()\n tag = stat.Get_tag()\n logger.debug(\"- Received signal from %d\" % (src))\n\n # If we're recieving exit confirmation, count it\n if (tag == MPI_TAGS.EXIT): numActive -= 1\n else:\n # If a process just completed, count it\n if (tag == MPI_TAGS.DONE):\n durat, pos, neg, new = data\n logger.debug(\"- - %d Done after %s, pos %d, neg %d, new %d\" %\n (src, durat, pos, neg, new))\n times[countDone] = durat\n countDone += 1\n num_pos += pos\n num_neg += neg\n num_new += new\n\n # Send exit command\n logger.debug(\"Sending 
exit to %d. %d Active.\" % (src, numActive))\n comm.send(None, dest=src, tag=MPI_TAGS.EXIT)\n\n fracDone = 1.0*countDone/(NUM_SNAPS-1)\n logger.debug(\"%d/%d = %.4f Completed tasks!\" % (countDone, NUM_SNAPS-1, fracDone))\n logger.debug(\"Average time %.4f +- %.4f\" % (np.average(times), np.std(times)))\n logger.info(\"Totals: pos = %5d neg = %5d new = %3d\" % (num_pos, num_neg, num_new))\n\n return",
"def replicate_slave_from_master(master):\n if len(env.hosts) > 1:\n exit('This job is currently only setup to run against one slave at a time')\n\n with settings(host_string=master):\n # `--single-transaction` in conjunction with `--master-data` avoids\n # locking tables for any significant length of time. See\n # https://web.archive.org/web/20160308163516/https://dev.mysql.com/doc/refman/5.5/en/mysqldump.html#option_mysqldump_single-transaction\n run('sudo -i mysqldump -u root --all-databases --master-data --single-transaction --quick --add-drop-database > dump.sql')\n\n with settings(host_string=master, forward_agent=True):\n run('scp dump.sql {0}:~'.format(env.hosts[0]))\n\n with settings(host_string=master):\n run('rm dump.sql')\n\n run_mysql_command(\"STOP SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=OFF\")\n\n with hide('running', 'stdout'):\n database_file_size = run(\"stat --format='%s' dump.sql\")\n\n print('Importing MySQL database which is {0}GB, this might take a while...'.format(round(int(database_file_size) / (1024 * 1024 * 1024 * 1.0), 1)))\n run('sudo -i mysql -uroot < dump.sql')\n\n run('rm dump.sql')\n\n run_mysql_command(\"START SLAVE\")\n run_mysql_command(\"SET GLOBAL slow_query_log=ON\")\n\n slave_status()",
"def slave_backup(self):\r\n self.assert_is_master(False)\r\n if self.slave_lock_backups() != 0:\r\n self.log.error(\"Cannot obtain backup lock.\")\r\n sys.exit(1)\r\n\r\n try:\r\n self.slave_pause(waitcomplete=1)\r\n\r\n try:\r\n self.slave_rotate_backups()\r\n src = self.cf.getfile(\"slave_data\")\r\n dst = self.cf.getfile(\"full_backup\")\r\n\r\n start_time = time.localtime()\r\n cmdline = [\"cp\", \"-a\", src, dst ]\r\n self.log.info(\"Executing %s\", \" \".join(cmdline))\r\n if not self.not_really:\r\n self.exec_cmd(cmdline)\r\n stop_time = time.localtime()\r\n\r\n # Obtain the last restart point information\r\n ctl = PgControlData(self.cf.getfile(\"slave_bin\", \"\"), dst, True)\r\n\r\n # TODO: The newly created backup directory probably still contains\r\n # backup_label.old and recovery.conf files. Remove these.\r\n\r\n if not ctl.is_valid:\r\n self.log.warning(\"Unable to determine last restart point, backup_label not created.\")\r\n else:\r\n # Write backup label and history file\r\n\r\n backup_label = \\\r\n\"\"\"START WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s)\r\nCHECKPOINT LOCATION: %(xlogid)X/%(xrecoff)X\r\nSTART TIME: %(start_time)s\r\nLABEL: SlaveBackup\"\r\n\"\"\"\r\n backup_history = \\\r\n\"\"\"START WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s)\r\nSTOP WAL LOCATION: %(xlogid)X/%(xrecoff)X (file %(wal_name)s)\r\nCHECKPOINT LOCATION: %(xlogid)X/%(xrecoff)X\r\nSTART TIME: %(start_time)s\r\nLABEL: SlaveBackup\"\r\nSTOP TIME: %(stop_time)s\r\n\"\"\"\r\n\r\n label_params = {\r\n \"xlogid\": ctl.xlogid,\r\n \"xrecoff\": ctl.xrecoff,\r\n \"wal_name\": ctl.wal_name,\r\n \"start_time\": time.strftime(\"%Y-%m-%d %H:%M:%S %Z\", start_time),\r\n \"stop_time\": time.strftime(\"%Y-%m-%d %H:%M:%S %Z\", stop_time),\r\n }\r\n\r\n # Write the label\r\n filename = os.path.join(dst, \"backup_label\")\r\n if self.not_really:\r\n self.log.info(\"Writing backup label to %s\", filename)\r\n else:\r\n lf = open(filename, \"w\")\r\n lf.write(backup_label % label_params)\r\n lf.close()\r\n\r\n # Now the history\r\n histfile = \"%s.%08X.backup\" % (ctl.wal_name, ctl.xrecoff % ctl.wal_size)\r\n completed_wals = self.cf.getfile(\"completed_wals\")\r\n filename = os.path.join(completed_wals, histfile)\r\n if os.path.exists(filename):\r\n self.log.warning(\"%s: already exists, refusing to overwrite.\", filename)\r\n else:\r\n if self.not_really:\r\n self.log.info(\"Writing backup history to %s\", filename)\r\n else:\r\n lf = open(filename, \"w\")\r\n lf.write(backup_history % label_params)\r\n lf.close()\r\n\r\n self.slave_purge_wals()\r\n finally:\r\n self.slave_continue()\r\n finally:\r\n self.slave_resume_backups()",
"def swap_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n grp.layout.cmd_swap_main()\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_shuffle_down()\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets up the web server on the slave, then checks it.
|
def setup_slave_web():
print("Starting slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the web dyno on slave")
print(r.text)
return False
#wait a bit for the web process to start up
print("Waiting a bit")
time.sleep(10)
r = req.get(SLAVE_URL)
if not r.text.startswith("Index"):
print("Something is wrong with slave:")
print(r.text)
return False
print("Got response from slave:", r.text)
return True
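
The fixed ten-second sleep above is a guess at how long the slave web process needs before it can answer. A sketch of a polling alternative is shown below, assuming the same SLAVE_URL and the same "Index" response prefix as in the snippet; wait_for_slave_web, the timeout, and the poll interval are illustrative names and values, not part of the original source.

def wait_for_slave_web(timeout=60, interval=2):
    # Poll the slave until it serves the expected "Index" page, instead of
    # sleeping for a fixed amount of time and hoping it is ready.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            r = req.get(SLAVE_URL)
            if r.status_code == req.codes.ok and r.text.startswith("Index"):
                return True
        except req.exceptions.RequestException:
            pass  # slave not reachable yet, keep polling
        time.sleep(interval)
    return False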
|
[
"def webserver_start():\n run(_webserver_command())",
"def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())",
"def setup():\n # Setup requires root privleges\n env.user = \"root\"\n env.disable_known_hosts = True\n jmeter_version=\"2.13\"\n\n config = _load_config()\n\n try:\n print colors.green(\"Checking the status of the server\")\n for key in config['servers']:\n if config['servers'][key]['host'] == env.host:\n droplet = digitalocean.Droplet(token=config['token'], id=config['servers'][key]['id'])\n droplet.load()\n actions = droplet.get_actions()\n for action in actions:\n if action.type == 'create' and action.status != 'completed':\n raise Exception('Cannot continue, server is not active', 'setup')\n print colors.green(\"Server is active\")\n except Exception as e:\n pprint.pprint(e)\n print colors.red(\"Failed to load server: %s\" % e)\n return\n\n if not files.exists('/home/jmeter/apache-jmeter/bin/jmeter-server'):\n run('apt-get update > /dev/null; apt-get install git-all unzip openjdk-7-jre-headless snmpd iftop -y > /dev/null')\n run('id jmeter > /dev/null 2&>1 || adduser jmeter --disabled-password --system --shell /bin/bash')\n run('test -f /home/jmeter/apache-jmeter-' + jmeter_version + '.tgz || wget -P /home/jmeter http://apache.mirrors.ionfish.org//jmeter/binaries/apache-jmeter-' + jmeter_version + '.tgz')\n run('tar -C /home/jmeter/ -xf /home/jmeter/apache-jmeter-' + jmeter_version + '.tgz;')\n run('test -d /home/jmeter/apache-jmeter || mv /home/jmeter/apache-jmeter-' + jmeter_version + ' /home/jmeter/apache-jmeter')\n run('test -f home/jmeter/JMeterPlugins-Standard-1.1.3.zip || wget -P /home/jmeter http://jmeter-plugins.org/downloads/file/JMeterPlugins-Standard-1.1.3.zip')\n run('test -f home/jmeter/JMeterPlugins-Extras-1.2.1.zip || wget -P /home/jmeter http://jmeter-plugins.org/downloads/file/JMeterPlugins-Extras-1.2.1.zip')\n run('unzip -o /home/jmeter/JMeterPlugins-Standard-1.1.3.zip -d /home/jmeter/apache-jmeter/')\n run('unzip -o /home/jmeter/JMeterPlugins-Extras-1.2.1.zip -d /home/jmeter/apache-jmeter/')\n\n run('mkdir -p /var/log/jmeter; chown jmeter /var/log/jmeter')\n\n put('%s/files/jmeter' % os.path.dirname(env.real_fabfile), '/home/jmeter/apache-jmeter/bin/jmeter')\n run('ln -s /home/jmeter/apache-jmeter/bin/jmeter /usr/local/bin/jmeter')\n\n run('mkdir -p /home/jmeter/.ssh/')\n put('%s/files/jmeter-id_rsa.pub' % os.path.dirname(env.real_fabfile), '/home/jmeter/.ssh/authorized_keys')\n run('chown jmeter -R /home/jmeter')\n run('chmod 700 /home/jmeter/.ssh')\n\n if not files.exists('/home/jmeter/.ssh/config'):\n run('echo -e \"StrictHostKeyChecking no\\n\" > /home/jmeter/.ssh/config')",
"def _start(self):\n\n ip = mh.cfg['Extensions']['TestEnv']['server_ip']\n port = mh.cfg['Extensions']['TestEnv']['server_port']\n self._server = application(urls, globals())\n httpserver.runsimple(self._server.wsgifunc(), (str(ip), port))",
"def setup_server():\n cherrypy.config.update('server.conf')\n cherrypy.tree.mount(StringGeneratorWebService(), '/', 'server.conf')",
"def server_setup(self):\n raise NotImplementedError",
"def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)",
"def run_web_start_server(self):\r\n\r\n if isfunction(self.start_server):\r\n self.start_server()\r\n\r\n if self.start_server not in self.frameworks:\r\n raise Exception(f\"'start_server'({self.start_server}) not in {','.join(self.frameworks)} and also not a function which starts the webframework\")\r\n\r\n if self.start_server == \"flask-socketio\":\r\n self.socketio.run(self.app, host=self.host, port=self.port)\r\n \r\n elif self.start_server == \"flask\":\r\n if self.app: \r\n import waitress\r\n waitress.serve(self.app, host=self.host, port=self.port)\r\n else:\r\n os.system(f\"waitress-serve --host={self.host} --port={self.port} main:app\")\r\n\r\n elif self.start_server == \"fastapi\": \r\n if self.app:\r\n import uvicorn\r\n uvicorn.run(self.app, host=self.host, port=self.port, log_level=\"info\")\r\n else:\r\n os.system(f\"uvicorn --host {self.host} --port {self.port} main:app\")\r\n\r\n elif self.start_server == \"django\":\r\n if self.app:\r\n import waitress\r\n waitress.serve(self.app, host=self.host, port=self.port)\r\n else:\r\n contents = os.listdir(os.getcwd())\r\n for content in contents:\r\n if os.path.isdir(content):\r\n files = os.listdir(content)\r\n if 'wsgi.py' in files:\r\n break \r\n django_project = os.path.basename(content) \r\n os.system(f\"waitress-serve --host={self.host} --port={self.port} {django_project}.wsgi:application\")",
"def startup_a_server():\r\n\tlocalServe.create_a_server()",
"def server():\n\n os.environ['SITE_CONFIG'] = 'website.config.Dev'\n\n from website import site\n site.run(host='0.0.0.0')",
"def master_server():\n os.system('python master_server.py')",
"def start():\n port = cfg.web.port\n\n events.dispatcher.register_target(event_logger)\n\n logging.info(\"Starting web server: port=%d\" % port)\n utils.DaemonThread(\n target=bottle.run, kwargs={\"host\": cfg.web.bind, \"port\": cfg.web.port}\n ).start()",
"def server(c):\n print('==================================================')\n print('Building Web+App Server')\n print('==================================================')\n # Run apt update\n c.sudo('apt-get update')\n # Nginx & PHP\n nginx.install(c)\n php.install(c)\n nginx.configure(c)\n php.configure(c)\n # Papertrail\n if (c.enabled('papertrail')):\n papertrail.install(c)\n papertrail.configure(c)\n # S3FS\n if (c.enabled('s3fs')):\n s3fs.install(c)\n s3fs.configure(c)\n # Supervisor\n supervisor.install(c)\n print('==================================================')\n print('... done Building Web+App Server')\n print('==================================================')",
"def webserver(ctx):\n\n # Lazy import to avoid importing web packages when using pure cli\n import jotquote.web\n jotquote.web.run_server()",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def setup_remote_site(self):\n raise NotImplementedError",
"def setup_webserver(webserver):\n\n scheme = \"http://\"\n if (webserver.startswith('http://') or\n webserver.startswith('chrome://') or\n webserver.startswith('file:///')):\n scheme = \"\"\n elif '://' in webserver:\n print \"Unable to parse user defined webserver: '%s'\" % (webserver)\n sys.exit(2)\n\n url = urlparse.urlparse('%s%s' % (scheme, webserver))\n port = url.port\n\n if port:\n import mozhttpd\n return mozhttpd.MozHttpd(host=url.hostname, port=int(port), docroot=here)\n else:\n print \"WARNING: unable to start web server without custom port configured\"\n return None",
"def register_web_server(manager):\n global web_server, config_hash\n\n if not manager.is_daemon:\n return\n\n config = manager.config.get('web_server')\n if get_config_hash(config) == config_hash:\n logger.debug('web server config has\\'nt changed')\n return\n\n config_hash = get_config_hash(config)\n web_server_config = prepare_config(config)\n\n # Removes any existing web server instances if exists\n stop_server(manager)\n\n if not web_server_config:\n return\n\n logger.info(\n 'Running web server at IP {}:{}', web_server_config['bind'], web_server_config['port']\n )\n # Register API\n api_app.secret_key = get_secret()\n\n logger.info(\"Initiating API\")\n register_app('/api', api_app, 'API')\n\n # Register WebUI\n if web_server_config.get('web_ui'):\n if web_server_config.get('run_v1'):\n logger.info('Registering WebUI v1')\n register_web_ui_v1(manager)\n\n logger.info('Registering WebUI v2')\n register_web_ui_v2(web_server_config)\n\n web_server = setup_server(web_server_config)",
"def start_server():\n app.run(host = Config.HOSTNAME, port = Config.PORT)\n logger.info('Http Server is running at http://%s:%s' \n % (Config.HOSTNAME, Config.PORT))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Stops the web process on the slave.
|
def stop_slave_web():
print("Stopping slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the web dyno on slave")
print(r.text)
return False
#wait a bit for the web process to stop
print("Waiting a bit")
time.sleep(2)
return True
|
[
"def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)",
"def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def stop():\n server = current_server()\n server.stop()",
"def stop(self):\n\n self._stop_server = True\n\n self.join()\n self.httpd.server_close()",
"def stop_slave(instance):\n stmt = \"stop slave\"\n instance.get_connection()\n instance.execute_stmt(connection=instance.connection, stmt=stmt)\n if is_slave_running(instance):\n print \"Error: unable stop slave replication.\"\n return 1",
"def stop(self):\n cherrypy.server.stop()",
"def stop(self):\n self._kill_process()",
"def stop() -> None:\n pid = process_management.find_pid_on_port(\"5000\")\n process_management.kill_process(pid)\n print(\"Backend server stopped at localhost:5000.\")",
"def stop(self):\n self.scion_sh('stop')",
"def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))",
"def _stop_ht_master():\n run('%s/%s/bin/stop-servers.sh --no-hyperspace --no-rangeserver ' \\\n '--no-dfsbroker --no-thriftbroker' % (ht_install_dir(), ht_version())\n )",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None",
"def stop(self):\n self._server.stop_build(self._job.name, self.number)",
"def stop(self) -> 'ZenithPageserver':\n\n if self.running:\n self.zenith_cli.run(['stop'])\n self.running = False\n\n return self",
"def stop(self) -> None:\n self.server.stop_listening()",
"def stop(self):\n\n log_info(\"Stopping LiteServ ...\")\n\n self.logfile.flush()\n self.logfile.close()\n self.process.kill()\n self.process.wait()\n\n self._verify_not_running()",
"def stop(self):\n\n print utilities.run(os.path.join(\"source \" + self.params.INSTALLATION_DIRECTORY, \"chorus_path.sh\") + \" && chorus_control.sh stop\", communicate=\"\", user=self.user())",
"def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)",
"def stopwasp():\n\n\trespond = send_command('stopwasp')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Starts the worker process on the master.
|
def start_master_worker():
print("Starting master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on master")
print(r.text)
return False
#wait a bit for the worker process to start
print("Waiting a bit")
time.sleep(10)
return True
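
This function and the stop_master_worker, start_slave_worker, and stop_slave_worker records that follow differ only in the API URL, headers, payload, and settle time. A sketch of a single helper that factors out the shared PATCH-then-wait pattern is shown below; scale_formation and its parameters are invented for illustration and are not part of the original source.

def scale_formation(api_url, headers, process_type, payload, settle_seconds):
    # Generic form of the start_*/stop_* helpers: PATCH the formation
    # endpoint, report any failure, then give the process time to settle.
    r = req.patch(f"{api_url}/formation/{process_type}", json=payload, headers=headers)
    if r.status_code != req.codes.ok:
        print(f"Unable to scale {process_type} via {api_url}")
        print(r.text)
        return False
    print("Waiting a bit")
    time.sleep(settle_seconds)
    return True

# start_master_worker() would then reduce to:
# scale_formation(MASTER_API_URL, MASTER_API_HEADERS, "worker", API_PAYLOAD_1, 10)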
|
[
"def __start_local_master():\n import params\n component_name = __get_component_name()\n\n __setup_hdfs_dirs()\n\n utils.exec_hawq_operation(\n hawq_constants.START,\n \"{0} -a -v\".format(component_name),\n not_if=utils.chk_hawq_process_status_cmd(params.hawq_master_address_port, component_name))\n Logger.info(\"Master {0} started\".format(params.hostname))",
"def start (self):\n\t\tself.log.info(\"starting workers.\")\n\t\tself.spawn(max(0,self.low-len(self.worker)))",
"def start(self):\n self.handler = None\n self.master_node = None\n self.uri = None\n handler = rosmaster.master_api.ROSMasterHandler(self.num_workers)\n master_node = rosgraph.xmlrpc.XmlRpcNode(self.port, handler)\n master_node.start()\n while not master_node.uri:\n time.sleep(0.0001)\n self.handler = handler\n self.master_node = master_node\n self.uri = master_node.uri\n logging.getLogger('rosmaster.master').info(\"Master initialized: port[%s], uri[%s]\", self.port, self.uri)",
"def master_service():\n master = Master()\n logger.info(\"Master running on ip: {}\".format(get_host_ip()))\n\n master.server.run()",
"def start(self):\n log.write(\"start workers\\n\")\n log.flush()\n\n self.sparkContainerID = check_output([\"docker\",\n \"run\",\n \"--net=host\", \n \"-d\",\n \"-v\", \"/mnt/ephemeral/:/ephemeral/:rw\",\n \"-e\", \"\\\"SPARK_MASTER_IP=\"+self.masterIP+\":\"+SPARK_MASTER_PORT+\"\\\"\",\n \"-e\", \"SPARK_LOCAL_DIRS=/ephemeral/spark/local\",\n \"-e\", \"SPARK_WORKER_DIR=/ephemeral/spark/work\",\n \"quay.io/ucsc_cgl/apache-spark-worker:1.5.2\", \n self.masterIP+\":\"+SPARK_MASTER_PORT])[:-1]\n self.hdfsContainerID = check_output([\"docker\",\n \"run\",\n \"--net=host\",\n \"-d\",\n \"-v\", \"/mnt/ephemeral/:/ephemeral/:rw\",\n \"quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2\", self.masterIP])[:-1]\n return",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()",
"def start_worker_process():\n # register class\n BaseManager.register(\"JobHolder\", JobHolder)\n BaseManager.register(\"StatusHolder\", StatusHolder)\n manager = BaseManager()\n manager.start()\n\n # all job hodler\n Controller.job_holder = manager.JobHolder()\n \"\"\":type: JobHolder\"\"\"\n\n # all job status holder\n Controller.status_holder = manager.StatusHolder()\n \"\"\":type: StatusHolder\"\"\"\n\n # start worker process\n for i in xrange(config.worker_process_count):\n p = Process(target=Controller.worker_process_func, args=[Controller.job_holder, Controller.status_holder])\n p.start()\n Controller.process_list.append(p)\n\n # start dispatch process\n p = Process(target=Controller.dispatch_process_func, args=[Controller.job_holder])\n p.start()\n Controller.process_list.append(p)",
"def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)",
"def main():\n setup()\n master = Master()\n master.start()",
"def start_master():\n import params\n\n if not params.hostname in [params.hawqmaster_host, params.hawqstandby_host]:\n Fail(\"Host should be either active Hawq master or Hawq standby.\")\n\n __check_dfs_truncate_enforced()\n\n is_active_master = __is_active_master()\n __setup_passwordless_ssh()\n\n if __is_local_initialized():\n __start_local_master()\n return\n\n if is_active_master:\n __init_active()\n elif __is_standby_host():\n __init_standby()",
"def __initiate_master_node(self):\n master_cmd = self.__spark_installation_path + '/sbin/start-master.sh'\n print(master_cmd)\n #os.chdir(self.__home_dir)\n output = os.system(master_cmd)\n if output != 0:\n raise Exception(\"Terminating process!\")\n self.__logger.log('Enabled master node..')",
"def start(self):\n if not self._worker:\n # the worker might be already created in case of deserialization\n self._worker = APIWorker(self.queue)\n self._worker.start()",
"def _StartWorkerProcess(self, process_name):",
"def _start_ht_master():\n run('%s/%s/bin/start-master.sh %s'\n % (ht_install_dir(), ht_version(), ht_config_option()), pty=False\n )",
"def master_server():\n os.system('python master_server.py')",
"def _start(self, workerid, job_count=None, job_name=None):\n return slurm.submit(\n \"{} -m cluster_tools.remote {}\".format(sys.executable, workerid),\n job_resources=self.job_resources,\n job_name=self.job_name if self.job_name is not None else job_name,\n additional_setup_lines=self.additional_setup_lines,\n job_count=job_count,\n )",
"def start(self):\n\n self._check_initialization()\n\n self.start_dfs()\n self.start_map_reduce()\n\n self.running = True",
"def start(self):\n if self.working:\n return\n self.working = True\n for i in range(self.num_workers):\n w = threading.Thread(\n name=\"Worker Thread #{i}\".format(i=i),\n target=self._worker,\n args=(self.clients[i], self.settings,),\n )\n w.daemon = True\n w.start()\n print(\"{0} client_id = {1}\".format(\n w.getName(), self.clients[i]))\n self.workers.append(w)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Stops the worker process on the master.
|
def stop_master_worker():
print("Stopping master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on master")
print(r.text)
return False
#wait a bit for the worker process to stop
print("Waiting a bit")
time.sleep(2)
return True
|
[
"def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def stop(self):\n self._kill_process()",
"def kill_worker(self, proc):\n worker_id = self.svr.svr_ip + \"_\" + str(proc.pid)\n if proc.is_alive():\n proc.terminate()\n\n self.clear_process_metrics(proc.pid)\n self.workers_group.pop(worker_id)",
"def stop(self):\n if self._was_stopped:\n LOG.debug(\"not running, nothing to do to stop\")\n return\n LOG.info(\"Stop worker %s\", self)\n self.stop_event_loop()\n self._cleanup()\n self._stopped()\n self._was_stopped = True",
"def stop(self):\n log.write(\"stop workers\\n\")\n log.flush()\n\n call([\"docker\", \"exec\", self.sparkContainerID, \"rm\", \"-r\", \"/ephemeral/spark\"])\n call([\"docker\", \"stop\", self.sparkContainerID])\n call([\"docker\", \"rm\", self.sparkContainerID])\n call([\"docker\", \"exec\", self.hdfsContainerID, \"rm\", \"-r\", \"/ephemeral/hdfs\"])\n call([\"docker\", \"stop\", self.hdfsContainerID])\n call([\"docker\", \"rm\", self.hdfsContainerID])\n\n return",
"def master_stop(self):\r\n data_dir = self.cf.getfile(\"master_data\")\r\n restart_cmd = self.cf.getfile(\"master_restart_cmd\", \"\")\r\n\r\n self.assert_is_master(True)\r\n self.log.info(\"Disabling WAL archiving\")\r\n\r\n self.master_configure_archiving(False, restart_cmd)\r\n\r\n # if we have a restart command, then use it, otherwise signal\r\n if restart_cmd:\r\n self.log.info(\"Restarting postmaster\")\r\n self.exec_system(restart_cmd)\r\n else:\r\n self.log.info(\"Sending SIGHUP to postmaster\")\r\n self.signal_postmaster(data_dir, signal.SIGHUP)\r\n\r\n # stop any running syncdaemons\r\n pidfile = self.cf.getfile(\"pidfile\", \"\")\r\n if os.path.exists(pidfile):\r\n self.log.info('Pidfile %s exists, attempting to stop syncdaemon.', pidfile)\r\n self.exec_cmd([self.script, self.cfgfile, \"syncdaemon\", \"-s\"])\r\n\r\n self.log.info(\"Done\")",
"def stop(self):\n self._server.stop_build(self._job.name, self.number)",
"def stopWorker(self):\n self.isrun = False",
"def stop(self):\n self.scion_sh('stop')",
"def stop(self):\n self.working = False\n for w in self.workers:\n w.join()\n self.workers = []",
"def stop(self):\n result = mpc_command(['stop'])",
"def _stop_ht_master():\n run('%s/%s/bin/stop-servers.sh --no-hyperspace --no-rangeserver ' \\\n '--no-dfsbroker --no-thriftbroker' % (ht_install_dir(), ht_version())\n )",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None",
"def stop(self):\r\n self.stopped = True\r\n #self.worker.join()\r\n self.FPSThread.join()",
"def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)",
"def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))",
"def stop():\n server = current_server()\n server.stop()",
"def stop(self):\n with self._cv:\n self._terminate.value = 1\n self._cv.notify()\n self._process.join()",
"def monitor_stop(self):\n \n self.mon.stop()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Starts the worker process on the slave.
|
def start_slave_worker():
print("Starting slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on slave")
print(r.text)
return False
#wait a bit for the worker process to start up
print("Waiting a bit")
time.sleep(10)
return True
|
[
"def __initiate_slave_node(self):\n master_cmd = self.__spark_installation_path + '/sbin/start-slave.sh spark://' + self.__host_ip + ':7077'\n print(master_cmd)\n #os.chdir(self.__home_dir)\n output = os.system(master_cmd)\n if output != 0:\n raise Exception(\"Terminating process!\")\n self.__logger.log('Enabled slave node..')",
"def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()",
"def start_slave(instance):\n stmt = \"start slave\"\n instance.get_connection()\n instance.execute_stmt(connection=instance.connection, stmt=stmt)\n if is_slave_running(instance):\n print \"Error: unable start slave replication.\"\n return 1",
"def start(self):\n self.handler = None\n self.master_node = None\n self.uri = None\n handler = rosmaster.master_api.ROSMasterHandler(self.num_workers)\n master_node = rosgraph.xmlrpc.XmlRpcNode(self.port, handler)\n master_node.start()\n while not master_node.uri:\n time.sleep(0.0001)\n self.handler = handler\n self.master_node = master_node\n self.uri = master_node.uri\n logging.getLogger('rosmaster.master').info(\"Master initialized: port[%s], uri[%s]\", self.port, self.uri)",
"def __start_local_master():\n import params\n component_name = __get_component_name()\n\n __setup_hdfs_dirs()\n\n utils.exec_hawq_operation(\n hawq_constants.START,\n \"{0} -a -v\".format(component_name),\n not_if=utils.chk_hawq_process_status_cmd(params.hawq_master_address_port, component_name))\n Logger.info(\"Master {0} started\".format(params.hostname))",
"def start_master_worker():\n print(\"Starting master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to start\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def start (self):\n\t\tself.log.info(\"starting workers.\")\n\t\tself.spawn(max(0,self.low-len(self.worker)))",
"def _start(self, workerid, job_count=None, job_name=None):\n return slurm.submit(\n \"{} -m cluster_tools.remote {}\".format(sys.executable, workerid),\n job_resources=self.job_resources,\n job_name=self.job_name if self.job_name is not None else job_name,\n additional_setup_lines=self.additional_setup_lines,\n job_count=job_count,\n )",
"def __initiate_master_node(self):\n master_cmd = self.__spark_installation_path + '/sbin/start-master.sh'\n print(master_cmd)\n #os.chdir(self.__home_dir)\n output = os.system(master_cmd)\n if output != 0:\n raise Exception(\"Terminating process!\")\n self.__logger.log('Enabled master node..')",
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def start(self):\n log.write(\"start workers\\n\")\n log.flush()\n\n self.sparkContainerID = check_output([\"docker\",\n \"run\",\n \"--net=host\", \n \"-d\",\n \"-v\", \"/mnt/ephemeral/:/ephemeral/:rw\",\n \"-e\", \"\\\"SPARK_MASTER_IP=\"+self.masterIP+\":\"+SPARK_MASTER_PORT+\"\\\"\",\n \"-e\", \"SPARK_LOCAL_DIRS=/ephemeral/spark/local\",\n \"-e\", \"SPARK_WORKER_DIR=/ephemeral/spark/work\",\n \"quay.io/ucsc_cgl/apache-spark-worker:1.5.2\", \n self.masterIP+\":\"+SPARK_MASTER_PORT])[:-1]\n self.hdfsContainerID = check_output([\"docker\",\n \"run\",\n \"--net=host\",\n \"-d\",\n \"-v\", \"/mnt/ephemeral/:/ephemeral/:rw\",\n \"quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2\", self.masterIP])[:-1]\n return",
"def start_broker_slave(broker_id, start_port, server_ip=\"127.0.0.1\", verbosity=\"info\"):\n logging.set_verbosity(VERBOSITY_MAP.get(verbosity, logging.INFO))\n\n broker_slave = BrokerSlave(server_ip, broker_id, start_port)\n broker_slave.start()",
"def master_service():\n master = Master()\n logger.info(\"Master running on ip: {}\".format(get_host_ip()))\n\n master.server.run()",
"def start_slaves(slave_dir,exe_rel_path,pst_rel_path,num_slaves=None,slave_root=\"..\",\n port=4004,rel_path=None,local=True,cleanup=True,master_dir=None):\n\n assert os.path.isdir(slave_dir)\n assert os.path.isdir(slave_root)\n if num_slaves is None:\n num_slaves = mp.cpu_count()\n else:\n num_slaves = int(num_slaves)\n #assert os.path.exists(os.path.join(slave_dir,rel_path,exe_rel_path))\n exe_verf = True\n\n if rel_path:\n if not os.path.exists(os.path.join(slave_dir,rel_path,exe_rel_path)):\n #print(\"warning: exe_rel_path not verified...hopefully exe is in the PATH var\")\n exe_verf = False\n else:\n if not os.path.exists(os.path.join(slave_dir,exe_rel_path)):\n #print(\"warning: exe_rel_path not verified...hopefully exe is in the PATH var\")\n exe_verf = False\n if rel_path is not None:\n assert os.path.exists(os.path.join(slave_dir,rel_path,pst_rel_path))\n else:\n assert os.path.exists(os.path.join(slave_dir,pst_rel_path))\n if local:\n hostname = \"localhost\"\n else:\n hostname = socket.gethostname()\n\n base_dir = os.getcwd()\n port = int(port)\n\n if master_dir is not None:\n if master_dir != '.' and os.path.exists(master_dir):\n try:\n shutil.rmtree(master_dir)#, onerror=del_rw)\n except Exception as e:\n raise Exception(\"unable to remove existing master dir:\" + \\\n \"{0}\\n{1}\".format(master_dir,str(e)))\n if master_dir != '.':\n try:\n shutil.copytree(slave_dir,master_dir)\n except Exception as e:\n raise Exception(\"unable to copy files from slave dir: \" + \\\n \"{0} to new slave dir: {1}\\n{2}\".\\\n format(slave_dir,master_dir,str(e)))\n\n args = [exe_rel_path, pst_rel_path, \"/h\", \":{0}\".format(port)]\n if rel_path is not None:\n cwd = os.path.join(master_dir,rel_path)\n else:\n cwd = master_dir\n print(\"master:{0} in {1}\".format(' '.join(args),cwd))\n try:\n os.chdir(cwd)\n master_p = sp.Popen(args)#,stdout=sp.PIPE,stderr=sp.PIPE)\n os.chdir(base_dir)\n except Exception as e:\n raise Exception(\"error starting master instance: {0}\".\\\n format(str(e)))\n time.sleep(1.5) # a few cycles to let the master get ready\n\n\n tcp_arg = \"{0}:{1}\".format(hostname,port)\n procs = []\n slave_dirs = []\n for i in range(num_slaves):\n new_slave_dir = os.path.join(slave_root,\"slave_{0}\".format(i))\n if os.path.exists(new_slave_dir):\n try:\n shutil.rmtree(new_slave_dir)#, onerror=del_rw)\n except Exception as e:\n raise Exception(\"unable to remove existing slave dir:\" + \\\n \"{0}\\n{1}\".format(new_slave_dir,str(e)))\n try:\n shutil.copytree(slave_dir,new_slave_dir)\n except Exception as e:\n raise Exception(\"unable to copy files from slave dir: \" + \\\n \"{0} to new slave dir: {1}\\n{2}\".format(slave_dir,new_slave_dir,str(e)))\n try:\n if exe_verf:\n # if rel_path is not None:\n # exe_path = os.path.join(rel_path,exe_rel_path)\n # else:\n exe_path = exe_rel_path\n else:\n exe_path = exe_rel_path\n args = [exe_path, pst_rel_path, \"/h\", tcp_arg]\n #print(\"starting slave in {0} with args: {1}\".format(new_slave_dir,args))\n if rel_path is not None:\n cwd = os.path.join(new_slave_dir,rel_path)\n else:\n cwd = new_slave_dir\n\n os.chdir(cwd)\n print(\"slave:{0} in {1}\".format(' '.join(args),cwd))\n with open(os.devnull,'w') as f:\n p = sp.Popen(args,stdout=f,stderr=f)\n procs.append(p)\n os.chdir(base_dir)\n except Exception as e:\n raise Exception(\"error starting slave: {0}\".format(str(e)))\n slave_dirs.append(new_slave_dir)\n\n if master_dir is not None:\n # while True:\n # line = master_p.stdout.readline()\n # if line != '':\n # 
print(str(line.strip())+'\\r',end='')\n # if master_p.poll() is not None:\n # print(master_p.stdout.readlines())\n # break\n master_p.wait()\n time.sleep(1.5) # a few cycles to let the slaves end gracefully\n # kill any remaining slaves\n for p in procs:\n p.kill()\n\n for p in procs:\n p.wait()\n if cleanup:\n for dir in slave_dirs:\n shutil.rmtree(dir)",
"def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)",
"def master_server():\n os.system('python master_server.py')",
"def _start_child(self):\n parent_pipe, child_pipe = mp.Pipe()\n self._poll.register(parent_pipe.fileno(), select.POLLIN | select.POLLPRI)\n\n pid = os.fork()\n if not pid:\n ch = Worker(child_pipe, self.server_socket)\n parent_pipe.close()\n ch.run()\n else:\n self._children[parent_pipe.fileno()] = ManagerChild(pid, parent_pipe)\n child_pipe.close()",
"def _launchSlaveProcesses(self):\n availableCpus = multiprocessing.cpu_count()\n logging.info(\"Available CPUs: %d\" % (availableCpus,))\n logging.info(\"Requested worker processes: %d\" % (self.options.numWorkers,))\n\n # Use all CPUs if numWorkers < 1\n if self.options.numWorkers < 1:\n self.options.numWorkers = availableCpus\n\n # Warn if we make a bad numWorker argument is used\n if self.options.numWorkers > availableCpus:\n logging.warn(\"More worker processes requested (%d) than CPUs available (%d);\"\n \" may result in suboptimal performance.\"\n % (self.options.numWorkers, availableCpus))\n\n self._initQueues()\n\n if self.options.threaded:\n self.options.numWorkers = 1\n WorkerType = KineticWorkerThread\n else:\n WorkerType = KineticWorkerProcess\n\n # Launch the worker processes\n self._workers = []\n for i in xrange(self.options.numWorkers):\n p = WorkerType(self.options, self._workQueue, self._resultsQueue, self.ipdModel)\n self._workers.append(p)\n p.start()\n logging.info(\"Launched worker processes.\")\n\n # Launch result collector\n self._resultCollectorProcess = KineticsWriter(self.options, self._resultsQueue, self.refInfo, self.ipdModel)\n self._resultCollectorProcess.start()\n logging.info(\"Launched result collector process.\")\n\n # Spawn a thread that monitors worker threads for crashes\n self.monitoringThread = threading.Thread(target=monitorChildProcesses,\n args=(self._workers + [self._resultCollectorProcess],))\n self.monitoringThread.start()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Stops the worker process on the slave.
|
def stop_slave_worker():
print("Stopping slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on slave")
print(r.text)
return False
#wait a bit for the worker process to stop
print("Waiting a bit")
time.sleep(2)
return True
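
All of these helpers return False on failure, but slave_to_master (earlier in this section) calls them without checking the result, so a failed PATCH is silently ignored. Below is a minimal sketch of a checked variant, assuming the same helper names as the snippets above; slave_to_master_checked is an illustrative name only, and pull_from_slave / commit_pull_to_db are taken from the original sequence even though their return behaviour is not shown.

def slave_to_master_checked():
    # Same sequence as slave_to_master, but abort on the first step that
    # explicitly reports failure by returning False.
    steps = [stop_slave_worker, setup_slave_web, pull_from_slave,
             commit_pull_to_db, stop_slave_web, start_master_worker]
    for step in steps:
        if step() is False:
            print(f"Aborting: {step.__name__} failed")
            return False
    print("DONE!")
    return True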
|
[
"def stop_slave(instance):\n stmt = \"stop slave\"\n instance.get_connection()\n instance.execute_stmt(connection=instance.connection, stmt=stmt)\n if is_slave_running(instance):\n print \"Error: unable stop slave replication.\"\n return 1",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None",
"def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None",
"def stop(self):\n self._kill_process()",
"def stop(self):\n response = self._shell_client.exec_command('{} stop'.format(self._executable_path), async=False)\n\n if not response.is_success():\n self._logger.error('clusterrunner stop failed on host {} with output: {}, error: {}'.format(\n self.host, response.raw_output, response.raw_error))",
"def stop_master_worker():\n print(\"Stopping master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def stop(self):\n log.write(\"stop workers\\n\")\n log.flush()\n\n call([\"docker\", \"exec\", self.sparkContainerID, \"rm\", \"-r\", \"/ephemeral/spark\"])\n call([\"docker\", \"stop\", self.sparkContainerID])\n call([\"docker\", \"rm\", self.sparkContainerID])\n call([\"docker\", \"exec\", self.hdfsContainerID, \"rm\", \"-r\", \"/ephemeral/hdfs\"])\n call([\"docker\", \"stop\", self.hdfsContainerID])\n call([\"docker\", \"rm\", self.hdfsContainerID])\n\n return",
"def stop(self):\n if self._was_stopped:\n LOG.debug(\"not running, nothing to do to stop\")\n return\n LOG.info(\"Stop worker %s\", self)\n self.stop_event_loop()\n self._cleanup()\n self._stopped()\n self._was_stopped = True",
"def stop(self):\n self._server.stop_build(self._job.name, self.number)",
"def stop_slave_web():\n print(\"Stopping slave web\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/web\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the web dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the web process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def kill(self):\n \n self.killSlavePids()",
"def stop(self):\n self.scion_sh('stop')",
"def terminate_slaves(self):\r\n self.master.terminate_slaves()",
"def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)",
"def stop(self):\n result = mpc_command(['stop'])",
"def stop(self):\n\n log_info(\"Stopping LiteServ ...\")\n\n self.logfile.flush()\n self.logfile.close()\n self.process.kill()\n self.process.wait()\n\n self._verify_not_running()",
"def terminate_slaves(self):\n self.master.terminate_slaves()",
"def stop(self) -> None:\n if self.process is not None:\n logging.info('stopping hwpc-sensor...')\n self.process.terminate()\n self.process.wait()\n self.process = None",
"def stop(self):\n with self._cv:\n self._terminate.value = 1\n self._cv.notify()\n self._process.join()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Uses the current cursor position, which is in a code view, and gets the corresponding instruction address associated with the code. Returns the start of the function if the address cannot be determined.
|
def get_src_to_inst(self) -> int:
# get the Qt document
doc: QCodeDocument = self.document()
# get the current position of the cursor
cursor = self.textCursor()
pos = cursor.position()
# get the node at the associated cursor position
current_node = doc.get_stmt_node_at_position(pos)
if (
current_node is not None
and hasattr(current_node, "tags")
and current_node.tags is not None
and "ins_addr" in current_node.tags
):
asm_ins_addr = current_node.tags["ins_addr"]
else:
# the top of the function decompiled
asm_ins_addr = self._code_view.function.addr
return asm_ins_addr
|
[
"def _next_code_addr_core(self):\n\n next_addr = self._next_unscanned_addr()\n if next_addr is None:\n return None\n\n start_addr = next_addr\n\n while True:\n string_length = self._scan_for_printable_strings(start_addr)\n if string_length:\n self._seg_list.occupy(start_addr, string_length, \"string\")\n start_addr += string_length\n\n if self.project.arch.name in (\"X86\", \"AMD64\"):\n cc_length = self._scan_for_repeating_bytes(start_addr, 0xCC, threshold=1)\n if cc_length:\n self._seg_list.occupy(start_addr, cc_length, \"alignment\")\n start_addr += cc_length\n else:\n cc_length = 0\n\n zeros_length = self._scan_for_repeating_bytes(start_addr, 0x00)\n if zeros_length:\n self._seg_list.occupy(start_addr, zeros_length, \"alignment\")\n start_addr += zeros_length\n\n if string_length == 0 and cc_length == 0 and zeros_length == 0:\n # umm now it's probably code\n break\n\n instr_alignment = self._initial_state.arch.instruction_alignment\n if start_addr % instr_alignment > 0:\n # occupy those few bytes\n self._seg_list.occupy(start_addr, instr_alignment - (start_addr % instr_alignment), \"alignment\")\n start_addr = start_addr - start_addr % instr_alignment + instr_alignment\n # trickiness: aligning the start_addr may create a new address that is outside any mapped region.\n if not self._inside_regions(start_addr):\n raise ContinueScanningNotification()\n\n return start_addr",
"def next_code_addr(ea=None, down=True):\n if ea is None:\n ea = ida_kernwin.get_screen_ea()\n if down:\n fl = ida_search.SEARCH_DOWN\n else:\n fl = ida_search.SEARCH_UP\n r = ida_search.find_code(ea, fl)\n if r == idc.BADADDR: # no result found\n return None\n return r",
"def current_instruction(self):\n return self.instruction_at(self.pc)",
"def get_function_start_address(ea):\r\n try:\r\n if ea is None:\r\n return None\r\n\r\n start_adrs = idc.GetFunctionAttr(ea, idc.FUNCATTR_START)\r\n if start_adrs != idc.BADADDR:\r\n return start_adrs\r\n\r\n return ea\r\n\r\n except Exception as ex:\r\n raise RuntimeError(\"Count not locate start address for function %s: %s\" % (hex(ea), ex))",
"def _start_line(func):\n _, line = inspect.getsourcelines(func)\n return line",
"def backtrace_start(self, offset, max_instructions=1024):\n function_start = idc.get_func_attr(offset, idc.FUNCATTR_START)\n blob_start = 0\n last_mov_or_alt = True\n\n trace_instruction_types = [idaapi.NN_mov,\n idaapi.NN_sub,\n idaapi.NN_xor,\n idaapi.NN_lea,\n idaapi.NN_add,\n idaapi.NN_inc,\n idaapi.NN_movupd,\n idaapi.NN_movups,\n idaapi.NN_movaps,\n idaapi.NN_movapd,]\n\n # Back trace\n if offset <= function_start + 64:\n \"\"\"\n Note this can cause issues. If there is a call or other that is expected to have\n initialized data. Do a quick check first before returning the function start \n \"\"\"\n\n if not self._has_call(function_start, offset):\n return function_start\n icount = 0\n while offset >= function_start:\n icount += 1\n\n if icount > max_instructions:\n return 0\n\n ins = ida_ua.insn_t()\n\n idaapi.decode_insn(ins, offset)\n self.logger.debug(\"0x%x %s\" % (offset, idc.generate_disasm_line(idc.prev_head(offset), 0)))\n\n if ins.itype in trace_instruction_types:\n if ins.itype in [idaapi.NN_mov]:\n last_mov_or_alt = True\n elif ins.itype == idaapi.NN_xor:\n if idc.print_operand(offset, 0) != idc.print_operand(offset, 1):\n if idc.get_operand_type(offset, 1) != idaapi.o_imm:\n blob_start = idc.next_head(offset)\n break\n else:\n last_mov_or_alt = True\n else:\n last_mov_or_alt = False\n elif ins.itype == idaapi.NN_sub:\n if not last_mov_or_alt:\n blob_start = idc.next_head(offset)\n break\n last_mov_or_alt = False\n elif ins.itype in [idaapi.NN_lea, idaapi.NN_inc, idaapi.NN_add]:\n last_mov_or_alt = True\n\n blob_start = offset\n\n if offset <= function_start:\n self.logger.debug(\"Error back-tracing ADVBLOB...Using function start\")\n blob_start = function_start\n break\n\n else:\n blob_start = idc.next_head(offset)\n break\n\n offset = idc.prev_head(offset)\n\n if blob_start <= function_start + 64:\n if not self._has_call(function_start, blob_start):\n blob_start = function_start\n\n self.logger.debug(\"BLOB Start: %x\" % blob_start)\n self.logger.debug(idc.print_insn_mnem(blob_start))\n return blob_start",
"def PyFunction_GetCode(space, w_func):\n func = space.interp_w(Function, w_func)\n return func.code # borrowed ref",
"def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str:\n if inst.positions.lineno is None:\n return \"\"\n # The rstrip + \"\\n\" pattern is used throughout this function to handle\n # linecache.getline errors. Error lines are treated as empty strings \"\", but we want\n # to treat them as blank lines \"\\n\".\n first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip()\n if inst.positions.end_lineno is None:\n return first_line\n if inst.positions.col_offset is None or inst.positions.end_col_offset is None:\n return first_line\n\n # character index of the start of the instruction\n start_offset = _fix_offset(first_line, inst.positions.col_offset)\n # character index of the end of the instruction\n # compute later since end may be a different line\n end_offset = None\n # expression corresponding to the instruction so we can get anchors\n segment = \"\"\n # underline markers to be printed - start with `~` marker and replace with `^` later\n markers = []\n\n # Compute segment and initial markers\n if inst.positions.end_lineno == inst.positions.lineno:\n end_offset = _fix_offset(first_line, inst.positions.end_col_offset)\n segment = first_line[start_offset:end_offset]\n markers.append(\" \" * start_offset + \"~\" * (end_offset - start_offset))\n else:\n segment = first_line[start_offset:] + \"\\n\"\n markers.append(\" \" * start_offset + \"~\" * (len(first_line) - start_offset))\n last_line = linecache.getline(\n code.co_filename, inst.positions.end_lineno\n ).rstrip()\n end_offset = _fix_offset(last_line, inst.positions.end_col_offset)\n for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno):\n line = linecache.getline(code.co_filename, lineno).rstrip()\n segment += line + \"\\n\"\n # don't underline leading spaces\n num_spaces = len(line) - len(line.lstrip())\n markers.append(\" \" * num_spaces + \"~\" * (len(line) - num_spaces))\n segment += last_line[:end_offset]\n num_spaces = len(last_line) - len(last_line.lstrip())\n markers.append(\" \" * num_spaces + \"~\" * (end_offset - num_spaces))\n\n anchors: Optional[_Anchors] = None\n try:\n anchors = _extract_anchors_from_expr(segment)\n except AssertionError:\n pass\n\n # replace `~` markers with `^` where necessary\n if anchors is None:\n markers = [marker.replace(\"~\", \"^\") for marker in markers]\n else:\n # make markers mutable\n markers = [list(marker) for marker in markers]\n\n # anchor positions do not take start_offset into account\n if anchors.left_end_lineno == 0:\n anchors.left_end_offset += start_offset\n if anchors.right_start_lineno == 0:\n anchors.right_start_offset += start_offset\n\n # Turn `~`` markers between anchors to `^`\n for line in range(len(markers)):\n for col in range(len(markers[line])):\n if line < anchors.left_end_lineno:\n continue\n if line == anchors.left_end_lineno and col < anchors.left_end_offset:\n continue\n if (\n line == anchors.right_start_lineno\n and col >= anchors.right_start_offset\n ):\n continue\n if line > anchors.right_start_lineno:\n continue\n if markers[line][col] == \"~\":\n markers[line][col] = \"^\"\n\n # make markers into strings again\n markers = [\"\".join(marker) for marker in markers]\n\n result = \"\"\n for i in range(len(markers)):\n result += (\n linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip()\n + \"\\n\"\n )\n result += markers[i] + \"\\n\"\n return result",
"def getInstructionBefore(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...",
"def line_at_cursor(code: str, cursor_pos: int = 0):\n offset = 0\n lines = code.splitlines(True)\n for line in lines:\n next_offset = offset + len(line)\n if not line.endswith('\\n'):\n # If the last line doesn't have a trailing newline, treat it as if\n # it does so that the cursor at the end of the line still counts\n # as being on that line.\n next_offset += 1\n if next_offset > cursor_pos:\n break\n offset = next_offset\n else:\n line = \"\"\n return (line, offset)",
"def code_ptr(runtime_addr, runtime_addr_high=None, offset=0):\n\n if runtime_addr_high is None:\n runtime_addr_high = runtime_addr + 1\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n runtime_addr_high = memorymanager.RuntimeAddr(runtime_addr_high)\n binary_addr, _ = movemanager.r2b(runtime_addr)\n binary_addr_high, _ = movemanager.r2b(runtime_addr_high)\n assert memorymanager.is_data_loaded_at_binary_addr(binary_addr)\n assert memorymanager.is_data_loaded_at_binary_addr(binary_addr_high)\n code_at_runtime_addr = ((memory_binary[binary_addr_high] << 8) | memory_binary[binary_addr]) + offset\n # Label and trace the code at code_at\n label = entry(code_at_runtime_addr, warn=False) # ENHANCE: allow optional user-specified label?\n # Reference that label at addr/addr_high.\n offset_string = \"\" if offset == 0 else (\"%+d\" % -offset)\n if binary_addr_high == binary_addr + 1:\n # The general code in the \"else\" branch would work for this case as\n # well, but since the assembler has support for emitting a little-endian\n # 16-bit word it's nice to use it when we can.\n assert runtime_addr_high == runtime_addr + 1\n # TODO: Use word()/expr() variants which take a binary addr directly?\n word(runtime_addr)\n expr(runtime_addr, utils.LazyString(\"%s%s\", label, offset_string))\n else:\n # TODO: Use byte()/expr() variants which take a binary addr directly?\n byte(runtime_addr)\n expr(runtime_addr, make_lo(utils.LazyString(\"%s%s\", label, offset_string)))\n byte(runtime_addr_high)\n expr(runtime_addr_high, make_hi(utils.LazyString(\"%s%s\", label, offset_string)))\n if abs(runtime_addr_high - runtime_addr) == 1:\n return max(runtime_addr, runtime_addr_high) + 1\n return None",
"def _get_first_code_line():\n return min(_code_lines)",
"def instruction_at(self, addr=None):\n if not addr:\n addr = self.reg.get_pc()\n data = self.read(addr, 15)\n return util.disassemble_string(self.bitness(), addr, data)",
"def current_function_address(self):\n\n if len(self._stack) == 0:\n return 0 # This is the root level\n else:\n frame = self._stack[-1]\n return frame.function_address",
"def get_current_instruction(self) -> Dict:\n\n instructions = self.environment.code.instruction_list\n return instructions[self.mstate.pc]",
"def get_code():\n return inspect.getsource(search)",
"def next_code(ea=None, down=True):\n r = BipElt.next_code_addr(ea=ea, down=down)\n if r is None:\n return r\n else:\n return GetElt(r)",
"def current_function_address(self):\n\n if len(self._callstack) == 0:\n return 0 # This is the root level\n else:\n frame = self._callstack[-1]\n return frame.func_addr",
"def _get_address_calculation(segment, index, file_name):\n\n if segment == \"constant\": # Temp starts at 5\n load_bytecode = [f\"@{index}\", \"D=A\"]\n\n elif segment == \"temp\":\n load_bytecode = [f\"@{int(index) + 5}\", \"D=A\"]\n\n elif segment == \"static\":\n variable_name = file_name + \".\" + index\n load_bytecode = [f\"@{variable_name}\", \"D=A\"]\n\n elif segment == \"pointer\":\n if index == \"0\":\n register = \"THIS\"\n else:\n register = \"THAT\"\n\n load_bytecode = [f\"@{register}\", \"D=A\"]\n\n else:\n load_bytecode = [f\"@{VirtualMachineLibrary._get_symbolic_symbol(segment)}\", \"D=M\", f\"@{index}\", \"D=D+A\"]\n\n full_address_bytecode = load_bytecode + [\"@R13\", \"M=D\"]\n return full_address_bytecode"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate a checksum for the given path. Will eventually use this to ensure config has changed before reloading.
|
def checksum(path):
    # read in binary mode so md5() receives bytes (required under Python 3)
    with open(path, 'rb') as f:
return md5(f.read()).digest()
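
The docstring above notes that the digest will eventually gate reloads. Purely as an illustrative sketch (none of these names come from the project), the comparison could look like the helper below, with a caller-maintained `previous` dict mapping path to digest:

from hashlib import md5

def config_changed(path, previous):
    # Hash the raw bytes of the file and compare against the cached digest;
    # `previous` is a hypothetical {path: digest} cache owned by the caller.
    with open(path, 'rb') as f:
        digest = md5(f.read()).digest()
    changed = previous.get(path) != digest
    previous[path] = digest
    return changed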
|
[
"def file_checksum(path):\n\n with abort_if_file_changes_during_read(path):\n m = hash_implementation()\n\n with open(path, 'rb') as f:\n for chunk in read_in_chunks(f, io.DEFAULT_BUFFER_SIZE):\n m.update(chunk)\n\n return m.hexdigest()",
"def get_checksum(path: Union[Path, str]) -> str:\n path = Path(path)\n if not (path.is_file() or path.is_dir()):\n msg.fail(f\"Can't get checksum for {path}: not a file or directory\", exits=1)\n if path.is_file():\n return hashlib.md5(Path(path).read_bytes()).hexdigest()\n else:\n # TODO: this is currently pretty slow\n dir_checksum = hashlib.md5()\n for sub_file in sorted(fp for fp in path.rglob(\"*\") if fp.is_file()):\n dir_checksum.update(sub_file.read_bytes())\n return dir_checksum.hexdigest()",
"def checksum(self, path):\n return self._gcs_object(path).crc32c",
"def _calc_checksum(self, method, querystring):\n blob = method + querystring + self.api_secret\n logger.debug(f\"Creating checksum from {blob}\")\n return hashlib.sha1(blob.encode('utf-8')).hexdigest()",
"def _calc_sha1(path):\n calc = hashlib.sha1()\n with open(path, 'r') as f:\n calc.update(f.read())\n return calc.hexdigest()",
"def sha1sum( self, path ):\n\t\twith open(path, \"rb\") as f:\n\t\t\treturn hashlib.sha1(f.read()).hexdigest()",
"def _get_checksum(self):\n file_path = self._hd_path + '/' + self._file_name\n file = open( file_path, 'r' )\n content_of_file = file.read()\n file.close()\n md5obj = hashlib.md5( content_of_file )\n return md5obj.digest()",
"def checksum(self, filepath) -> str:\n if os.path.exists(filepath):\n hash_md5 = md5()\n with open(filepath, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return urlsafe_b64encode(hash_md5.digest()).decode('utf-8')\n\n return \"\"",
"def get_md5sum(self, path):\n _, md5sum_result, _ = run_command(['md5sum', path])\n return md5sum_result.split(' ')[0].strip()",
"def path_checksum(paths):\n\n if not hasattr(paths, '__iter__'):\n raise TypeError('sequence or iterable expected not %r!' % type(paths))\n\n def _update_checksum(checksum, dirname, filenames):\n for filename in sorted(filenames):\n path = path_join(dirname, filename)\n if isfile(path):\n fh = open(path, 'rb')\n while 1:\n buf = fh.read(4096)\n if not buf : break\n checksum.update(buf)\n fh.close()\n\n chksum = sha1()\n\n for path in sorted([normpath(f) for f in paths]):\n if path_exists(path):\n if isdir(path):\n walk(path, _update_checksum, chksum)\n elif isfile(path):\n _update_checksum(chksum, dirname(path), basename(path))\n\n return chksum.hexdigest()",
"def checksum_file(path: Union[bytes, str], algorithm: str) -> str:\n alg = _parse_algorithm(algorithm)\n accum = alg()\n with open(path, \"rb\") as f:\n # read in 1MB chunks\n for chunk in iter(lambda: f.read(1024 * 1024), b\"\"):\n accum.update(chunk)\n return accum.hexdigest()",
"def getChecksumForDir(dirPath):\n outFile = dirPath + '.checksum'\n out = open(outFile, 'w')\n checkSumWalk(dirPath, dirPath, out)\n out.close()",
"def _get_checksum(self, text):\n # Compute the new checksum over everything but the sha1sum line.\n # This will fail if sha1sum appears for some other reason. It won't ;-)\n text = \"\".join([line for line in text.splitlines(True) if \"sha1sum\" not in line])\n return utils.str_checksum(text)",
"def checksum(self, path, python_interp):\n ## Super class implements this function by sh commands and python scripts\n ## If python_interp is modified to 'docker CONTAINER python', it will only influence the python\n ## script part in super class. Instead we should influence both\n simple_interp = 'python'\n assert(python_interp.startswith('docker exec '))\n assert(python_interp.endswith(' ' + simple_interp))\n\n docker_prefix = re.sub(simple_interp, '', python_interp)\n cmd = super(ShellModule, self).checksum(path, simple_interp)\n ## Escape the cmd:\n ## \" --> \\\"\n cmd_escaped = cmd.replace('\"', '\\\\\"')\n ## $ --> \\$\n cmd_escaped = cmd_escaped.replace('$', '\\\\$')\n return '%s sh -c \"%s\"' % (docker_prefix, cmd_escaped)",
"def md5Checksum(self, filepath):\n # http://www.joelverhagen.com/blog/2011/02/md5-hash-of-file-in-python/\n f = open(filepath, 'rb') # open for reading in binary mode\n m = hashlib.md5()\n while True:\n data = f.read(8192) # file is read in 8192 byte chunks to minimize memory use to ~8k\n if not data:\n break\n m.update(data)\n return m.hexdigest()",
"def file_digest(path, algo=hashlib.md5):\n checksum = algo()\n with open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n checksum.update(chunk)\n return checksum.hexdigest()",
"def test_checksum(self):\n self.webHDFS.create(TEST_DIR_PATH + '/foo.txt', \"foobar\")\n checksum = self.webHDFS.get_checksum(TEST_DIR_PATH + '/foo.txt')\n self.assertEqual(checksum['bytes'], \"00000200000000000000000043d7180b6d1dfa6acae636572cd3b70f00000000\")\n self.assertEqual(checksum['length'], 28)",
"def _checksum(addr):\n return checksum(addr)[-constants.check_sum_len_bytes :]",
"def calc_file_md5(file_path):\n hash_md5 = str()\n method = hashlib.md5()\n if not os.path.exists(file_path):\n logger.error(\"File(%s) don not exist, can not calculation file hash\" % file_path)\n return hash_md5\n\n with open(file_path, 'rb') as f:\n for chunk in read_chunks(f, 1024 * 1024):\n method.update(chunk)\n return method.hexdigest()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set up inotify if requested.
|
def _setup_inotify(self, flag):
i = None
if flag:
try:
import inotify.adapters
except ImportError:
raise AssertionError(
'cannot use inotify, package not installed')
else:
i = inotify.adapters.Inotify(paths=[self.watch],
block_duration_s=0)
return (flag, i)
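
For context, a hedged sketch of how the returned adapter might be drained by the caller. It assumes the PyPI inotify package's event_gen() generator, its (header, type_names, path, filename) event tuples, and a timeout_s/yield_nones signature; the helper name is invented and not part of the project:

import os

def drain_inotify_events(i):
    # Collect paths touched since the last call. Returns an empty set when
    # inotify was not requested (i is None).
    changed = set()
    if i is None:
        return changed
    # timeout_s=0 together with block_duration_s=0 (set in _setup_inotify)
    # should make this return promptly once the queued events are consumed;
    # yield_nones=False suppresses the padding None values.
    for _header, _type_names, path, filename in i.event_gen(
            yield_nones=False, timeout_s=0):
        changed.add(os.path.join(path, filename))
    return changed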
|
[
"def SetupFileWatcher(filename, cb):\n wm = pyinotify.WatchManager()\n handler = FileEventHandler(wm, filename, cb)\n asyncnotifier.AsyncNotifier(wm, default_proc_fun=handler)",
"def __init__(self, watch_manager, default_proc_fun=None, map=None):\n if default_proc_fun is None:\n default_proc_fun = pyinotify.ProcessEvent()\n\n self.notifier = pyinotify.Notifier(watch_manager, default_proc_fun)\n\n # here we need to steal the file descriptor from the notifier, so we can\n # use it in the global asyncore select, and avoid calling the\n # check_events() function of the notifier (which doesn't allow us to select\n # together with other file descriptors)\n self.fd = self.notifier._fd\n asyncore.file_dispatcher.__init__(self, self.fd, map)",
"def Start(self):\n \n if platform.system() == 'Linux':\n #\n # On linux it is possible to use the inotify api.\n #\n from twisted.internet import inotify\n \n notifier = inotify.INotify()\n notifier.startReading()\n notifier.watch(\n filepath.FilePath(self.path_to_watch),\n mask=inotify.IN_CREATE,\n callbacks=[self._inotifyCB]\n )\n \n else:\n #\n # On windows we use a method from:\n # http://timgolden.me.uk/python/win32_how_do_i/watch_directory_for_changes.html\n #\n d = threads.deferToThread(self._watchThread)",
"def test_inotify(self):\n command = '%s %s' % (self.prog, self.sig)\n with ReloadConf(self.dir, self.file, command, inotify=True) as rc:\n rc.poll()\n # Command should now be running.\n self.assertTrue(rc.check_command())\n # Write out \"config\" file.\n with open(pathjoin(self.dir, basename(self.file)), 'wb') as f:\n f.write(b'foo')\n # Command should receive HUP.\n rc.poll()\n time.sleep(0.1)\n self.assertTrue(pathexists(self.sig))",
"def inotify_init(flags=0, closefd=CLOEXEC_DEFAULT):\n assert isinstance(flags, int), 'Flags must be an integer'\n\n if closefd:\n flags |= IN_CLOEXEC\n\n fd = lib.inotify_init1(flags)\n \n if fd < 0:\n err = ffi.errno\n if err == errno.EINVAL:\n raise ValueError(\"Invalid argument or flag\")\n elif err == errno.EMFILE:\n raise OSError(\"Maximum inotify instances reached\")\n elif err == errno.ENFILE:\n raise OSError(\"File descriptor limit hit\")\n elif err == errno.ENOMEM:\n raise MemoryError(\"Insufficent kernel memory avalible\")\n else:\n # If you are here, its a bug. send us the traceback\n raise UnknownError(err)\n\n return fd",
"def initiate(self):\n self.io.debug(\"Inititating\")\n if self.location is None:\n self.location = './'\n files = glob.glob(self.location + '/*.%s' % self.suff)\n for f in files:\n index = f.split('/')[-1].split('.')[0]\n self._files[index] = f\n if self._autoload:\n self.get(index, True)\n self.is_init = True",
"def test_inotify(self):\n self.fail(\"write a test\")",
"def _setup(self):\n # Look for ini file\n if not os.path.isfile(self.ini_file):\n self._fail('Cannot find ini file')\n\n self._setup_logging()\n\n # Import debexpo root directory\n sys.path.append(os.path.dirname(self.ini_file))\n\n # Initialize Pylons app\n conf = appconfig('config:' + self.ini_file)\n pylons.config = load_environment(conf.global_conf, conf.local_conf)\n\n # Change into the incoming directory\n incoming_dir = pylons.config['debexpo.upload.incoming']\n logging.info(\"Changing dir to %s\", incoming_dir)\n os.chdir(incoming_dir)\n\n # Look for the changes file\n if not os.path.isfile(self.changes_file):\n self._fail('Cannot find changes file')",
"async def watchForFileSystemEvents(self):\n\n # Things that can throw this off:\n #\n # * Moving a watched directory out of the watch tree (will still\n # generate events even when outside of directory tree)\n #\n # * Doing two changes on a directory or something before the program\n # has a time to handle it (this will also throw off a lot of inotify\n # code, though)\n #\n # * Moving a watched directory within a watched directory will get the\n # wrong path. This needs to use the cookie system to link events\n # together and complete the move properly, which can still make some\n # events get the wrong path if you get file events during the move or\n # something silly like that, since MOVED_FROM and MOVED_TO aren't\n # guaranteed to be contiguous. That exercise is left up to the\n # reader.\n #\n # * Trying to watch a path that doesn't exist won't automatically\n # create it or anything of the sort.\n #\n # * Deleting and recreating or moving the watched directory won't do\n # anything special, but it probably should.\n #\n async for event in self.inotify:\n\n if not self.continueWatchingFS :\n return\n\n # If this is a creation event, add a watch for the new path (and its\n # subdirectories if any)\n #\n if Mask.CREATE in event.mask and event.path is not None :\n await self.watchAPath(event.path)\n\n if Mask.DELETE_SELF in event.mask and event.path is not None :\n await self.unWatchAPath(event.path, event.watch)\n\n # If there are some bits in the cpMask in the event.mask yield this\n # event\n #\n if event.mask & self.cpMask:\n yield event\n else:\n # Note that these events are needed for cleanup purposes.\n # We'll always get IGNORED events so the watch can be removed\n # from the inotify. We don't need to do anything with the\n # events, but they do need to be generated for cleanup.\n # We don't need to pass IGNORED events up, because the end-user\n # doesn't have the inotify instance anyway, and IGNORED is just\n # used for management purposes.\n #\n self.logger.debug(f'UNYIELDED EVENT: {event}')",
"def setup_filesystem(self):\n raise NotImplementedError()",
"def _setup_watch(self, watch):\n assert not isfile(watch), 'watch dir is a file'\n\n if pathexists(watch):\n return watch\n\n os.makedirs(watch)\n\n if self.chown:\n try:\n os.chown(watch, *self.chown)\n\n except OSError:\n pass # Non-fatal\n\n if self.chmod:\n try:\n os.chmod(watch, self.chmod)\n\n except OSError:\n pass # Non-fatal\n\n return watch",
"def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()",
"def start(self):\n for config_class in self.watched_configurables:\n monitor = ConfigFileMonitor(config_class, self.config_dir)\n self.observers.append(\n monitor.start(\n self.add_configurable,\n self.update_configurable,\n self.remove_configurable\n )\n )\n\n self.run()",
"def setup(self):\n # Set bashrc file\n self._bashrc()\n\n # Return if not running script as root user\n if self.running_as_root is False:\n return\n\n # Return if user prompted doesn't exist\n if self.infoset_user_exists is False:\n return\n\n # Set file permissions\n self._file_permissions()\n\n # Setup systemd\n self._systemd()",
"def setup_listener(\n self, qtile: \"Qtile\", eventloop: asyncio.AbstractEventLoop\n ) -> None:\n logger.debug(\"Adding io watch\")\n self.qtile = qtile\n fd = self.conn.conn.get_file_descriptor()\n eventloop.add_reader(fd, self._xpoll)",
"def flask_setup(self):\n\n self.app_file = \"{}.py\".format(self.app.split(\":\")[0])\n assert os.path.isfile(self.app_file), \"module must exist\"\n\n if self.python_version == \"2\":\n plugin_version = \"\"\n elif self.python_version == \"3\":\n plugin_version = \"3\"\n else:\n assert False, \"Python version {} is invalid\".format(python_version)\n\n self.service_files = [File(self.app_file)]\n self.start_cmd = \"uwsgi --protocol=http --plugin python{} -p {} -w {} --logto /dev/null\".format(\n plugin_version, self.num_workers, self.app\n )",
"def file_sync_setup(request):\n\n # Create two temporary directories.\n master_dir = tempfile.mkdtemp()\n slave_dir = tempfile.mkdtemp()\n\n # Collect constants from the file_sync test module.\n master_service = getattr(request.module, 'MASTER_SERVICE')\n slave_service = getattr(request.module, 'SLAVE_SERVICE')\n path_to_modules = getattr(request.module, 'PATH_TO_MIX_DIR')\n\n # Start the master and slave watchdogs.\n start_service_in(master_service, 'master', master_dir,\n alias=ALIAS, path=path_to_modules)\n start_service_in(slave_service, 'slave', slave_dir,\n alias=ALIAS, path=path_to_modules)\n\n yield master_dir, slave_dir\n\n # Stop the services and remove the temp dirs.\n sparkl('stop', master_service, alias=ALIAS)\n sparkl('stop', slave_service, alias=ALIAS)\n subprocess.check_call(['rm', '-rf', master_dir])\n subprocess.check_call(['rm', '-rf', slave_dir])",
"def _setup_watchers(self, folders):\n \n return {'observer': Observer()}",
"def __init__(self):\n super().__init__()\n\n etc_conf_names = ('app.conf', 'app.local.conf')\n conf_paths = [os.path.join(APP_DIR, 'etc', c) for c in etc_conf_names]\n\n user_config_path = os.path.join(\n os.path.expanduser('~'),\n '.config',\n 'url_manager.conf'\n )\n conf_paths.append(user_config_path)\n\n self.read(conf_paths)\n self.set('DEFAULT', 'app_dir', APP_DIR)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create watch directory if it does not exist.
|
def _setup_watch(self, watch):
assert not isfile(watch), 'watch dir is a file'
if pathexists(watch):
return watch
os.makedirs(watch)
if self.chown:
try:
os.chown(watch, *self.chown)
except OSError:
pass # Non-fatal
if self.chmod:
try:
os.chmod(watch, self.chmod)
except OSError:
pass # Non-fatal
return watch
|
[
"def watchDirectory(self, path):\n pass",
"def _create_file_dir(self):\n path = os.path.dirname(self.filename)\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n time.sleep(1)\n self._create_file_dir()",
"def check_directory():\n directory = \"C:\\\\Users\\\\Indra\\\\PycharmProjects\\\\forex_tracker\\\\historical_data\\\\\" + str(datetime.now().date())\n if not os.path.exists(directory):\n os.mkdir(directory)",
"def _check_or_create_dir(self):\n # update today_dir\n date_now = datetime.now().strftime(self.cfg['dir_format'])\n self.today_dir = os.path.join(self.cfg['working_directory'], date_now)\n if not os.path.isdir(self.today_dir):\n for d in self.cfg['file_types'].keys():\n os.path.os.makedirs(os.path.join(self.today_dir, d))\n self._clear_empty_folders()",
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def check_for_directory():\n if not os.path.isdir(notes_dir):\n os.mkdir(notes_dir, mode=0o700)",
"def create_working_directory(self):\n os.makedirs(self.working_directory, exist_ok=True)",
"def test_nodir(self):\n # Remove the watch directory.\n os.rmdir(self.dir)\n\n # Ensure reloadconf creates the watch directory.\n with ReloadConf(self.dir, self.file, '/bin/sleep 1',\n chown=(TEST_UID, TEST_UID), chmod=0o700) as rc:\n rc.poll()\n self.assertTrue(rc.check_command())\n self.assertTrue(isdir(self.dir))",
"def checkFiles():\n path = os.getcwd()\n initial = 0\n\n if not os.path.exists(path + \"/userevents\"):\n os.mkdir(path + \"/userevents\")\n\n if not os.path.exists(path + \"/config\"):\n os.mkdir(path + \"/config\")\n initial = 1\n\n return initial",
"def createWriteDirs():\n\tf = nuke.filename(nuke.thisNode())\n\tdirr = os.path.dirname(f)\n\tif not os.path.exists(dirr):\n\t\tosdir = nuke.callbacks.filenameFilter(dirr)\n\t\tos.makedirs(osdir)",
"def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)",
"def create_log_dir():\n # Create the log dir, if not created previously.\n if not os.path.exists(config.atf_log_path):\n os.system(\"mkdir -p \" + config.atf_log_path)",
"def get_dir_watch(self):\n watch = dirwatch.DirWatcher(self.paths.manifest_dir)\n watch.on_created = self._on_created\n return watch",
"def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)",
"def CreateTrackerDirIfNeeded():\n tracker_dir = config.get(\n 'GSUtil', 'resumable_tracker_dir',\n os.path.join(GetGsutilStateDir(), 'tracker-files'))\n CreateDirIfNeeded(tracker_dir)\n return tracker_dir",
"def make_experiment_directory(path='',config=None,default_dir='_runs'):\n directory = path\n if not path:\n timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')\n directory = os.path.join(default_dir,timestamp)\n directory = os.path.abspath(directory) \n if os.path.isdir(directory) and not config.override and not config.cloud:\n raise ValueError(\n 'directory already exists, use --override option: %s'\n % directory)\n elif os.path.isdir(directory) and not config.cloud: \n rmtree(directory)\n if not config.cloud: \n os.makedirs(directory)\n if config:\n config.wdir = directory \n return directory",
"def create_cache_dir(self) -> None:\n try:\n os.makedirs(self.cache_folder)\n except FileExistsError:\n pass",
"def should_watch_dir(self, entry):\n return False",
"def makeifnotexists(d):\n if not os.path.exists(d):\n os.mkdir(d)\n return d"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reload configuration. If a reload command is given, run it; otherwise, signal the process with HUP.
|
def reload_command(self):
if self.reload is None:
if not self.check_command():
LOGGER.info('Command dead, restarting...')
self.start_command(wait_for_config=False)
else:
LOGGER.info('Sending HUP signal...')
self.process.send_signal(signal.SIGHUP)
else:
LOGGER.info('Executing reload command...')
subprocess.call(shlex.split(self.reload))
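
One small design note: shlex.split() is what lets a quoted reload string reach subprocess.call() as a proper argv list rather than a single shell string. A tiny illustration with a made-up command:

import shlex

# Quoted arguments survive the split, so the child process receives a real
# argv list: ['sh', '-c', 'kill -HUP 1234']
print(shlex.split('sh -c "kill -HUP 1234"'))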
|
[
"def reload_config():\n subprocess.run([SUPERVISOR_CMD, \"reload\"])",
"def reload(kwargs=None):\n sudo(\"supervisorctl restart %(name)s\" % kwargs)",
"def reload_daemon():\n subprocess.run([\n 'systemctl',\n 'daemon-reload'\n ], check=True)",
"def _reload(self):\n try:\n self._call([\"reload\"])\n except subprocess.CalledProcessError:\n raise errors.PluginError(\n \"Postfix failed to reload its configuration\")",
"def reload():\n _dynamic_env()\n env.sudo('supervisorctl reread', shell=False)\n env.sudo('supervisorctl add %(server_name)s' % env, shell=False)\n env.sudo('supervisorctl restart %(server_name)s' % env, shell=False)\n with warn_only():\n env.sudo('nginx', shell=False)\n env.sudo('nginx -s reload', shell=False)",
"def reload_config(username=None, password=None):\n if username and password:\n run('supervisorctl -u %s -p %s update' % (username, password))\n else:\n run('supervisorctl update')",
"def reload_monitor(self, option, opt, value, parser):\n\t\tif self.debug_level == 1:\n\t\t\tprint \"Reloading taking place........\"\n\t\tcon = self.check_config_syntax(value)\n\t\tself.process_config(con)\n\n\t\ttry: \n\t\t\tf = open('/tmp/'+self.monitorName+'.pid','r')\n\t\t\tdict = pickle.load(f)\n\t\t\tpids = dict['pid']\n\t\t\tf.close()\n\t\t\tos.system('kill -15 '+pids)\n\t\t\tos.system('rm -f /tmp/'+self.monitorName+'.pid')\n\n\t\texcept Exception, e:\n\t\t\tprint 'reloading not proper ',e\n\t\t\tsys.exit(0)\n\t\t\n\t\treturn",
"def nginx_reload():\n log('reload nginx', yellow)\n sudo('/etc/init.d/nginx reload')",
"def reload_config(self):\n pass",
"def reload_uwsgi(force_reload=None):\n if force_reload:\n run('uwsgi --stop /run/uwsgi/{}/uwsgi.pid'.format(env.appname))\n else:\n run('uwsgi --reload /run/uwsgi/{}/uwsgi.pid'.format(env.appname))",
"def reload_config(self):\n self.conf.reload()\n self.load_config()",
"def site_nginx_reload():\n sudo(\"/etc/init.d/nginx reload\")",
"def reload_config():\n _env_reloader.update()",
"def Reload(what):\n \n if what == \"commands\":\n print \"Reloading commands...\"\n main_script.Reload()\n print \"Success!\"\n\n elif what == \"triggers\": \n print \"NOTICE: Currently,only commands can be reloaded.\"\n\n else:\n print \"Error: %s is an invalid option!\"%what",
"def reload(self):\n self.stop()\n # Scheduler Config neu laden\n if self._loadConfig():\n self.messenger.send('Scheduler reloaded by user', '0500', 'success', 'reload', None, 'appinternal')\n self.start()",
"def reload_gunicorn():\n puts(yellow(\"Reload gunicorn graceful\"))\n sudo('kill -HUP `cat %s`' % (env.gunicorn_pidpath), user=env.app_user)",
"def restart():\n cmd = f'supervisorctl restart pocs-config-server'\n print(f'Running: {cmd}')\n subprocess.run(cmd, shell=True)",
"def on_reload_command(self, cmd_event):\n self.NITE.stop()\n self.NITE.start()",
"def reExec(self):\n self.log.warn(\"SIGHUP received - restarting\")\n try:\n self.log.info(\"Removing pidfile: {log_source.pidfilePath}\")\n os.remove(self.pidfilePath)\n except OSError:\n pass\n self.reactor.addSystemEventTrigger(\n \"after\", \"shutdown\", os.execv,\n sys.executable, [sys.executable] + sys.argv\n )\n self.reactor.stop()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return False if command is dead, otherwise True.
|
def check_command(self):
return self.process is not None and self.process.poll() is None
|
[
"def dead(self):\n self.tamagotchi.update_status(TimeKeeper.time_check())\n if self.tamagotchi.health == 0:\n return True\n else:\n return False",
"def is_dead(self):\n return self.health <= 0",
"def is_dead(self):\n return self.hp <= 0",
"def is_dead(self):\r\n return pg.time.get_ticks() - self.startTime > self.duration",
"def is_player_dead(self):\n return self.game.is_player_dead()",
"def has_commands(self) -> bool:\n return len(self.commands) > 0",
"def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd",
"def is_dead(self):\r\n if len(self.parachute) <= 5:\r\n self.parachute.pop(0)\r\n self.parachute.insert(0, \" x\")\r\n return True\r\n else:\r\n return False",
"def are_you_dead(dead):\n if dead:\n quit()",
"def is_command(self, message):\n return self._regex.match(message) is not None",
"def is_command(message):\n return True if re.match(Command.COMMAND_RE, message.content) else False",
"def should_poll(self):\r\n return self._command_state is not None",
"def assumed_state(self):\r\n return self._command_state is None",
"def should_poll(self):\n return self._command_state is not None",
"def check_valid_command(command):\n if len(command) == 0:\n return False",
"def is_ready(self, command=\"id\"):\n if self.runner_ready:\n return True\n\n try:\n # run command to determine if the host is ready for use\n self.run_command_check_call(command)\n except (subprocess.CalledProcessError) as _:\n # ignore exceptions, this is useful when the host is still stating up\n pass\n else:\n log.debug(\"Runner is ready for use\")\n self.runner_ready = True\n\n return self.runner_ready",
"def is_alive(self):\n return (self.read_name() != '')",
"def deadPerson (self) :\n\t\treturn (self._health <= 0 or self._healthTitle == \"Dead\")",
"def is_alive(self):\r\n if self.health > 0 and self.life_span > 0:\r\n return True\r\n else:\r\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get unique list of new config files in watch dir.
|
def get_config(self):
config = set()
while True:
            # Keep only watched files that we have not collected yet. Build a
            # new list rather than calling remove() while iterating, which
            # would skip entries.
            filenames = [
                fn for fn in self.get_config_files()
                if fn in self.watch_names and fn not in config
            ]
# If we did not find any new config files, exit loop.
if not filenames:
break
# Save the config files we found, sleep, then look again.
config.update(filenames)
# Sleep a bit to allow for settling. We loop until no new
# config files are found.
time.sleep(1.0)
return config
|
[
"def getPreviousApplicationSettingsDirsByTime() -> List[java.io.File]:\n ...",
"def _get_cfg_list():\n config.update({'sensors': {}})\n conf_files = filter(lambda x: x.endswith('.yaml') and not x.startswith('unicon-setting'),\n os.listdir(path=f\"{cpath}\"))\n cfg_list = list(map(lambda line: line.split('.')[0], conf_files))\n return cfg_list",
"def files_to_sync(self):\n return []",
"def get_config_files(self):\n self.clear_lists()\n print self.abs_directory\n for file in os.listdir(self.abs_directory):\n print file\n if file.endswith('.json') and \"qemii\" in file:\n self.txt_files.append(file)",
"def get_file_list_without_current_log():\n full_list = sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime)\n full_list.remove(\"connect-log.log\")\n return full_list",
"def config_files(self):\n return self._config_files",
"def _configFiles(self):\n import glob\n ret = [] \n for ext in self.configManager.extensions:\n ret.extend(\n glob.glob(f\"{self.pipelinesDir}/{self.pipeName}/*{ext}\"))\n return ret",
"def list_config(\n temp_config_paths: submanager.models.config.ConfigPaths,\n) -> submanager.models.config.ConfigPaths:\n config_data: Any = [\"spam\", \"eggs\"]\n submanager.config.utils.write_config(\n config_data,\n config_path=temp_config_paths.static,\n )\n return temp_config_paths",
"def _get_changed_paths(self):\n paths = set()\n while True:\n if not self._inotify_poll.poll(0):\n break\n\n self._inotify_events += os.read(self._inotify_fd, 1024)\n while len(self._inotify_events) > _INOTIFY_EVENT_SIZE:\n wd, mask, cookie, length = _INOTIFY_EVENT.unpack(\n self._inotify_events[:_INOTIFY_EVENT_SIZE])\n if len(self._inotify_events) < _INOTIFY_EVENT_SIZE + length:\n break\n\n name = self._inotify_events[\n _INOTIFY_EVENT_SIZE:_INOTIFY_EVENT_SIZE+length]\n name = name.rstrip('\\0')\n\n logging.debug('wd=%s, mask=%s, cookie=%s, length=%s, name=%r',\n wd, hex(mask), cookie, length, name)\n\n self._inotify_events = self._inotify_events[_INOTIFY_EVENT_SIZE+length:]\n\n if mask & IN_IGNORED:\n continue\n try:\n directory = self._watch_to_directory[wd]\n except KeyError:\n logging.debug('Watch deleted for watch descriptor=%d', wd)\n continue\n\n path = os.path.join(directory, name)\n if os.path.isdir(path) or path in self._directory_to_watch_descriptor:\n if mask & IN_DELETE:\n self._remove_watch_for_path(path)\n elif mask & IN_MOVED_FROM:\n self._remove_watch_for_path(path)\n elif mask & IN_CREATE:\n self._add_watch_for_path(path)\n elif mask & IN_MOVED_TO:\n self._add_watch_for_path(path)\n if path not in paths:\n paths.add(path)\n return paths",
"def watchList(self):\n return self._watchList",
"def find_config_files(create=False):\n files = [\".wpwatcher/wpwatcher.conf\", \"wpwatcher.conf\"]\n env = [\"HOME\", \"XDG_CONFIG_HOME\", \"APPDATA\", \"PWD\"]\n\n return WPWatcherConfig.find_files(env, files, WPWatcherConfig.TEMPLATE_FILE)",
"def getFileList(self):\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.sensor + sep + 'padhist'\n pattern = '*' + self.sensor + '_hstv*.mat'\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n uTime = stringTimeToUnix(name[0:13] + '_00_00.000')\n if ( self.uStart <= uTime <= self.uStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n fileList.sort()\n self.fileList = fileList",
"def _get_config_files():\n # default files\n conf_files = cfg.find_config_files(project='monasca',\n prog='monasca-persister')\n # deprecated config files (only used if standard config files are not there)\n if len(conf_files) == 0:\n old_conf_files = cfg.find_config_files(project='monasca',\n prog='persister')\n if len(old_conf_files) > 0:\n LOG.warning('Found deprecated old location \"{}\" '\n 'of main configuration file'.format(old_conf_files))\n conf_files += old_conf_files\n return conf_files",
"def __get_configs_from_dir(self, config_dir):\n configs = []\n try:\n conf_files = os.listdir(config_dir)\n conf_files.sort()\n for filename in conf_files:\n if fnmatch.fnmatch(filename, '*.ini'):\n config_file = os.path.realpath(\n config_dir + os.sep + filename)\n self.log.debug(\"Adding config file: %s\", config_file)\n configs += [config_file]\n except OSError:\n self.log.warning(\n \"Failed to get configs from %s\",\n config_dir,\n exc_info=True\n )\n\n return configs",
"def get_all_init_filepaths(dir_path):\n pattern = re.compile(\".sync\");\n init_files = [ f for f in os.listdir(dir_path)\n if re.search(pattern, f)]\n print \"-\"*100\n print(\"INIT Files\")\n print(init_files)\n print \"-\"*100\n return init_files",
"def get_update_file_list(directory):\n update_files_list = set(UPDATE_FILES_STATIC)\n update_files_exclude = set(UPDATE_FILES_EXCLUDE)\n\n for root, dirs, files in os.walk(path.join(PATH_ROOT, directory)):\n for filen in files:\n if UPDATE_FILES_RE.match(filen):\n filep = path.join(root, filen)\n update_files_list.add(path.relpath(filep, PATH_ROOT))\n \n return update_files_list - update_files_exclude",
"def collect_configs(self, folder_path):\n\n dl_req = list()\n for root, dir, files in os.walk(folder_path):\n for i in dir:\n new_path = root + '/' + i\n for nroot, _, nfiles in os.walk(new_path):\n configs = [c for c in nfiles if 'conf' in c]\n dl_req.append((nroot, configs))\n print(dl_req)\n return dl_req",
"def _config_list(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n lines = []\n for config in res['configs']:\n line = '* ' if config['current'] else ' '\n\n if ctx.verbose:\n line += config['mtime'] + ' '\n\n line += config['name']\n lines.append(line)\n\n return \"\\n\".join(lines)",
"def loadRecentFiles(self):\n self.recentFiles.clear()\n for n in range(RECENTFILEMAX):\n rf = self.getSection(CFG_RECENT, str(n))\n if rf:\n self.recentFiles.append(rf)\n else:\n break"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Kill the running command.
|
def kill(self):
if self.process is not None:
LOGGER.info('Killing command...')
self.process.kill()
self.process = None
|
[
"def kill(self):\n\n self.proc.kill()",
"def kill(self):\n self._stop_proc(signal.SIGKILL)",
"def kill_subprocess(self):\n try:\n self.process.kill()\n except OSError:\n pass\n return",
"def kill(self):\n self.rpc_process.kill()",
"def stop(self):\n self._kill_process()",
"def kill(self):\n if self.process:\n logs.log('Stopping emulator.')\n self.process.kill()\n self.process = None",
"def kill(self):\n if self.run_proc.running():\n self.run_proc.kill()\n else:\n logging.info(\n 'Not killing the experiment process because it is not running.'\n )",
"def killJob(self):\n \n pid = self.killText.get(\"1.0\",END)\n print(pid)\n try:\n os.kill(int(pid), signal.SIGTERM)\n except WindowsError:\n self.displayErrorWindow(\"The process that you selected is no longer in use.\")\n self.killWindow.destroy()",
"def kill(self):\n self.send_signal(signal.SIGKILL)",
"def kill(self):\r\n self.send_signal(signal.SIGKILL)",
"def kill(self):\n self._screen_commands('quit')",
"def stop(self):\n try:\n self.process.terminate()\n self.process.kill()\n except AttributeError:\n raise BagNotRunningError(\"stop\")",
"def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False",
"def kill(self, ti):\n if self.cmd is None:\n if not ti and not self.task_instance:\n raise Exception(\"Unable to cancel Qubole Command, context is unavailable!\")\n elif not ti:\n ti = self.task_instance\n cmd_id = ti.xcom_pull(key=\"qbol_cmd_id\", task_ids=ti.task_id)\n self.cmd = self.cls.find(cmd_id)\n if self.cls and self.cmd:\n self.log.info(\"Sending KILL signal to Qubole Command Id: %s\", self.cmd.id)\n self.cmd.cancel()",
"def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")",
"async def kill(self) -> None:\n self.send_signal(signal.SIGKILL)\n await self.update_state(State.FINISHED)\n self.error = ProcessKilled(\"Process terminated with SIGKILL\")",
"def kill(self):\n \n self.killSlavePids()",
"def script_kill(self):\n return self._execute([b'SCRIPT', b'KILL'], b'OK')",
"def command_kill(self):\n required_arguments = {\n 'kill': self.BINARY_PATHS['kill'],\n 'pid': self.screen_pid\n }\n\n self._previous_arguments = required_arguments\n return '%(kill)s %(pid)s' % required_arguments"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Backs up entire configuration.
|
def backup_config(self):
prev_config = set()
for src in self.config:
dst = '%s.prev' % src
LOGGER.debug('Backing up %s to %s', src, dst)
try:
shutil.copy(src, dst)
except IOError as e:
if e.errno != errno.ENOENT:
raise
# If the config file is missing, we can skip backing it up.
LOGGER.warning('File %s missing, skipping backup', src)
else:
prev_config.add(dst)
return prev_config
|
[
"def backup_config():\n copy(CONFIG_FILE, CONFIG_FILE + '.bak')",
"def backupCfg( self ):\n self.handle.sendline( \"\" )\n self.handle.expect( self.prompt )\n self.handle.sendline( \"cp %s%s %s%s.backup\" % (self.switchDirectory, self.conf, self.switchDirectory, self.conf) )\n self.handle.expect( self.prompt )",
"def backup_current_config():\n LOGGER.debug(\"Backing up current config\")\n\n backup(VIMRC)\n backup(VIMDIR)\n #backup(BASHRC)\n #backup(ZSHRC)\n backup(PYLINTRC)",
"def backup(config_file, bakfile):\n return _backup_config(config_file, bakfile)",
"def backup_config(self):\n if not os.path.isdir(self.data_dir):\n os.makedirs(self.data_dir)\n\n _info('Exporting templates and hostgroups')\n self.export_component_config('template', 'templateid', 'templates',\n 'templates')\n self.export_component_config('hostgroup', 'groupid', 'groups',\n 'hostgroups')\n _info('Exporting hosts')\n self.export_component_config('host', 'hostid', 'hosts', 'hosts')\n _info('Exporting registration actions')\n self.export_action_config(2, 'reg_actions',\n 'auto-registration actions')\n _info('Exporting trigger actions')\n self.export_action_config(0, 'trigger_actions',\n 'trigger actions')\n _info('Exporting media types')\n self.export_component_config('mediatype', 'mediatypeid', 'mediaTypes', 'mediatypes')\n\n _info('Exporting services')\n self.export_component('service', 'services')\n\n _info('Exporting proxies')\n self.export_component('proxy', 'proxies')\n\n self.get_id_file()",
"def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')",
"def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")",
"def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')",
"def _backup_config(config_file, bak_path=None):\n try:\n if not bak_path:\n bak_path = config_file+\".bak\"\n with open(config_file, 'r') as oldfile, open(bak_path, 'w') as bakfile:\n tmp = oldfile.read(1024)\n while tmp:\n bakfile.write(tmp)\n tmp = oldfile.read(1024)\n except Exception, e:\n return 1, e\n return 0, \"success\"",
"def backup_data(self):\n self.log.info('Initialized backup of data.')\n try:\n for env in self.environments:\n drupal_vars = pantheon.parse_vhost(self.server.get_vhost_file(\n self.project, env))\n dest = os.path.join(self.backup_dir, env, 'database.sql')\n self._dump_data(dest, drupal_vars)\n except:\n self.log.exception('Backing up the data was unsuccessful.')\n raise\n else:\n self.log.info('Backup of data successful.')",
"def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)",
"def backup(self):\n # s=colorize(value.strftime(\"%Y-%m-%d\"),color)\n prefix=DateTime.now().strftime(\"%Y%m%d_%H%M%S\")+\"_\"\n p_backup = self._config._path_backup\n f_todo=self._config._file_todo\n f_todo_backup=os.path.join(p_backup, prefix+self._config.todo_backup)\n f_todo_archive=self._config._file_archive\n f_todo_archive_backup=os.path.join(p_backup, prefix+self._config.archive_backup)\n\n logger.debug(\"### Archiving\")\n logger.debug(\" -%s\",f_todo)\n logger.debug(\" %s\",f_todo_backup)\n logger.debug(\" -%s\",f_todo_archive)\n logger.debug(\" %s\",f_todo_archive_backup)\n shutil.copy(src=f_todo,dst=f_todo_backup)\n shutil.copy(src=f_todo_archive,dst=f_todo_archive_backup)",
"def backup_monit_config_files(duthost):\n logger.info(\"Backing up Monit configuration files on DuT '{}' ...\".format(duthost.hostname))\n duthost.shell(\"cp -f /etc/monit/monitrc /tmp/\")\n duthost.shell(\"mv -f /etc/monit/conf.d/monit_* /tmp/\")\n duthost.shell(\"cp -f /tmp/monit_telemetry /etc/monit/conf.d/\")\n logger.info(\"Monit configuration files on DuT '{}' is backed up.\".format(duthost.hostname))",
"def env_backup():",
"def _backup_and_restore_config_db(duthost):\n CONFIG_DB = \"/etc/sonic/config_db.json\"\n CONFIG_DB_BAK = \"/etc/sonic/config_db.json.before_test\"\n logger.info(\"Backup {} to {}\".format(CONFIG_DB, CONFIG_DB_BAK))\n duthost.shell(\"cp {} {}\".format(CONFIG_DB, CONFIG_DB_BAK))\n\n yield\n\n logger.info(\"Restore {} with {}\".format(CONFIG_DB, CONFIG_DB_BAK))\n duthost.shell(\"mv {} {}\".format(CONFIG_DB_BAK, CONFIG_DB))",
"def backup(self):\n ds = self.datasheets(name = \"core_Backup\")\n args = [\"--lib=%s\" % self.location, \"--backup\"]\n \n if ds.IncludeInput[0] == \"Yes\":\n args += [\"--input\"]\n \n if ds.IncludeOutput[0] == \"Yes\":\n args += [\"--output\"]\n \n self.session._Session__call_console(args)",
"def backup(self):\n # If the target directory doesn't exist, create it.\n if not os.path.exists(self.target):\n os.makedirs(self.target)\n\n # Sync source and target directories.\n if self.type == 'directory':\n self.sync_ordinary_directory()\n elif self.type == 'virtual-machine':\n self.sync_virtual_machine()",
"def backup_running_config(task: Task):\n # 创建存放备份配置的目标文件夹\n pathlib.Path(\"backup_config\").mkdir(exist_ok=True)\n r = task.run(\n task=netmiko_send_command,\n name=f\"Backup {task.host.name} {task.host.hostname}\",\n command_string=\"display current-configuration\",\n use_timing=True,\n )\n # task.host['backup'] = r.result\n if not r.failed:\n print(f\"{r.name} completed successfully!\")\n task.run(\n task=write_file,\n filename=f\"backup_config/{task.host.name}.cfg\",\n content=r.result\n )",
"def do_backup_w_args():\n backup()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Backup old config, write new config, test config, HUP or restore.
|
def test_and_swap(self, config):
LOGGER.info('Attempting to apply new configuration')
backup = self.backup_config()
# We have backed up ALL config files (not just the ones we might
# replace). If any error occurs from here out, we will need to restore
# our config, so we will use exception handling.
try:
self.install_config(config)
        # We have now merged in our new configuration files, let's test this
        # config.
if self.test_command(quiet=False):
LOGGER.debug('Configuration good, reloading')
self.reload_command()
self.remove_config(backup)
else:
LOGGER.info('Configuration bad, restoring')
self.restore_config(backup)
except Exception:
LOGGER.exception('Failure, restoring config', exc_info=True)
self.restore_config(backup)
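
As a rough, hedged illustration of the same backup / test / HUP-or-restore flow outside this class: the standalone sketch below works on a single config file and a pidfile. The helper name swap_config, the paths, and the example test command are placeholders invented for this sketch; they are not part of the dataset entry above.

import os
import shutil
import signal
import subprocess

def swap_config(path, new_text, test_cmd, pidfile):
    """Back up path, write new_text, test it, then HUP the daemon or restore."""
    backup = path + '.bak'
    shutil.copy2(path, backup)                    # keep a copy we can roll back to
    with open(path, 'w') as fh:
        fh.write(new_text)
    try:
        subprocess.run(test_cmd, check=True)      # e.g. ['nginx', '-t'] (placeholder)
    except (OSError, subprocess.CalledProcessError):
        shutil.copy2(backup, path)                # new config failed its test: restore
        raise
    with open(pidfile) as fh:
        os.kill(int(fh.read().strip()), signal.SIGHUP)  # ask the daemon to reload
    os.remove(backup)                             # config accepted, drop the backup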
|
[
"def backup_config():\n copy(CONFIG_FILE, CONFIG_FILE + '.bak')",
"def backup_current_config():\n LOGGER.debug(\"Backing up current config\")\n\n backup(VIMRC)\n backup(VIMDIR)\n #backup(BASHRC)\n #backup(ZSHRC)\n backup(PYLINTRC)",
"def _backup_config(config_file, bak_path=None):\n try:\n if not bak_path:\n bak_path = config_file+\".bak\"\n with open(config_file, 'r') as oldfile, open(bak_path, 'w') as bakfile:\n tmp = oldfile.read(1024)\n while tmp:\n bakfile.write(tmp)\n tmp = oldfile.read(1024)\n except Exception, e:\n return 1, e\n return 0, \"success\"",
"def backup_config(self):\n prev_config = set()\n for src in self.config:\n dst = '%s.prev' % src\n LOGGER.debug('Backing up %s to %s', src, dst)\n\n try:\n shutil.copy(src, dst)\n\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n\n # If the config file is missing, we can skip backing it up.\n LOGGER.warning('File %s missing, skipping backup', src)\n\n else:\n prev_config.add(dst)\n return prev_config",
"def backupCfg( self ):\n self.handle.sendline( \"\" )\n self.handle.expect( self.prompt )\n self.handle.sendline( \"cp %s%s %s%s.backup\" % (self.switchDirectory, self.conf, self.switchDirectory, self.conf) )\n self.handle.expect( self.prompt )",
"def _backup_and_restore_config_db(duthost):\n CONFIG_DB = \"/etc/sonic/config_db.json\"\n CONFIG_DB_BAK = \"/etc/sonic/config_db.json.before_test\"\n logger.info(\"Backup {} to {}\".format(CONFIG_DB, CONFIG_DB_BAK))\n duthost.shell(\"cp {} {}\".format(CONFIG_DB, CONFIG_DB_BAK))\n\n yield\n\n logger.info(\"Restore {} with {}\".format(CONFIG_DB, CONFIG_DB_BAK))\n duthost.shell(\"mv {} {}\".format(CONFIG_DB_BAK, CONFIG_DB))",
"def backup(config_file, bakfile):\n return _backup_config(config_file, bakfile)",
"def slave_restore_config(self):\r\n self.assert_is_master(False)\r\n\r\n cf_source_dir = self.cf.getfile(\"config_backup\", \"\")\r\n cf_target_dir = self.cf.getfile(\"slave_config_dir\", \"\")\r\n\r\n if not cf_source_dir:\r\n self.log.info(\"Configuration backup location not specified.\")\r\n return\r\n\r\n if not cf_target_dir:\r\n self.log.info(\"Configuration directory not specified, config files not restored.\")\r\n return\r\n\r\n if not os.path.exists(cf_target_dir):\r\n self.log.warning(\"Configuration directory does not exist: %s\", cf_target_dir)\r\n return\r\n\r\n self.log.info(\"Restoring configuration files\")\r\n for cf in ('postgresql.conf', 'pg_hba.conf', 'pg_ident.conf'):\r\n cfsrc = os.path.join(cf_source_dir, cf)\r\n cfdst = os.path.join(cf_target_dir, cf)\r\n\r\n if not os.path.isfile(cfsrc):\r\n self.log.warning(\"Missing configuration file backup: %s\", cf)\r\n continue\r\n\r\n self.log.debug(\"Copy %s to %s\", cfsrc, cfdst)\r\n if not self.not_really:\r\n copy_conf(cfsrc, cfdst)\r\n if cf == 'postgresql.conf':\r\n self.slave_deconfigure_archiving(cfdst)",
"def test_config_save_restore(self):\n\n config_filename_initial = 'test_configuration'\n config_filename_save = 'save_configuration'\n\n # Get config path\n local_dir = os.path.dirname(__file__)\n config_path_initial = os.path.join(local_dir, config_filename_initial)\n config_path_save = os.path.join(local_dir, config_filename_save)\n\n # Load initial configuration from file\n config_initial = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_initial)\n\n config1 = config_initial.genome_config\n names1 = [p.name for p in config1._params]\n for n in names1:\n assert hasattr(config1, n)\n\n # Save configuration to another file\n config_initial.save(config_path_save)\n\n # Obtain configuration from saved file\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_save)\n\n config2 = config.genome_config\n names2 = [p.name for p in config2._params]\n for n in names2:\n assert hasattr(config2, n)\n\n self.assertEqual(names1, names2)\n\n for n in names1:\n v1 = getattr(config1, n)\n v2 = getattr(config2, n)\n self.assertEqual(v1, v2)",
"def backup_keepalived_conf():\n try:\n shutil.copyfile(KEEPALIVED_CONF_FILE, KEEPALIVED_CONF_BACKUP)\n return True\n except:\n raise ValueError",
"def env_backup():",
"def sanitize_new_config(self):\n config_log = self._load_config_log()\n if 'new' in config_log:\n for cfg in config_log['new']:\n with open(cfg, 'r+') as f:\n data = yaml.load(f)\n f.seek(0)\n yaml.safe_dump(data, f, default_flow_style=False)\n f.truncate()\n del config_log['new']\n\n self._save_config_log(config_log)",
"def backup_monit_config_files(duthost):\n logger.info(\"Backing up Monit configuration files on DuT '{}' ...\".format(duthost.hostname))\n duthost.shell(\"cp -f /etc/monit/monitrc /tmp/\")\n duthost.shell(\"mv -f /etc/monit/conf.d/monit_* /tmp/\")\n duthost.shell(\"cp -f /tmp/monit_telemetry /etc/monit/conf.d/\")\n logger.info(\"Monit configuration files on DuT '{}' is backed up.\".format(duthost.hostname))",
"def restore_backup():\n\n # restore vim configuration folder\n if exists('.vim-bkp'):\n print(green('Restoring your vim configuration folder.'))\n cmd = 'rm -rf .vim'\n run(cmd)\n cmd = 'mv .vim-bkp .vim'\n run(cmd)\n else:\n print(red('vim-bkp folder not found.'))\n\n # restore vim configuration file\n if exists('.vimrc-bkp'):\n print(green('Restoring your vim configuration file.'))\n cmd = 'rm -rf .vimrc'\n run(cmd)\n cmd = 'mv .vimrc-bkp .vimrc'\n run(cmd)\n else:\n print(red('vimrc-bkp file not found.'))",
"def test_config_save_restore1(self):\n\n config_filename_initial = 'test_configuration2'\n config_filename_save = 'save_configuration2'\n\n # Get config path\n local_dir = os.path.dirname(__file__)\n config_path_initial = os.path.join(local_dir, config_filename_initial)\n config_path_save = os.path.join(local_dir, config_filename_save)\n\n # Load initial configuration from file\n config_initial = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_initial)\n\n config1 = config_initial.genome_config\n names1 = [p.name for p in config1._params]\n for n in names1:\n assert hasattr(config1, n)\n\n # Save configuration to another file\n config_initial.save(config_path_save)\n\n # Obtain configuration from saved file\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path_save)\n\n config2 = config.genome_config\n names2 = [p.name for p in config2._params]\n for n in names2:\n assert hasattr(config2, n)\n\n self.assertEqual(names1, names2)\n\n for n in names1:\n v1 = getattr(config1, n)\n v2 = getattr(config2, n)\n self.assertEqual(v1, v2)",
"def backup_config(context):\n context.copy_from(DNF_PLUGIN_DATA_PATH, DNF_PLUGIN_DATA_LOG_PATH)",
"def backup_tempest_config(conf_file, res_dir):\n if not os.path.exists(res_dir):\n os.makedirs(res_dir)\n shutil.copyfile(conf_file,\n os.path.join(res_dir, 'tempest.conf'))",
"async def recreate(self):\n self._config = get_default_config()\n await self.save()",
"def backup_running_config(task: Task):\n # 创建存放备份配置的目标文件夹\n pathlib.Path(\"backup_config\").mkdir(exist_ok=True)\n r = task.run(\n task=netmiko_send_command,\n name=f\"Backup {task.host.name} {task.host.hostname}\",\n command_string=\"display current-configuration\",\n use_timing=True,\n )\n # task.host['backup'] = r.result\n if not r.failed:\n print(f\"{r.name} completed successfully!\")\n task.run(\n task=write_file,\n filename=f\"backup_config/{task.host.name}.cfg\",\n content=r.result\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plot and save histograms from predicted steerings and real steerings. Arguments
|
def make_and_save_histogramsX(pred_steerings, real_steerings,
img_name = "histogramsX.png"):
pred_steerings = np.array(pred_steerings)
real_steerings = np.array(real_steerings)
max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))
min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))
bins = np.linspace(min_h, max_h, num=50)
plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')
plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')
#plt.title('Steering angle')
plt.legend(fontsize=10)
plt.savefig(img_name, bbox_inches='tight')
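
A minimal usage sketch, not part of the dataset row: it feeds synthetic steering angles to the function defined above and assumes that function (plus its module-level numpy/matplotlib imports) is available.

import numpy as np
import matplotlib.pyplot as plt

# Synthetic steering angles, purely for illustration.
rng = np.random.default_rng(0)
real = rng.normal(0.0, 0.2, size=1000)
pred = real + rng.normal(0.0, 0.05, size=1000)

plt.figure()
make_and_save_histogramsX(pred, real, img_name="histogramsX.png")
plt.close()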
|
[
"def make_and_save_histogramsY(pred_steerings, real_steerings,\n img_name = \"histogramsY.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))\n bins = np.linspace(min_h, max_h, num=50)\n plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')\n plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')\n #plt.title('Steering angle')\n plt.legend(fontsize=10)\n plt.savefig(img_name, bbox_inches='tight')",
"def plot_prediction_histograms(dataset, model1, model2, model3=[]):",
"def plot_hist_snfit_meta(self): \n \n self.read_meta()\n self.read_snfit_results()\n\n \n self.diff_x0 = []\n self.diff_x0_err = []\n self.diff_x1 = []\n self.diff_x1_err = [] \n self.diff_c = []\n self.diff_c_err = [] \n self.diff_mb = []\n self.diff_mb_err = [] \n self.diff_cov_x0_x1 = []\n self.diff_cov_x0_c = []\n self.diff_cov_x1_c = []\n self.diff_cov_mb_x1 = []\n self.diff_cov_mb_c = []\n\n for i in range (len(self.sn_name)):\n for j in range (len(self.meta_sn_name_list)):\n if self.sn_name[i] == self.meta_sn_name_list[j]:\n if np.abs(self.mb[i] - self.meta_mb[j]) < 0.0001:\n self.diff_x0.append(self.x0[i] - self.meta_x0[j])\n self.diff_x0_err.append(self.x0_err[i] - self.meta_x0_err[j])\n self.diff_x1.append(self.x1[i] - self.meta_x1[j])\n self.diff_x1_err.append(self.x1_err[i] - self.meta_x1_err[j]) \n self.diff_c.append(self.c[i] - self.meta_c[j])\n self.diff_c_err.append(self.c_err[i] - self.meta_c_err[j]) \n self.diff_mb.append(self.mb[i] - self.meta_mb[j])\n self.diff_mb_err.append(self.mb_err[i] - self.meta_mb_err[j])\n# self.diff_cov_x0_x1.append()\n# self.diff_cov_x0_c.append()\n# self.diff_cov_x1_c.append()\n# self.diff_cov_mb_x1.append()\n# self.diff_cov_mb_c.append()\n else:\n print self.x1[i] - self.meta_x1[j], self.sn_name[i],self.meta_sn_name_list[j], self.x1[i], self.meta_x1[j]\n\n\n fig = plt.figure(figsize=(8.,8.)) \n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0,25,label='$\\Delta$ X0')\n ax0_2.hist(self.diff_x0_err,25,label='$\\Delta$ X0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x0_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1,25,label='$\\Delta$ X1')\n ax0_2.hist(self.diff_x1_err,25,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x1_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c,25,label='$\\Delta$ Color')\n ax0_2.hist(self.diff_c_err,25,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/color_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb,50,label='$\\Delta$ mb')\n ax0_2.hist(self.diff_mb_err,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/mb_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()",
"def graphic(self,final):\r\n\t\t\r\n\t\t\r\n\t\t#Creating an adeguate linspace\r\n\t\tbins \t\t= np.linspace(0,self.Ndeputies,int(self.Ndeputies/2))\r\n\t\t#opening the real assigned seats file \r\n\t\treal \t\t= [line.strip() for line in open('Elections/Real_Election_for_Confrontation.txt')]\r\n\r\n\t\tif len(real) == 0:\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tfor i in self.Parties:\r\n\t\t\t\t#Creating the single histogram\r\n\t\t\t\tplt.hist(final[i],bins,alpha = 0.5,label = i+'_sim') \r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\telif len(real) !=0:\r\n\t\t\t\r\n\t\t\treal = {real[0].split('\\t')[i]:float(real[1].split('\\t')[i]) for i in range(len(real[0].split('\\t')))}\r\n\t\t\t\r\n\t\t\tif len(real) != len(final):\r\n\t\t\t\traise ValueError ('There is not the same number of both real ad simulated parties!')\r\n\t\t\t\r\n\t\t\tif final.keys() != real.keys():\r\n\t\t\t\traise ValueError ('The simulated Parties and the real parties seem to have different names!')\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tfor i in self.Parties:\r\n\t\t\t\tplt.hist(final[i],bins,alpha = 0.5,label = i+'_sim')\r\n\t\t\t\t#creating an histogram with both real and results data displayed\r\n\t\t\t\tplt.hist(real[i],bins,alpha = 0.5,label = i + 'real') \r\n\t\t\t\r\n\t\t\t\r\n\t\tplt.xlabel('Seats')\r\n\t\tplt.legend(loc='upper right')\r\n\t\tplt.title(self.election)\r\n\t\t#saving the histogram\r\n\t\tplt.savefig(\"Graphic/Histogram-Confrontation_for_\"+self.election+\".png\") \r\n\t\tplt.close()\r\n\t\r\n\t\tfor i in self.Parties:\r\n\t\t\tif self.Results[i]<0.05:\r\n\t\t\t\tcontinue\r\n\t\t\t#Plotting the histogram with all possible results throughout the simulation.\r\n\t\t\tplt.hist(self.allResults[i],bins,alpha = 0.5,label = i) \r\n\t\t\t\r\n\r\n\t\tplt.xlabel('Seats')\r\n\t\tplt.legend(loc='upper right')\r\n\t\t#Saving the histogram\r\n\t\tplt.savefig(\"Graphic/Numbers of possible results_for_\"+self.election+'.png')\r\n\t\tplt.close()",
"def plot_data_hist(self, show=False):\n\t\tfig, ax = plt.subplots(2, 1 ,figsize= (15,10))\n\n\t\tax[0].hist(self.data['x_central'][np.random.randint(self.n_train*self.n_s)]\n\t\t\t,label='training data',alpha=0.5)\n\n\t\tax[0].legend(frameon = False)\n\t\tax[0].set_xlabel(\"Data amplitude\")\n\t\tax[0].set_ylabel('Counts')\n\t\tax[0].set_title('%i data points'%self.input_shape[0])\n\t\tif not self.rescaled: ax[0].set_xlim(0,6)\n\n\t\tax[1].hist(self.data['x_central_test'][np.random.randint(self.n_s)]\n\t\t\t,label='test data',alpha=0.5)\n\n\t\tax[1].legend(frameon = False)\n\t\tax[0].set_title('%i data points'%self.input_shape[0])\n\t\tax[1].set_xlabel(\"Data amplitude\")\n\t\tax[1].set_ylabel('Counts')\n\t\tif not self.rescaled: ax[1].set_xlim(0,6)\n\t\t\n\t\tplt.savefig(f'{self.figuredir}data_visualization_hist_{self.modelversion}.png')\n\t\tif show: plt.show()\n\t\tplt.close()",
"def save_histograms(self):\n if self.rank == 0:\n output_switch = \"%sSwitch_%d.hst\" % (settings.DIRS['calc'], self._stepT)\n output_updown = \"%sUpDown_%d.hst\" % (settings.DIRS['calc'], self._stepT)\n output_round_trip = \"%sRoundTrip_%d.hst\" % (settings.DIRS['calc'], self._stepT)\n\n output_switch = open(output_switch, \"w+\")\n output_updown_f = open(output_updown, \"w+\")\n output_round_trip_f = open(output_round_trip, \"w+\")\n output_files = [output_switch, output_updown_f, output_round_trip_f]\n\n tmpTrip = 0.0\n\n for i in range(self.size):\n if i < self.size-1:\n l = \"%d;%f;%f\\n\" % (i, self.temp_list[i], self.switch_histogram[i]/float(self.switch_count))\n output_switch.writelines(l)\n\n l = \"%d;%f;%f\\n\" % (i, self.temp_list[i], self.up_histogram[i]/float(self.up_histogram[i] + self.down_histogram[i]))\n output_updown_f.writelines(l)\n\n if self.round_trip[i] != 0:\n tmpTrip = self.round_trip[i] / 2.0\n l = \"%d;%f;%f;%f\\n\" % (i, self.temp_list[i], tmpTrip, (settings.MC_MODULE.stop_step - settings.MC_MODULE.start_step)/float(tmpTrip))\n output_round_trip_f.writelines(l)\n else:\n l = \"%d;%f;0.00;0.00\\n\" % (i, self.temp_list[i])\n output_round_trip_f.writelines(l)\n\n for output in output_files:\n output.close()\n\n ### save histograms of temperature\n output_temp_histogram = \"%sTemp_%d_%d.hst\" % (settings.DIRS['calc'], self.rank, self._stepT)\n output_temp_histogram_f = open(output_temp_histogram, \"a+\")\n for i in range(self.size):\n l = \"%d;%d;%f;%f\\n\" % (self.rank, i, self.temp_list[i], self.temperature_histogram[i]/float(self.switch_count))\n output_temp_histogram_f.writelines(\"\\n\")\n output_temp_histogram_f.close()",
"def postplot(num, M, V, L, os_probability, streamflow, av_multiplier, Q_futures , Nsize, low_percentile, case_to_derive):\n \n\n# Figure 1: Fit KOSUGI MODEL to historical data\n# Figure 2: derived FDCs\n\n\n# Derive streamflow statistics\n Q_m, Q_v, Q_low = streamflow_statistics(Q_futures, low_percentile, num, case_to_derive)\n \n# Figure 3: plot sampled vs calculated mean/median values\n plt.plot(Q_m, 'ro', label=\"Derived\")\n plt.plot(M, 'b*', label=\"Sampled\")\n plt.legend(loc=\"upper right\")\n plt.grid() \n plt.xlabel(\"Futures\")\n plt.ylabel(\"M\")\n plt.savefig('PostProcessor_plots' + '/Fig3-M.png')\n plt.clf()\n\n# Figure 4: plot sampled vs calculated Std/CV values\n plt.plot(Q_v, 'ro', label=\"Derived\")\n plt.plot(V, 'b*', label=\"Sampled\")\n plt.legend(loc=\"upper right\")\n plt.grid()\n plt.xlabel(\"Futures\")\n plt.ylabel(\"V\")\n plt.savefig('PostProcessor_plots' + '/Fig4-V.png') \n plt.clf()\n\n\n# Figure 5: plot sampled vs calculated low percentile values\n plt.plot(Q_low, 'ro', label=\"Derived\")\n plt.plot(L, 'b*', label=\"Sampled\")\n plt.legend(loc=\"upper right\")\n plt.grid() \n plt.xlabel(\"Futures\")\n plt.ylabel(\"Low Percentile [$m^3/s$]\")\n plt.savefig('PostProcessor_plots' + '/Fig5-Low.png') \n plt.clf()\n\n\n#Figure 6: Random 3 years of observed stream flow vs derived streamflow\n plt.figure(figsize=(11, 6))\n idplot = np.where((av_multiplier[:,1] > 1.75) & (av_multiplier[:,0] < 0.75) & (0.5 < av_multiplier[:,0]) ) # find the scenario to plot\n idplot = np.asarray(idplot) # converting tuple into int array \n if np.size(idplot) == 0:\n idplot = np.where(av_multiplier[:,1] >= 1.75)\n idplot = np.asarray(idplot) # converting tuple into int array \n idplot = np.min(idplot) # get on of the indices if there is more than one\n \n qplot = Q_futures[:,idplot] # select the future \n qplot = np.reshape(qplot, (len(os_probability),1)) \n #plt.plot(streamflow[8765:-1],'r')\n #plt.plot(qplot[8765:-1],c='0.35')\n plt.plot(streamflow[8765:-1],'r', label=\"Observed Streamflow\")\n plt.plot(qplot[8765:-1], label=\"Derived Streamflow\",c='0.35')\n plt.legend(loc=\"upper right\")\n plt.xlabel(\"Time [Days]\")\n plt.ylabel(\"Discharge [$m^3/s$]\")\n plt.grid() \n plt.xlim(0, len(qplot[8765:-1])+10)\n plt.legend(bbox_to_anchor=(1.05, 1))\n plt.tight_layout()\n plt.savefig('PostProcessor_plots' + '/Fig6-ObservedvsDerived_discharge.png') \n plt.clf()",
"def plot_train_hist(y_vals, checked_iters, fig_path, ylabel):\n x_vals = np.array(checked_iters)\n y_vals = np.vstack(y_vals)\n plt.plot(x_vals, y_vals, '-', linewidth=2)\n plt.xlabel('Training iteration')\n plt.ylabel(ylabel)\n plt.title('Evaluated every: {:d} iterations'.format(\n checked_iters[1]-checked_iters[0]))\n plt.tight_layout()\n ylabel='_'.join(ylabel.lower().split())\n fig_file = os.path.join(fig_path, '{:s}_history.eps'.format(ylabel))\n plt.savefig(fig_file)\n plt.savefig(os.path.join(fig_path, '{:s}_history.png'.format(ylabel)))\n plt.clf()\n print('Wrote: {:s}'.format(fig_file))",
"def spike_hist():\n\n import matplotlib.pyplot as plt\n\n plt.figure()\n for i, (_, spikes) in enumerate(utils.get_data_set('train')):\n x = np.reshape(spikes, (-1,))\n x = x[np.isnan(x) == False]\n print(x.shape)\n\n plt.subplot(5, 2, i + 1)\n plt.hist(np.cast[np.int32](x), range(6), log=True)\n\n plt.show()",
"def plot_predictions_histogram(df):\n\n fig = go.Figure()\n fig.add_trace(go.Histogram(x=df[\"preds\"], name=\"preds\"))\n fig.add_trace(go.Histogram(x=df[\"truth\"], name=\"truth\"))\n\n # Overlay both histograms\n fig.update_layout(barmode=\"overlay\")\n # Reduce opacity to see both histograms\n fig.update_traces(opacity=0.5)\n fig.update_layout(xaxis_title=r\"HOMO-LUMO\", yaxis_title=r\"count.\")\n wandb.log({f\"Predictions Hist\": fig})",
"def savehists(sag):\n for i in sag['train'].columns.values:\n sag.dohistograms(sag['train'], i)\n\n for i in sag['train'].columns.values:\n #plt.close()\n sag.dotimeseries(sag['train'], i)\n #plt.close('all')",
"def plot_residuals(turnstile_weather, predictions):\n plt.figure()\n (turnstile_weather['ENTRIESn_hourly'] - predictions).hist()\n return plt",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def plot_explore_histograms(ds, labels):\n # set up a dictionary with the percentiles of the histograms to be plotted\n p = {0: {\"lwr\": 0.0, \"upr\": 100.0},\n 1: {\"lwr\": 0.1, \"upr\": 99.9},\n 2: {\"lwr\": 0.5, \"upr\": 99.5},\n 3: {\"lwr\": 1.0, \"upr\": 99.0},\n 4: {\"lwr\": 2.5, \"upr\": 97.5}}\n site_name = ds.root[\"Attributes\"][\"site_name\"]\n plt.ion()\n for label in labels:\n var = pfp_utils.GetVariable(ds, label)\n sdt = var[\"DateTime\"][0]\n edt = var[\"DateTime\"][-1]\n fig = plt.figure(figsize=(11, 8), tight_layout=True)\n window_title = site_name + \": \" + var[\"Label\"]\n fig.canvas.manager.set_window_title(window_title)\n gs = gridspec.GridSpec(2, 5, height_ratios=[1, 0.5])\n ax_ts = fig.add_subplot(gs[0, :])\n title_str = site_name + \": \" + sdt.strftime(\"%Y-%m-%d\") + \" to \"\n title_str += edt.strftime(\"%Y-%m-%d\")\n ax_ts.set_title(title_str)\n ax_ts.plot(var[\"DateTime\"], var[\"Data\"], 'b.', label=var[\"Label\"])\n ax_ts.legend()\n for n in p:\n d = plot_explore_do_histogram(var, p[n][\"lwr\"], p[n][\"upr\"])\n lwrs = str(pfp_utils.round2significant(d[\"lwr\"], 4))\n uprs = str(pfp_utils.round2significant(d[\"upr\"], 4))\n x = numpy.arange(len(d[\"hist\"]))\n ax_hist = fig.add_subplot(gs[1, n])\n label = str(p[n][\"lwr\"]) + \",\" + str(p[n][\"upr\"])\n ax_hist.bar(x, d[\"hist\"])\n ax_hist.text(0.5, 0.9, label, transform=ax_hist.transAxes,\n horizontalalignment='center')\n ax_hist.set_xticks([x[1], x[-2]])\n ax_hist.set_xticklabels([lwrs, uprs])\n plt.draw()\n pfp_utils.mypause(0.5)\n return",
"def turns_hist(turns, seed, num_players, filename):\n plt.hist(turns, 300, (1, 300))\n plt.title(f\"Turns to Win in Chutes and Ladders: {num_players} player(s) \\n \"\n f\"{len(turns)} simulations, random seed = {seed}\")\n plt.xlabel(\"Turns to Land on Final Space\")\n plt.ylabel(\"Number of Games\")\n plt.savefig(filename)",
"def save_dataset_visual(lines, output_dir):\n\n # Ensure output directory exists\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n cols = 3\n rows = 3\n fig_size = (7 * cols, 4 * rows) # Figure width and height, in inches\n # Random sample of images\n save_name = \"training_data_sample.png\"\n fig, ax = plt.subplots(rows, cols, figsize=fig_size)\n for row in range(rows):\n for col in range(cols):\n idx = np.random.randint(0, len(lines))\n ax[row, col].imshow(ndimage.imread(lines[idx][0]))\n ax[row, col].set_title(\"Angle = \" + str(round(lines[idx][1], 3)))\n plt.savefig(output_dir + save_name, bbox_inches='tight')\n # Distribution of steering angles\n save_name = \"data_histogram.png\"\n fig_size = (5, 3) # Figure width and height, in inches\n num_bins = 100\n angles = np.array([line[1] for line in lines])\n hist, bins = np.histogram(angles, bins=num_bins)\n fig = plt.figure(figsize=fig_size)\n plt.bar(bins[:-1], hist)\n plt.xlabel(\"Steering Angle\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Distribution of Steering Angles in Training Data\")\n plt.savefig(output_dir + save_name, bbox_inches='tight')",
"def hist_plot(f_yld,Ahist,Bhist):\n EA=[]; SA=[]; EB=[]; SB=[]\n if (len(Ahist)!=len(Bhist)):\n raise IOError, 'Unexpected unbalanced step size'\n\n sigma_A=[]; sigma_B=[]\n for i in xrange(len(Ahist)):\n A = Ahist[i]; B = Bhist[i]\n ea = A.H.eps; sa = A.H.sig\n eb = B.H.eps; sb = B.H.sig\n\n sig_A = A.stress; sig_B = B.stress\n sigma_A.append(sig_A); sigma_B.append(sig_B)\n\n EA.append(ea); EB.append(eb)\n SA.append(sa); SB.append(sb)\n\n EA=np.array(EA); EB=np.array(EB)\n SA=np.array(SA); SB=np.array(SB)\n S6A=np.array(sigma_A); S6B=np.array(sigma_B)\n\n import matplotlib.pyplot as plt\n fig=plt.figure(figsize=(10,3.5));\n ax1=fig.add_subplot(131)\n ax2=fig.add_subplot(132)\n ax3=fig.add_subplot(133)\n\n ax1.plot(EA,SA,label='A',ls='-',zorder=99)\n ax1.plot(EB,SB,label='B',ls='-',zorder=100,alpha=0.4)\n\n ## plot yield locus\n pi = np.pi; sin=np.sin; cos=np.cos\n th = np.linspace(-pi,pi)\n x=cos(th);y=sin(th)\n z=np.zeros(len(th))\n s=np.array([x,y,z,z,z,z]).T\n print s.shape\n X=[]; Y=[]\n for i in xrange(len(s)):\n ys, phi, dphi, d2phi = vm(s[i])\n X.append(ys[0])\n Y.append(ys[1])\n\n X=np.array(X)\n Y=np.array(Y)\n ax2.plot(X,Y,label='Yield locus')\n ## initial location of region A stress state\n ax2.plot(S6A[0][0],S6A[0][1],'r.',mfc='None',\n mec='r',label='A initial')\n ## final location of region A stress state\n ax2.plot(S6A[-1][0],S6A[-1][1],'rx',mfc='None',\n mec='r',label='A final')\n ## initial location of region B stress state\n ax2.plot(S6B[0][0],S6B[0][1],'g.',mfc='None',\n mec='g',label='B initial')\n ## final location of region B stress state\n ax2.plot(S6B[-1][0],S6B[-1][1],'gx',label='B final')\n\n A1=X*SA[-1];A2=Y*SA[-1]\n B1=X*SB[-1];B2=Y*SB[-1]\n ax3.plot(A1,A2,'-',label='Final Yield locus (A)')\n ax3.plot(B1,B2,'-',label='Final Yield locus (B)')\n # print 'A1'\n # print(A1)\n # print 'B1'\n # print(B1)\n ## initial location of region A stress state\n ax3.plot(S6A[0][0]*SA[0],S6A[0][1]*SA[0],\n 'r.',mfc='None',mec='r',label='A initial')\n ## final location of region A stress state\n ax3.plot(S6A[-1][0]*SA[-1],S6A[-1][1]*SA[-1],\n 'rx',mfc='None',mec='r',label='A final')\n ## initial location of region B stress state\n ax3.plot(S6B[0][0]*SB[0],S6B[0][1]*SB[0],\n 'g.',label='B initial')\n ## final location of region B stress state\n ax3.plot(S6B[-1][0]*SB[-1],S6B[-1][1]*SB[-1],\n 'gx',label='B final')\n ax2.legend();ax3.legend()\n\n fn='hist_plot.pdf'\n fig.tight_layout()\n fig.savefig(fn,bbox_inches='tight')\n print '%s has been saved'%fn",
"def bench_plotter(self):\n\n # plot random as histogram, upper en lower bound as a red line\n minima = []\n for i in range(1, 4):\n cost_list = []\n with open(f\"../output_runs/text_info_random{i}_10k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n counter = 0\n for number in text:\n counter += 1\n if number is not \"\":\n cost_list.append(int(number))\n if counter == 1000:\n break\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random:\", minim, maxim)\n plt.axvline(x=53188, color='r')\n plt.axvline(x=103030, color=\"r\")\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Random walk\")\n\n # plot histogram of priority and hillclimber\n cost_list = []\n with open(f\"../output_runs/text_info_prior_hill{i}_\\\n 1k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"prior hill:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Priority + Hill\")\n\n # plot histogram of simulated annealing\n cost_list = []\n with open(f\"../output_runs/simulated_annealing{i}_1000.txt\",\n \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+anneal:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Random + sim anneal\")\n\n # plot histogram of random plus hillclimber\n cost_list = []\n with open(f\"../output_runs/random_hill{i}_1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+hill:\", minim, maxim)\n plt.hist(cost_list, bins=100, alpha=0.5,\n label=f\"Random + Hillclimber\")\n\n # plot histogram of kmeans plus hillclimber\n cost_list = []\n with open(f\"../output_runs/text_k-means_hill{i}_\\\n 1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Kmean and hill {i}\")\n totalmin = min(minima)\n plt.axvline(x=totalmin, color=\"g\")\n plt.title(f\"4 algorithms Wijk {i}, lowest cost: {totalmin}\")\n plt.xlabel(\"Cost\")\n plt.ylabel(\"Frequency\")\n plt.legend(loc='upper right')\n plt.show()",
"def plot_loss(i, trainHolder, trainLoss, validHolder, validLoss, name, checkpoint_hist, prog_dir):\n \n plt.figure()\n plt.scatter(adjustForMinibatch(trainHolder), trainLoss, label='train') \n plt.scatter(adjustForMinibatch(validHolder), validLoss, label='validation') \n addCheckpoints(checkpoint_hist)\n plt.xlabel('number of times weights being updated')\n plt.ylabel(name)\n plt.legend()\n plt.savefig(os.path.join(prog_dir, name+str(i)+'.png'))\n plt.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plot and save histograms from predicted steerings and real steerings. Arguments
|
def make_and_save_histogramsY(pred_steerings, real_steerings,
img_name = "histogramsY.png"):
pred_steerings = np.array(pred_steerings)
real_steerings = np.array(real_steerings)
max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))
min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))
bins = np.linspace(min_h, max_h, num=50)
plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')
plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')
#plt.title('Steering angle')
plt.legend(fontsize=10)
plt.savefig(img_name, bbox_inches='tight')
|
[
"def make_and_save_histogramsX(pred_steerings, real_steerings,\n img_name = \"histogramsX.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))\n bins = np.linspace(min_h, max_h, num=50)\n plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')\n plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')\n #plt.title('Steering angle')\n plt.legend(fontsize=10)\n plt.savefig(img_name, bbox_inches='tight')",
"def plot_prediction_histograms(dataset, model1, model2, model3=[]):",
"def plot_hist_snfit_meta(self): \n \n self.read_meta()\n self.read_snfit_results()\n\n \n self.diff_x0 = []\n self.diff_x0_err = []\n self.diff_x1 = []\n self.diff_x1_err = [] \n self.diff_c = []\n self.diff_c_err = [] \n self.diff_mb = []\n self.diff_mb_err = [] \n self.diff_cov_x0_x1 = []\n self.diff_cov_x0_c = []\n self.diff_cov_x1_c = []\n self.diff_cov_mb_x1 = []\n self.diff_cov_mb_c = []\n\n for i in range (len(self.sn_name)):\n for j in range (len(self.meta_sn_name_list)):\n if self.sn_name[i] == self.meta_sn_name_list[j]:\n if np.abs(self.mb[i] - self.meta_mb[j]) < 0.0001:\n self.diff_x0.append(self.x0[i] - self.meta_x0[j])\n self.diff_x0_err.append(self.x0_err[i] - self.meta_x0_err[j])\n self.diff_x1.append(self.x1[i] - self.meta_x1[j])\n self.diff_x1_err.append(self.x1_err[i] - self.meta_x1_err[j]) \n self.diff_c.append(self.c[i] - self.meta_c[j])\n self.diff_c_err.append(self.c_err[i] - self.meta_c_err[j]) \n self.diff_mb.append(self.mb[i] - self.meta_mb[j])\n self.diff_mb_err.append(self.mb_err[i] - self.meta_mb_err[j])\n# self.diff_cov_x0_x1.append()\n# self.diff_cov_x0_c.append()\n# self.diff_cov_x1_c.append()\n# self.diff_cov_mb_x1.append()\n# self.diff_cov_mb_c.append()\n else:\n print self.x1[i] - self.meta_x1[j], self.sn_name[i],self.meta_sn_name_list[j], self.x1[i], self.meta_x1[j]\n\n\n fig = plt.figure(figsize=(8.,8.)) \n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0,25,label='$\\Delta$ X0')\n ax0_2.hist(self.diff_x0_err,25,label='$\\Delta$ X0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x0_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1,25,label='$\\Delta$ X1')\n ax0_2.hist(self.diff_x1_err,25,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x1_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c,25,label='$\\Delta$ Color')\n ax0_2.hist(self.diff_c_err,25,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/color_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb,50,label='$\\Delta$ mb')\n ax0_2.hist(self.diff_mb_err,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/mb_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()",
"def graphic(self,final):\r\n\t\t\r\n\t\t\r\n\t\t#Creating an adeguate linspace\r\n\t\tbins \t\t= np.linspace(0,self.Ndeputies,int(self.Ndeputies/2))\r\n\t\t#opening the real assigned seats file \r\n\t\treal \t\t= [line.strip() for line in open('Elections/Real_Election_for_Confrontation.txt')]\r\n\r\n\t\tif len(real) == 0:\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tfor i in self.Parties:\r\n\t\t\t\t#Creating the single histogram\r\n\t\t\t\tplt.hist(final[i],bins,alpha = 0.5,label = i+'_sim') \r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\telif len(real) !=0:\r\n\t\t\t\r\n\t\t\treal = {real[0].split('\\t')[i]:float(real[1].split('\\t')[i]) for i in range(len(real[0].split('\\t')))}\r\n\t\t\t\r\n\t\t\tif len(real) != len(final):\r\n\t\t\t\traise ValueError ('There is not the same number of both real ad simulated parties!')\r\n\t\t\t\r\n\t\t\tif final.keys() != real.keys():\r\n\t\t\t\traise ValueError ('The simulated Parties and the real parties seem to have different names!')\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tfor i in self.Parties:\r\n\t\t\t\tplt.hist(final[i],bins,alpha = 0.5,label = i+'_sim')\r\n\t\t\t\t#creating an histogram with both real and results data displayed\r\n\t\t\t\tplt.hist(real[i],bins,alpha = 0.5,label = i + 'real') \r\n\t\t\t\r\n\t\t\t\r\n\t\tplt.xlabel('Seats')\r\n\t\tplt.legend(loc='upper right')\r\n\t\tplt.title(self.election)\r\n\t\t#saving the histogram\r\n\t\tplt.savefig(\"Graphic/Histogram-Confrontation_for_\"+self.election+\".png\") \r\n\t\tplt.close()\r\n\t\r\n\t\tfor i in self.Parties:\r\n\t\t\tif self.Results[i]<0.05:\r\n\t\t\t\tcontinue\r\n\t\t\t#Plotting the histogram with all possible results throughout the simulation.\r\n\t\t\tplt.hist(self.allResults[i],bins,alpha = 0.5,label = i) \r\n\t\t\t\r\n\r\n\t\tplt.xlabel('Seats')\r\n\t\tplt.legend(loc='upper right')\r\n\t\t#Saving the histogram\r\n\t\tplt.savefig(\"Graphic/Numbers of possible results_for_\"+self.election+'.png')\r\n\t\tplt.close()",
"def plot_data_hist(self, show=False):\n\t\tfig, ax = plt.subplots(2, 1 ,figsize= (15,10))\n\n\t\tax[0].hist(self.data['x_central'][np.random.randint(self.n_train*self.n_s)]\n\t\t\t,label='training data',alpha=0.5)\n\n\t\tax[0].legend(frameon = False)\n\t\tax[0].set_xlabel(\"Data amplitude\")\n\t\tax[0].set_ylabel('Counts')\n\t\tax[0].set_title('%i data points'%self.input_shape[0])\n\t\tif not self.rescaled: ax[0].set_xlim(0,6)\n\n\t\tax[1].hist(self.data['x_central_test'][np.random.randint(self.n_s)]\n\t\t\t,label='test data',alpha=0.5)\n\n\t\tax[1].legend(frameon = False)\n\t\tax[0].set_title('%i data points'%self.input_shape[0])\n\t\tax[1].set_xlabel(\"Data amplitude\")\n\t\tax[1].set_ylabel('Counts')\n\t\tif not self.rescaled: ax[1].set_xlim(0,6)\n\t\t\n\t\tplt.savefig(f'{self.figuredir}data_visualization_hist_{self.modelversion}.png')\n\t\tif show: plt.show()\n\t\tplt.close()",
"def save_histograms(self):\n if self.rank == 0:\n output_switch = \"%sSwitch_%d.hst\" % (settings.DIRS['calc'], self._stepT)\n output_updown = \"%sUpDown_%d.hst\" % (settings.DIRS['calc'], self._stepT)\n output_round_trip = \"%sRoundTrip_%d.hst\" % (settings.DIRS['calc'], self._stepT)\n\n output_switch = open(output_switch, \"w+\")\n output_updown_f = open(output_updown, \"w+\")\n output_round_trip_f = open(output_round_trip, \"w+\")\n output_files = [output_switch, output_updown_f, output_round_trip_f]\n\n tmpTrip = 0.0\n\n for i in range(self.size):\n if i < self.size-1:\n l = \"%d;%f;%f\\n\" % (i, self.temp_list[i], self.switch_histogram[i]/float(self.switch_count))\n output_switch.writelines(l)\n\n l = \"%d;%f;%f\\n\" % (i, self.temp_list[i], self.up_histogram[i]/float(self.up_histogram[i] + self.down_histogram[i]))\n output_updown_f.writelines(l)\n\n if self.round_trip[i] != 0:\n tmpTrip = self.round_trip[i] / 2.0\n l = \"%d;%f;%f;%f\\n\" % (i, self.temp_list[i], tmpTrip, (settings.MC_MODULE.stop_step - settings.MC_MODULE.start_step)/float(tmpTrip))\n output_round_trip_f.writelines(l)\n else:\n l = \"%d;%f;0.00;0.00\\n\" % (i, self.temp_list[i])\n output_round_trip_f.writelines(l)\n\n for output in output_files:\n output.close()\n\n ### save histograms of temperature\n output_temp_histogram = \"%sTemp_%d_%d.hst\" % (settings.DIRS['calc'], self.rank, self._stepT)\n output_temp_histogram_f = open(output_temp_histogram, \"a+\")\n for i in range(self.size):\n l = \"%d;%d;%f;%f\\n\" % (self.rank, i, self.temp_list[i], self.temperature_histogram[i]/float(self.switch_count))\n output_temp_histogram_f.writelines(\"\\n\")\n output_temp_histogram_f.close()",
"def postplot(num, M, V, L, os_probability, streamflow, av_multiplier, Q_futures , Nsize, low_percentile, case_to_derive):\n \n\n# Figure 1: Fit KOSUGI MODEL to historical data\n# Figure 2: derived FDCs\n\n\n# Derive streamflow statistics\n Q_m, Q_v, Q_low = streamflow_statistics(Q_futures, low_percentile, num, case_to_derive)\n \n# Figure 3: plot sampled vs calculated mean/median values\n plt.plot(Q_m, 'ro', label=\"Derived\")\n plt.plot(M, 'b*', label=\"Sampled\")\n plt.legend(loc=\"upper right\")\n plt.grid() \n plt.xlabel(\"Futures\")\n plt.ylabel(\"M\")\n plt.savefig('PostProcessor_plots' + '/Fig3-M.png')\n plt.clf()\n\n# Figure 4: plot sampled vs calculated Std/CV values\n plt.plot(Q_v, 'ro', label=\"Derived\")\n plt.plot(V, 'b*', label=\"Sampled\")\n plt.legend(loc=\"upper right\")\n plt.grid()\n plt.xlabel(\"Futures\")\n plt.ylabel(\"V\")\n plt.savefig('PostProcessor_plots' + '/Fig4-V.png') \n plt.clf()\n\n\n# Figure 5: plot sampled vs calculated low percentile values\n plt.plot(Q_low, 'ro', label=\"Derived\")\n plt.plot(L, 'b*', label=\"Sampled\")\n plt.legend(loc=\"upper right\")\n plt.grid() \n plt.xlabel(\"Futures\")\n plt.ylabel(\"Low Percentile [$m^3/s$]\")\n plt.savefig('PostProcessor_plots' + '/Fig5-Low.png') \n plt.clf()\n\n\n#Figure 6: Random 3 years of observed stream flow vs derived streamflow\n plt.figure(figsize=(11, 6))\n idplot = np.where((av_multiplier[:,1] > 1.75) & (av_multiplier[:,0] < 0.75) & (0.5 < av_multiplier[:,0]) ) # find the scenario to plot\n idplot = np.asarray(idplot) # converting tuple into int array \n if np.size(idplot) == 0:\n idplot = np.where(av_multiplier[:,1] >= 1.75)\n idplot = np.asarray(idplot) # converting tuple into int array \n idplot = np.min(idplot) # get on of the indices if there is more than one\n \n qplot = Q_futures[:,idplot] # select the future \n qplot = np.reshape(qplot, (len(os_probability),1)) \n #plt.plot(streamflow[8765:-1],'r')\n #plt.plot(qplot[8765:-1],c='0.35')\n plt.plot(streamflow[8765:-1],'r', label=\"Observed Streamflow\")\n plt.plot(qplot[8765:-1], label=\"Derived Streamflow\",c='0.35')\n plt.legend(loc=\"upper right\")\n plt.xlabel(\"Time [Days]\")\n plt.ylabel(\"Discharge [$m^3/s$]\")\n plt.grid() \n plt.xlim(0, len(qplot[8765:-1])+10)\n plt.legend(bbox_to_anchor=(1.05, 1))\n plt.tight_layout()\n plt.savefig('PostProcessor_plots' + '/Fig6-ObservedvsDerived_discharge.png') \n plt.clf()",
"def plot_train_hist(y_vals, checked_iters, fig_path, ylabel):\n x_vals = np.array(checked_iters)\n y_vals = np.vstack(y_vals)\n plt.plot(x_vals, y_vals, '-', linewidth=2)\n plt.xlabel('Training iteration')\n plt.ylabel(ylabel)\n plt.title('Evaluated every: {:d} iterations'.format(\n checked_iters[1]-checked_iters[0]))\n plt.tight_layout()\n ylabel='_'.join(ylabel.lower().split())\n fig_file = os.path.join(fig_path, '{:s}_history.eps'.format(ylabel))\n plt.savefig(fig_file)\n plt.savefig(os.path.join(fig_path, '{:s}_history.png'.format(ylabel)))\n plt.clf()\n print('Wrote: {:s}'.format(fig_file))",
"def spike_hist():\n\n import matplotlib.pyplot as plt\n\n plt.figure()\n for i, (_, spikes) in enumerate(utils.get_data_set('train')):\n x = np.reshape(spikes, (-1,))\n x = x[np.isnan(x) == False]\n print(x.shape)\n\n plt.subplot(5, 2, i + 1)\n plt.hist(np.cast[np.int32](x), range(6), log=True)\n\n plt.show()",
"def plot_predictions_histogram(df):\n\n fig = go.Figure()\n fig.add_trace(go.Histogram(x=df[\"preds\"], name=\"preds\"))\n fig.add_trace(go.Histogram(x=df[\"truth\"], name=\"truth\"))\n\n # Overlay both histograms\n fig.update_layout(barmode=\"overlay\")\n # Reduce opacity to see both histograms\n fig.update_traces(opacity=0.5)\n fig.update_layout(xaxis_title=r\"HOMO-LUMO\", yaxis_title=r\"count.\")\n wandb.log({f\"Predictions Hist\": fig})",
"def savehists(sag):\n for i in sag['train'].columns.values:\n sag.dohistograms(sag['train'], i)\n\n for i in sag['train'].columns.values:\n #plt.close()\n sag.dotimeseries(sag['train'], i)\n #plt.close('all')",
"def plot_residuals(turnstile_weather, predictions):\n plt.figure()\n (turnstile_weather['ENTRIESn_hourly'] - predictions).hist()\n return plt",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def plot_explore_histograms(ds, labels):\n # set up a dictionary with the percentiles of the histograms to be plotted\n p = {0: {\"lwr\": 0.0, \"upr\": 100.0},\n 1: {\"lwr\": 0.1, \"upr\": 99.9},\n 2: {\"lwr\": 0.5, \"upr\": 99.5},\n 3: {\"lwr\": 1.0, \"upr\": 99.0},\n 4: {\"lwr\": 2.5, \"upr\": 97.5}}\n site_name = ds.root[\"Attributes\"][\"site_name\"]\n plt.ion()\n for label in labels:\n var = pfp_utils.GetVariable(ds, label)\n sdt = var[\"DateTime\"][0]\n edt = var[\"DateTime\"][-1]\n fig = plt.figure(figsize=(11, 8), tight_layout=True)\n window_title = site_name + \": \" + var[\"Label\"]\n fig.canvas.manager.set_window_title(window_title)\n gs = gridspec.GridSpec(2, 5, height_ratios=[1, 0.5])\n ax_ts = fig.add_subplot(gs[0, :])\n title_str = site_name + \": \" + sdt.strftime(\"%Y-%m-%d\") + \" to \"\n title_str += edt.strftime(\"%Y-%m-%d\")\n ax_ts.set_title(title_str)\n ax_ts.plot(var[\"DateTime\"], var[\"Data\"], 'b.', label=var[\"Label\"])\n ax_ts.legend()\n for n in p:\n d = plot_explore_do_histogram(var, p[n][\"lwr\"], p[n][\"upr\"])\n lwrs = str(pfp_utils.round2significant(d[\"lwr\"], 4))\n uprs = str(pfp_utils.round2significant(d[\"upr\"], 4))\n x = numpy.arange(len(d[\"hist\"]))\n ax_hist = fig.add_subplot(gs[1, n])\n label = str(p[n][\"lwr\"]) + \",\" + str(p[n][\"upr\"])\n ax_hist.bar(x, d[\"hist\"])\n ax_hist.text(0.5, 0.9, label, transform=ax_hist.transAxes,\n horizontalalignment='center')\n ax_hist.set_xticks([x[1], x[-2]])\n ax_hist.set_xticklabels([lwrs, uprs])\n plt.draw()\n pfp_utils.mypause(0.5)\n return",
"def turns_hist(turns, seed, num_players, filename):\n plt.hist(turns, 300, (1, 300))\n plt.title(f\"Turns to Win in Chutes and Ladders: {num_players} player(s) \\n \"\n f\"{len(turns)} simulations, random seed = {seed}\")\n plt.xlabel(\"Turns to Land on Final Space\")\n plt.ylabel(\"Number of Games\")\n plt.savefig(filename)",
"def save_dataset_visual(lines, output_dir):\n\n # Ensure output directory exists\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n cols = 3\n rows = 3\n fig_size = (7 * cols, 4 * rows) # Figure width and height, in inches\n # Random sample of images\n save_name = \"training_data_sample.png\"\n fig, ax = plt.subplots(rows, cols, figsize=fig_size)\n for row in range(rows):\n for col in range(cols):\n idx = np.random.randint(0, len(lines))\n ax[row, col].imshow(ndimage.imread(lines[idx][0]))\n ax[row, col].set_title(\"Angle = \" + str(round(lines[idx][1], 3)))\n plt.savefig(output_dir + save_name, bbox_inches='tight')\n # Distribution of steering angles\n save_name = \"data_histogram.png\"\n fig_size = (5, 3) # Figure width and height, in inches\n num_bins = 100\n angles = np.array([line[1] for line in lines])\n hist, bins = np.histogram(angles, bins=num_bins)\n fig = plt.figure(figsize=fig_size)\n plt.bar(bins[:-1], hist)\n plt.xlabel(\"Steering Angle\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Distribution of Steering Angles in Training Data\")\n plt.savefig(output_dir + save_name, bbox_inches='tight')",
"def hist_plot(f_yld,Ahist,Bhist):\n EA=[]; SA=[]; EB=[]; SB=[]\n if (len(Ahist)!=len(Bhist)):\n raise IOError, 'Unexpected unbalanced step size'\n\n sigma_A=[]; sigma_B=[]\n for i in xrange(len(Ahist)):\n A = Ahist[i]; B = Bhist[i]\n ea = A.H.eps; sa = A.H.sig\n eb = B.H.eps; sb = B.H.sig\n\n sig_A = A.stress; sig_B = B.stress\n sigma_A.append(sig_A); sigma_B.append(sig_B)\n\n EA.append(ea); EB.append(eb)\n SA.append(sa); SB.append(sb)\n\n EA=np.array(EA); EB=np.array(EB)\n SA=np.array(SA); SB=np.array(SB)\n S6A=np.array(sigma_A); S6B=np.array(sigma_B)\n\n import matplotlib.pyplot as plt\n fig=plt.figure(figsize=(10,3.5));\n ax1=fig.add_subplot(131)\n ax2=fig.add_subplot(132)\n ax3=fig.add_subplot(133)\n\n ax1.plot(EA,SA,label='A',ls='-',zorder=99)\n ax1.plot(EB,SB,label='B',ls='-',zorder=100,alpha=0.4)\n\n ## plot yield locus\n pi = np.pi; sin=np.sin; cos=np.cos\n th = np.linspace(-pi,pi)\n x=cos(th);y=sin(th)\n z=np.zeros(len(th))\n s=np.array([x,y,z,z,z,z]).T\n print s.shape\n X=[]; Y=[]\n for i in xrange(len(s)):\n ys, phi, dphi, d2phi = vm(s[i])\n X.append(ys[0])\n Y.append(ys[1])\n\n X=np.array(X)\n Y=np.array(Y)\n ax2.plot(X,Y,label='Yield locus')\n ## initial location of region A stress state\n ax2.plot(S6A[0][0],S6A[0][1],'r.',mfc='None',\n mec='r',label='A initial')\n ## final location of region A stress state\n ax2.plot(S6A[-1][0],S6A[-1][1],'rx',mfc='None',\n mec='r',label='A final')\n ## initial location of region B stress state\n ax2.plot(S6B[0][0],S6B[0][1],'g.',mfc='None',\n mec='g',label='B initial')\n ## final location of region B stress state\n ax2.plot(S6B[-1][0],S6B[-1][1],'gx',label='B final')\n\n A1=X*SA[-1];A2=Y*SA[-1]\n B1=X*SB[-1];B2=Y*SB[-1]\n ax3.plot(A1,A2,'-',label='Final Yield locus (A)')\n ax3.plot(B1,B2,'-',label='Final Yield locus (B)')\n # print 'A1'\n # print(A1)\n # print 'B1'\n # print(B1)\n ## initial location of region A stress state\n ax3.plot(S6A[0][0]*SA[0],S6A[0][1]*SA[0],\n 'r.',mfc='None',mec='r',label='A initial')\n ## final location of region A stress state\n ax3.plot(S6A[-1][0]*SA[-1],S6A[-1][1]*SA[-1],\n 'rx',mfc='None',mec='r',label='A final')\n ## initial location of region B stress state\n ax3.plot(S6B[0][0]*SB[0],S6B[0][1]*SB[0],\n 'g.',label='B initial')\n ## final location of region B stress state\n ax3.plot(S6B[-1][0]*SB[-1],S6B[-1][1]*SB[-1],\n 'gx',label='B final')\n ax2.legend();ax3.legend()\n\n fn='hist_plot.pdf'\n fig.tight_layout()\n fig.savefig(fn,bbox_inches='tight')\n print '%s has been saved'%fn",
"def bench_plotter(self):\n\n # plot random as histogram, upper en lower bound as a red line\n minima = []\n for i in range(1, 4):\n cost_list = []\n with open(f\"../output_runs/text_info_random{i}_10k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n counter = 0\n for number in text:\n counter += 1\n if number is not \"\":\n cost_list.append(int(number))\n if counter == 1000:\n break\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random:\", minim, maxim)\n plt.axvline(x=53188, color='r')\n plt.axvline(x=103030, color=\"r\")\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Random walk\")\n\n # plot histogram of priority and hillclimber\n cost_list = []\n with open(f\"../output_runs/text_info_prior_hill{i}_\\\n 1k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"prior hill:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Priority + Hill\")\n\n # plot histogram of simulated annealing\n cost_list = []\n with open(f\"../output_runs/simulated_annealing{i}_1000.txt\",\n \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+anneal:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Random + sim anneal\")\n\n # plot histogram of random plus hillclimber\n cost_list = []\n with open(f\"../output_runs/random_hill{i}_1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+hill:\", minim, maxim)\n plt.hist(cost_list, bins=100, alpha=0.5,\n label=f\"Random + Hillclimber\")\n\n # plot histogram of kmeans plus hillclimber\n cost_list = []\n with open(f\"../output_runs/text_k-means_hill{i}_\\\n 1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Kmean and hill {i}\")\n totalmin = min(minima)\n plt.axvline(x=totalmin, color=\"g\")\n plt.title(f\"4 algorithms Wijk {i}, lowest cost: {totalmin}\")\n plt.xlabel(\"Cost\")\n plt.ylabel(\"Frequency\")\n plt.legend(loc='upper right')\n plt.show()",
"def plot_loss(i, trainHolder, trainLoss, validHolder, validLoss, name, checkpoint_hist, prog_dir):\n \n plt.figure()\n plt.scatter(adjustForMinibatch(trainHolder), trainLoss, label='train') \n plt.scatter(adjustForMinibatch(validHolder), validLoss, label='validation') \n addCheckpoints(checkpoint_hist)\n plt.xlabel('number of times weights being updated')\n plt.ylabel(name)\n plt.legend()\n plt.savefig(os.path.join(prog_dir, name+str(i)+'.png'))\n plt.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plot and save confusion matrix computed from predicted and real labels. Arguments
|
def plot_confusion_matrix(real_labels, pred_prob, classes,
normalize=False,
img_name="confusion.png"):
real_labels = np.array(real_labels)
# Binarize predicted probabilities
pred_prob = np.array(pred_prob)
pred_labels = np.zeros_like(pred_prob)
pred_labels[pred_prob >= 0.5] = 1
cm = confusion_matrix(real_labels, pred_labels)
    # Normalize before plotting so the heatmap, the text annotations and the
    # colour threshold below all use the same matrix.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    #plt.title("Confusion matrix")
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes, rotation=90)
    thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(img_name)
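
A minimal usage sketch for the function above, again not part of the dataset row: the class names and synthetic probabilities are placeholders, and the function's own module-level dependencies (numpy, matplotlib, itertools, sklearn's confusion_matrix) are assumed to be imported.

import numpy as np

# Synthetic binary ground truth and predicted probabilities, for illustration only.
rng = np.random.default_rng(0)
real = rng.integers(0, 2, size=200)
prob = np.clip(0.7 * real + rng.normal(0.2, 0.15, size=200), 0.0, 1.0)

plot_confusion_matrix(real, prob, classes=["negative", "positive"],
                      normalize=True, img_name="confusion.png")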
|
[
"def confusion_matrix_plot(y_true, y_pred) -> None:\n from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix\n\n cm = confusion_matrix(y_true, y_pred)\n plot = ConfusionMatrixDisplay(confusion_matrix=cm).plot()\n plot.ax_.set_title(\"Confusion Matrix\")",
"def plot_print_confusion_matrix(y_true, y_pred, classes,dataset,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print_cm(cm,classes,ResultsFolder)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n fileToSaveConfusionMatrix=os.path.join(ResultsFolder,dataset+'_ConfusionMatrix.png')\n plt.savefig(fileToSaveConfusionMatrix)\n print(\"[INFO] Confusion matrix saved to {}\".format(fileToSaveConfusionMatrix))\n\n plt.show()\n\n\n return ax",
"def confusion_matrix(y_true, y_pred):\n skplt.plot_confusion_matrix(y_true, y_pred, normalize=True)\n plt.show()",
"def plot_confusion_matrix(y_val, y_pred, classes, model_name=None):\n \n print('\\n clasification report:\\n', classification_report(y_val, y_pred, target_names=classes))\n print(\"-----------------------------------------------\")\n\n cnf_matrix = confusion_matrix(y_val, y_pred)\n \n # Create the basic matrix\n plt.imshow(cnf_matrix, cmap=plt.cm.Purples) \n\n # Add title and axis labels\n plt.title(f'{model_name} Confusion Matrix')\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n # Add appropriate axis scales\n class_names = ['',classes[0],'',classes[1],'']# set(y) # Get class labels to add to matrix\n tick_marks = [-0.5,0,0.5,1,1.5]\n \n # Add appropriate axis scales\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n # Add labels to each cell\n thresh = cnf_matrix.max() / 2. # Used for text coloring below\n # Here we iterate through the confusion matrix and append labels to our visualization \n for i, j in itertools.product(range(cnf_matrix.shape[0]), range(cnf_matrix.shape[1])):\n plt.text(j, i, cnf_matrix[i, j],\n horizontalalignment='center',\n color='white' if cnf_matrix[i, j] > thresh else 'black')\n \n # Add a legend\n plt.colorbar()\n plt.show()",
"def plot_matrix(y_true, y_pred, classes, title=None, cmap=plt.cm.Blues):\n\n\t# Compute confusion matrix\n\tcm = confusion_matrix(y_true, y_pred)\n\n\t# Only use the labels that appear in the data\n\tclasses = classes[unique_labels(y_true, y_pred)]\n\n\tfig, ax = plt.subplots()\n\tim = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n\tax.figure.colorbar(im, ax=ax)\n\t# We want to show all ticks...\n\tax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Cluster label')\n\t \n\t# Rotate the tick labels and set their alignment.\n\tplt.setp(ax.get_xticklabels(), fontsize=6, rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\tplt.setp(ax.get_yticklabels(), fontsize=6)\n\n\t# Loop over data dimensions and create text annotations.\n\tthresh = cm.max() / 2.\n\tfor i in range(cm.shape[0]):\n\t\tfor j in range(cm.shape[1]):\n\t\t\tif cm[i,j] > 0:\n\t\t\t\tax.text(j, i, cm[i, j], fontsize=6, ha=\"center\", va=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\tfig.tight_layout()\n\tplt.show()\n\treturn ax",
"def plot_cmatrix_wrapper(y_true, y_pred, classes, **kwargs):\n cm = confusion_matrix(y_true, y_pred)\n plot_confusion_matrix(cm, classes, **kwargs)",
"def plot_conf_matrix(y_true, y_preds):\n conf_matrix = confusion_matrix(y_true, y_preds)\n ax = sns.heatmap(conf_matrix, annot=True, fmt='g')\n ax.invert_xaxis()\n ax.invert_yaxis()\n plt.ylabel('Actual')\n plt.xlabel('Predicted')\n plt.title('Confusion Matrix', fontsize=16)\n plt.show()",
"def visual_cm(true_y, pred_y, labels = None):\n # visualizing the confusion matrix\n\n # setting labels\n lbls = labels\n \n\n # declaring a confusion matrix object\n cm = confusion_matrix(y_true = true_y,\n y_pred = pred_y)\n\n\n # heatmap\n sns.heatmap(cm,\n annot = True,\n xticklabels = lbls,\n yticklabels = lbls,\n cmap = 'Blues',\n fmt = 'g')\n\n\n plt.xlabel('Predicted')\n plt.ylabel('Actual')\n plt.title('Confusion Matrix of the Classifier')\n plt.show()",
"def plot_classification(X,\n y,\n y_true,\n y_pred,\n metrics=(\"acc\", \"sen\", \"spe\"),\n fig_size=(12, 5),\n fig_show=True,\n save_as=\"figure.pdf\",\n x_label=\"x\",\n y_label=\"y\",\n **plot_kwargs):\n\n # Convert the input data to pd.Series\n if not isinstance(X, pd.Series):\n X = pd.Series(X.reshape((len(X), )))\n if not isinstance(y, pd.Series):\n y = pd.Series(y.reshape((len(y), )))\n if not isinstance(y_true, pd.Series):\n y_true = pd.Series(y_true.reshape((len(y_true), )))\n if not isinstance(y_pred, pd.Series):\n y_pred = pd.Series(y_pred.reshape((len(y_pred), )))\n\n # Compute the classification metrics\n computed_metrics = [(metric, round(classification_metric(metric, y_true, y_pred), 2)) for metric in metrics]\n\n # Prepare the temporary DataFrame\n df = pd.DataFrame({\"X\": X, \"y\": y, \"y_true\": y_true, \"y_pred\": y_pred, \"matches\": y_true == y_pred})\n\n # Create the figure\n fig = plt.figure(figsize=fig_size)\n\n # Plot the true labels scatter-plot\n ax = fig.add_subplot(1, 2, 1)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_true\", data=df, **plot_kwargs)\n\n ax.set_title(\"Ground truth\")\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plt.tight_layout()\n\n # Plot the predicted labels scatter-plot\n ax = fig.add_subplot(1, 2, 2)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_pred\", size=\"matches\", data=df, **plot_kwargs)\n\n ax.set_title(\"Predicted ({})\".format(\" \".join([\"{} = {},\".format(m, v) for m, v in computed_metrics])))\n ax.set_xlabel(x_label)\n ax.set_ylabel(\"\")\n plt.tight_layout()\n\n # Store the figure\n if save_as:\n plt.savefig(save_as)\n\n # Show the graph (if enabled)\n if fig_show:\n plt.show()\n else:\n plt.close()",
"def plot_confusion_matrix(cm, class_names):\n figure = plt.figure(figsize=(10, 10))\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n \n\n # Normalize the confusion matrix.\n #cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n #threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n #print(cm[i, j])\n color = \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure",
"def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()",
"def plot_confusion_matrix(name, trained_predictor, X_test, y_test):\n\n fig, ax = plt.subplots()\n fig.tight_layout()\n cm = confusion_matrix(y_test, trained_predictor.predict(X_test), normalize=\"all\")\n ConfusionMatrixDisplay(cm, display_labels=[\"False\", \"True\"]).plot(\n ax=ax\n )\n plt.title(name)",
"def get_accuracy_and_plot_confusion(y_correct, y_pred, classes, plot=True, title='Confusion matrix'):\n if plot:\n cm = confusion_matrix(y_correct, y_pred)\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return accuracy_score(y_correct, y_pred)",
"def plot_confusion_matrix(cm, title, categories):\n\n plt.figure(figsize=(7.6,7.6))\n plt.imshow(cm, interpolation='none',cmap='Blues')\n for (i,j), z in np.ndenumerate(cm):\n plt.text(j, i, z, ha='center', va='center')\n plt.xlabel(\"prediction\")\n plt.ylabel(\"ground truth\")\n plt.title(title+' set')\n plt.gca().set_xticks(range(len(categories)))\n plt.gca().set_xticklabels(categories, rotation=45)\n plt.gca().set_yticks(range(len(categories)))\n plt.gca().set_yticklabels(categories)\n plt.gca().invert_yaxis()\n \n plt.tight_layout()\n plt.savefig('./2_Results/plots/confusion_matrix_'+title+'_set.png', dpi=100, format='png', trasparent=True)\n\n plt.close()",
"def plot_confusion_matrix(cm, class_names):\n figure = plt.figure(figsize=(8, 8))\n plt.imshow(cm, interpolation=\"nearest\", cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n cm = cm.numpy()\n # Normalize the confusion matrix.\n cm = np.around(cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n threshold = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n color = \"white\" if cm[i, j] > threshold else \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n return figure",
"def Confusion_Matrix(predicted_labels: list, actual_labels: list):\n labels = set(actual_labels)\n\n predicted_labels = list(map(custom_round, predicted_labels))\n\n matrix = pd.DataFrame(index=labels, columns=labels)\n\n matrix = matrix.fillna(0)\n\n for i in range(len(actual_labels)):\n matrix[actual_labels[i]][predicted_labels[i]] += 1\n m = matrix.values\n\n plt.matshow(m, cmap=plt.cm.Blues)\n\n for i in range(2):\n for j in range(2):\n c = m[j, i]\n plt.text(i, j, str(c), va='center', ha='center')\n\n plt.show()",
"def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',saveas='cm', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n\n plt.figure() \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n foo_fig = plt.gcf() # 'get current figure'\n# foo_fig.savefig('confusion_matrix.eps', format='eps', dpi=1000) \n foo_fig.savefig(saveas, dpi=1000, bbox_inches='tight')\n plt.show()",
"def plot_confusion_matrix(cm, class_names):\n figure = plt.figure(figsize=(8, 8))\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n # Normalize the confusion matrix.\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n threshold = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n color = \"white\" if cm[i, j] > threshold else \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return figure",
"def plot_confusion_matrix(cm, class_names):\n figure = plt.figure(figsize=(8, 8))\n ax = plt.gca()\n im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion matrix\")\n plt.colorbar(im, fraction=0.046, pad=0.04)\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n\n # Normalize the confusion matrix.\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n # Use white text if squares are dark; otherwise black.\n threshold = cm.max() / 2.\n for i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n color = \"white\" if cm[i, j] > threshold else \"black\"\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=color)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n return figure"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Discover and add a Tasmota sensor.
|
async def async_discover_sensor(tasmota_entity, discovery_hash):
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
)
|
[
"async def async_add_binary_sensor(mac):\n if USB_MOTION_ID in api_stick.devices[mac].features:\n _LOGGER.debug(\"Add binary_sensors for %s\", mac)\n async_add_entities([USBBinarySensor(api_stick.devices[mac])])\n\n # Register services\n platform.async_register_entity_service(\n SERVICE_CONFIGURE_SCAN,\n {\n vol.Required(ATTR_SCAN_SENSITIVITY_MODE): vol.In(\n SCAN_SENSITIVITY_MODES\n ),\n vol.Required(ATTR_SCAN_RESET_TIMER): vol.All(\n vol.Coerce(int), vol.Range(min=1, max=240)\n ),\n vol.Required(ATTR_SCAN_DAYLIGHT_MODE): cv.boolean,\n },\n \"_service_configure_scan\",\n )\n platform.async_register_entity_service(\n SERVICE_CONFIGURE_BATTERY,\n {\n vol.Required(ATTR_SED_STAY_ACTIVE): vol.All(\n vol.Coerce(int), vol.Range(min=1, max=120)\n ),\n vol.Required(ATTR_SED_SLEEP_FOR): vol.All(\n vol.Coerce(int), vol.Range(min=10, max=60)\n ),\n vol.Required(ATTR_SED_MAINTENANCE_INTERVAL): vol.All(\n vol.Coerce(int), vol.Range(min=5, max=1440)\n ),\n vol.Required(ATTR_SED_CLOCK_SYNC): cv.boolean,\n vol.Required(ATTR_SED_CLOCK_INTERVAL): vol.All(\n vol.Coerce(int), vol.Range(min=60, max=10080)\n ),\n },\n \"_service_configure_battery_savings\",\n )",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n data = GoodServiceData()\n data.update()\n sensors = [\n MTASubwaySensor(line, data)\n for line in config.get(CONF_LINE)\n ]\n add_devices(sensors, True)",
"def test_oral_b_smartseries_7000(self):\n data_string = \"043e1e020100007173b66a1ba8120201050effdc0003210b0328041107373804b7\"\n data = bytes(bytearray.fromhex(data_string))\n\n # pylint: disable=unused-variable\n ble_parser = BleParser()\n sensor_msg, tracker_msg = ble_parser.parse_raw_data(data)\n print(sensor_msg)\n assert sensor_msg[\"firmware\"] == \"Oral-B\"\n assert sensor_msg[\"type\"] == \"SmartSeries 7000\"\n assert sensor_msg[\"mac\"] == \"A81B6AB67371\"\n assert sensor_msg[\"packet\"] == \"no packet id\"\n assert sensor_msg[\"data\"]\n assert sensor_msg[\"toothbrush\"] == 1\n assert sensor_msg[\"toothbrush state\"] == 'running'\n assert sensor_msg[\"pressure\"] == 'unknown pressure 40'\n assert sensor_msg[\"counter\"] == 1041\n assert sensor_msg[\"mode\"] == 'turbo'\n assert sensor_msg[\"sector\"] == 'sector 55'\n assert sensor_msg[\"sector timer\"] == 56\n assert sensor_msg[\"number of sectors\"] == 4\n assert sensor_msg[\"rssi\"] == -73",
"def flash_tasmota(self, flash_mode, serial_port):\n # Make sure device is tasmota\n if self.software != 'tasmota':\n print('{f_name} is {software}, not tasmota'.format(**self))\n return(False)\n if current_tasmota_version != get_tasmota_version():\n print('{RED}Error: Tasmota version mismatch expected: \"{expected}\", got \"{current}\"{NC}'.format(**colors, expected=current_tasmota_version, current= get_tasmota_version()))\n return(False)\n self.write_tasmota_config()\n\n correctPIO = os.path.join(espqdir, 'platformio_override.ini')\n tasmotaPIO = os.path.join(tasmota_dir, 'platformio_override.ini')\n if not os.path.exists(tasmotaPIO) or not cmp(correctPIO, tasmotaPIO):\n copyfile(correctPIO, tasmotaPIO)\n\n\n os.chdir(tasmota_dir)\n\n pio_call = 'platformio run -e tasmota-{flash_mode} -t upload'.format(flash_mode=flash_mode);\n\n # if we're flashing via wifi or serial port is specified to us,\n # specify it to pio\n if flash_mode == 'wifi' or serial_port:\n pio_call += ' --upload-port {port}'\n\n if flash_mode == 'wifi':\n self.flashing_notice(flash_mode, self.ip_addr)\n # If we don't know the IP address, ask device\n if not 'ip_addr' in self or not self.ip_addr:\n print('No IP address for this device in the config.'\n 'Querying device...')\n self.query_tas_status()\n if 'ip' in self.reported:\n print('{name} is online at {ip}'.format(name=self.f_name,\n ip=self.reported['ip']))\n self.ip_addr = self.reported['ip']\n else:\n print('{f_name} did not respond at {c_topic}. IP address '\n 'unavailable. Skipping device...'.format(**self))\n return(False)\n pio_call = pio_call.format(port=(self.ip_addr + '/u2'))\n elif flash_mode == 'serial':\n self.flashing_notice(flash_mode, serial_port)\n pio_call = pio_call.format(port=serial_port)\n print('{BLUE}{f_name}\\'s MQTT topic is '\n '{topic}{NC}'.format(**colors, **self))\n print(pio_call)\n flash_result = call(pio_call, shell=True)\n return(True if flash_result == 0 else False)",
"def flash_tasmota(self):\n # Make sure device is tasmota\n if self.software != 'tasmota':\n print('{f_name} is {software}, not tasmota'.format(**self))\n return(False)\n if current_tasmota_version != get_tasmota_version():\n print('{RED}Error: Tasmota version mismatch{NOCOLOR}'.format(**colors))\n return(False)\n self.write_tasmota_config()\n\n correctPIO = os.path.join(espqdir, 'platformio.ini')\n tasmotaPIO = os.path.join(tasmotadir, 'platformio.ini')\n if filecmp.cmp(correctPIO, tasmotaPIO) == False:\n shutil.copyfile(correctPIO, tasmotaPIO)\n\n os.chdir(tasmotadir)\n pio_call = 'platformio run -e {environment} -t upload --upload-port {port}'\n if self.flash_mode == 'wifi':\n pio_call = pio_call.format(environment='sonoff-wifi', port=(self.ip_addr + '/u2'))\n print(('{BLUE}Now flashing {module} {f_name} with {software} via '\n '{flash_mode} at {ip_addr}{NOCOLOR}'.format(**colors, **self)))\n elif self.flash_mode == 'serial':\n pio_call = pio_call.format(environment='sonoff-serial', port=self.serial_port)\n print(('{BLUE}Now flashing {module} {f_name} with {software} via '\n '{flash_mode} at {serial_port}{NOCOLOR}'.format(**colors, **self)))\n print('{BLUE}{f_name}\\'s MQTT topic is {base_topic}/{topic}{NOCOLOR}'.format(**colors, **self))\n print(pio_call)\n flash_result = call(pio_call, shell=True)\n os.chdir(espqdir)\n return(True if flash_result == 0 else False)",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host: str = config[CONF_HOST]\n port: int = config[CONF_PORT]\n name: str = config[CONF_NAME]\n url = f\"http://{host}:{port}/api/LiveData.xml\"\n\n gateway = Ted5000Gateway(url)\n\n # Get MUT information to create the sensors.\n gateway.update()\n\n entities = []\n for mtu in gateway.data:\n for description in SENSORS:\n entities.append(Ted5000Sensor(gateway, name, mtu, description))\n\n add_entities(entities)",
"def LSM_acquisition(add):\n # control register\n CTRL0 = 0x1f # p.34, accelerator\n CTRL1 = 0x20\n CTRL2 = 0x21\n CTRL5 = 0x24 # p.36, magnetic\n CTRL6 = 0x25\n CTRL7 = 0x26\n FIFO_CTRL = 0x2e # p.40\n # accelerater\n OUT_X_L_A = 0x28\n OUT_X_H_A = 0x29\n OUT_Y_L_A = 0x2a\n OUT_Y_H_A = 0x2b\n OUT_Z_L_A = 0x2c\n OUT_Z_H_A = 0x2d\n # magentic\n OUT_X_L_M = 0x08\n OUT_X_H_M = 0x09\n OUT_Y_L_M = 0x0a\n OUT_Y_H_M = 0x0b\n OUT_Z_L_M = 0x0c\n OUT_Z_H_M = 0x0d\n\n # follow lsm303D arduino library\n # AFS = 0, +-2g scale\n bus.write_byte_data(add, CTRL2, 0x00)\n # 50 Hz AODR, all axis enable\n bus.write_byte_data(add, CTRL1, 0x57)\n # high resolution, 6.25Hz MODR\n bus.write_byte_data(add, CTRL5, 0x64)\n # +-4 gauss scale\n bus.write_byte_data(add, CTRL6, 0x20)\n # low power mode off, continuous conversion mode\n bus.write_byte_data(add, CTRL7, 0x00)\n # # FIFO mode\n # bus.write_byte_data(add, CTRL0, 0b01000000)\n # bus.write_byte_data(add, FIFO_CTRL, 0b01000000)\n # # accelerator with 12.5Hz, all axis enable\n # bus.write_byte_data(add, CTRL1, 0b00110111)\n # # magnetic 12.5Hz, high resolutn, temp en\n # bus.write_byte_data(add, CTRL5, 0b11100000)\n # # full scale range \\pm 12 gauss\n # bus.write_byte_data(add, CTRL6, 0b01101000)\n # # enable magnetic\n # bus.write_byte_data(add, CTRL7, 0x00)\n\n # accelerator accumulate\n while True:\n uint16_ax = (bus.read_byte_data(add, OUT_X_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_A)\n uint16_ay = (bus.read_byte_data(add, OUT_Y_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_A)\n uint16_az = (bus.read_byte_data(add, OUT_Z_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_A)\n\n uint16_mx = (bus.read_byte_data(add, OUT_X_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_M)\n uint16_my = (bus.read_byte_data(add, OUT_Y_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_M)\n uint16_mz = (bus.read_byte_data(add, OUT_Z_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_M)\n # accelerometer 12 bit left aligned\n # ax = twos_comp(uint16_ax>>4, 12)\n # ay = twos_comp(uint16_ay>>4, 12)\n # az = twos_comp(uint16_az>>4, 12)\n ax = twos_comp(uint16_ax, 16)\n ay = twos_comp(uint16_ay, 16)\n az = twos_comp(uint16_az, 16)\n\n mx = twos_comp(uint16_mx, 16)\n my = twos_comp(uint16_my, 16)\n mz = twos_comp(uint16_mz, 16)\n\n yield [ax, ay, az, mx, my, mz]",
"async def test_temp_sensor(hass: HomeAssistant) -> None:\n device = (\n \"sensor.test_temp\",\n \"42\",\n {\n \"friendly_name\": \"Test Temp Sensor\",\n \"unit_of_measurement\": UnitOfTemperature.FAHRENHEIT,\n },\n )\n appliance = await discovery_test(device, hass)\n\n assert appliance[\"endpointId\"] == \"sensor#test_temp\"\n assert appliance[\"displayCategories\"][0] == \"TEMPERATURE_SENSOR\"\n assert appliance[\"friendlyName\"] == \"Test Temp Sensor\"\n\n capabilities = assert_endpoint_capabilities(\n appliance, \"Alexa.TemperatureSensor\", \"Alexa.EndpointHealth\", \"Alexa\"\n )\n\n temp_sensor_capability = get_capability(capabilities, \"Alexa.TemperatureSensor\")\n assert temp_sensor_capability is not None\n properties = temp_sensor_capability[\"properties\"]\n assert properties[\"retrievable\"] is True\n assert {\"name\": \"temperature\"} in properties[\"supported\"]\n\n properties = await reported_properties(hass, \"sensor#test_temp\")\n properties.assert_equal(\n \"Alexa.TemperatureSensor\", \"temperature\", {\"value\": 42.0, \"scale\": \"FAHRENHEIT\"}\n )",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n add_devices([\n DemoThermostat(\"Nest\", 21, TEMP_CELSIUS, False, 19, False),\n DemoThermostat(\"Thermostat\", 68, TEMP_FAHRENHEIT, True, 77, True),\n ])",
"def registerDevice(self):\n\t\tr = req.post(\"http://localhost:9090/devices?id={}&sensors={}_{}&board={}\".format(\n\t\t\tBOARD_ID,\n\t\t\tSENSOR1,\n\t\t\tSENSOR2,\n\t\t\tBOARD\n\t\t))\n\t\tprint (\"[{}] Device Registered on Room Catalog\".format(\n\t\t\tint(time.time()),\n\t\t))",
"def add_atlas_sensor(self, identifier, serial_port):\n self.data_fetcher.atlas_sensor_list.append(AtlasScientificSensor(identifier, serial_port))",
"def setup_platform(hass, config, add_entities, discovery_info=None):\n name = config.get(CONF_NAME)\n cookie = config[CONF_COOKIE]\n include_offpeak = config.get(CONF_INCLUDE_OFF_PEAK)\n\n add_entities([TelemeterSensor(name, cookie, include_offpeak)], True)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n add_devices([ElecPriceSensor(hass,\n config.get(CONF_NAME),\n config.get(CONF_ELEC_RATE),\n config.get(CONF_TIMEOUT))], True)",
"def EEPROM_connect(self) :\n self.spi = spidev.SpiDev()\n self.spi.open(0,1)\n self.spi.cshigh = False\n self.spi.mode = 0b00\n self.spi.max_speed_hz = 3814\n return",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n dev_id = config.get(CONF_ID, None)\n devname = config.get(CONF_NAME, \"EnOcean binary sensor\")\n add_devices([EnOceanBinarySensor(dev_id, devname)])",
"def add_sensor(self, sensor):\n self.sensors.append(sensor)",
"def setup_platform(hass, config, add_entities, discovery_info=None):\n _LOGGER.debug('Setup__sensor__')\n conf_name = hass.data[DOMAIN]['conf_name']\n sensors = hass.data[DOMAIN]['bsensors']\n # All data was correct and sensor initialized\n dev = []\n for variable in sensors:\n dev.append(SauresSensor(\n conf_name, variable[0], variable[2],\n SENSOR_TYPES[variable[1]][0],\n SENSOR_TYPES[variable[1]][1],\n SENSOR_TYPES[variable[1]][2]))\n add_entities(dev, True)",
"async def async_setup_platform(hass, config, async_add_entities,\n discovery_info=None):\n\n import board # pylint: disable=import-error\n import busio # pylint: disable=import-error\n from adafruit_htu21d import HTU21D # pylint: disable=import-error\n\n name = config.get(CONF_NAME)\n SENSOR_TYPES[SENSOR_TEMPERATURE][1] = hass.config.units.temperature_unit\n\n bus = busio.I2C(board.SCL, board.SDA)\n sensor = await hass.async_add_job(partial(HTU21D, bus))\n\n #if not sensor.sample_ok:\n # _LOGGER.error(\"HTU21D sensor not detected in bus %s\", bus_number)\n # return False\n\n sensor_handler = await hass.async_add_job(HTU21DHandler, sensor)\n\n # dev = [HTU21DSensor(sensor_handler, name, SENSOR_TEMPERATURE, temp_unit),\n # HTU21DSensor(sensor_handler, name, SENSOR_HUMIDITY, '%')]\n dev = []\n try:\n for variable in config[CONF_MONITORED_CONDITIONS]:\n dev.append(\n HTU21DSensor(sensor_handler, variable, SENSOR_TYPES[variable][1], name)\n )\n except KeyError:\n pass\n\n async_add_entities(dev, True)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n add_devices([\n manual.ManualAlarm(hass, 'Alarm', '1234', 5, 10, False),\n ])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Installs the OpenMPI package on the VM.
|
def _Install(vm):
vm.Install('build_tools')
vm.Install('wget')
vm.RemoteCommand('wget %s -P %s' % (MPI_URL, INSTALL_DIR))
vm.RemoteCommand('cd %s && tar xvfz %s' % (INSTALL_DIR, MPI_TAR))
make_jobs = vm.NumCpusForBenchmark()
shared_lib_command = ('--enable-shared' if FLAGS.openmpi_enable_shared
else '--disable-shared')
if FLAGS.openmpi_with_cuda_support:
cuda_cmd = ('--with-cuda=/usr/local/cuda-{version}/ '
'--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'.format(
version=FLAGS.cuda_toolkit_version))
else:
cuda_cmd = ''
config_cmd = (
'./configure --enable-static {shared_lib_cmd} --prefix=/usr '
'{cuda_cmd}'.format(shared_lib_cmd=shared_lib_command,
cuda_cmd=cuda_cmd))
vm.RobustRemoteCommand(
'cd %s && %s && make -j %s && sudo make install' %
(MPI_DIR, config_cmd, make_jobs))
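
A quick post-install sanity check is to ask the freshly built runtime for its version. The helper below is a hypothetical sketch in the same PerfKitBenchmarker style; vm.RemoteCommand comes from the code above, while the helper name and the version regex are assumptions:

import re


def _GetInstalledVersion(vm):
    # Hypothetical helper: query the installed Open MPI runtime.
    stdout, _ = vm.RemoteCommand('mpirun --version')
    # The first line reads e.g. "mpirun (Open MPI) 3.1.2".
    match = re.search(r'\(Open MPI\) ([0-9.]+)', stdout)
    return match.group(1) if match else None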
|
[
"def _Install(vm):\n version_to_install = FLAGS.openmpi_version\n if not version_to_install:\n return\n current_version = GetMpiVersion(vm)\n if current_version == version_to_install:\n return\n\n first_dot_pos = version_to_install.find('.')\n second_dot_pos = version_to_install.find('.', first_dot_pos + 1)\n major_version = version_to_install[0:second_dot_pos]\n mpi_tar = ('openmpi-{version}.tar.gz'.format(version=version_to_install))\n mpi_url = ('{mpi_url_base}/v{major_version}/{mpi_tar}'.format(\n mpi_url_base=MPI_URL_BASE, major_version=major_version, mpi_tar=mpi_tar))\n install_dir = posixpath.join(\n linux_packages.INSTALL_DIR,\n 'openmpi-{version}'.format(version=version_to_install))\n\n vm.Install('build_tools')\n vm.Install('wget')\n vm.RemoteCommand('wget %s -P %s' % (mpi_url, install_dir))\n vm.RemoteCommand('cd %s && tar xvfz %s' % (install_dir, mpi_tar))\n make_jobs = vm.NumCpusForBenchmark()\n\n config_options = []\n config_options.append('--enable-static')\n config_options.append('--prefix=/usr')\n config_options.append('--enable-shared' if FLAGS.openmpi_enable_shared\n else '--disable-shared')\n if FLAGS.openmpi_with_cuda_support:\n config_options.append('--with-cuda=/usr/local/cuda-{version}/'\n .format(version=FLAGS.cuda_toolkit_version))\n config_options.append('--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'\n .format(version=FLAGS.cuda_toolkit_version))\n if FLAGS.openmpi_configs:\n config_options.append(FLAGS.openmpi_configs)\n\n config_cmd = './configure {}'.format(' '.join(config_options))\n vm.RobustRemoteCommand(\n 'cd %s/openmpi-%s && %s && make -j %s && sudo make install' %\n (install_dir, version_to_install, config_cmd, make_jobs))",
"def AptInstall(vm):\n if not FLAGS.openmpi_version:\n return\n vm.RobustRemoteCommand(\n 'sudo apt-get {}'.format(REMOVE_MPI_CMD), ignore_failure=True)\n _Install(vm)",
"def AptInstall(vm):\n vm.RobustRemoteCommand('sudo apt-get {}'.format(REMOVE_MPI_CMD))\n _Install(vm)",
"def Install(vm) -> None:\n if FLAGS.mpi_vendor == 'intel':\n mpilib = 'intelmpi'\n install_benchmarks = _InstallForIntelMpiLibrary\n elif FLAGS.mpi_vendor == 'openmpi':\n if not COMPILE_FROM_SOURCE.value:\n raise ValueError(\n f'--mpi_vendor=openmpi requires --{COMPILE_FROM_SOURCE.name}')\n mpilib = 'openmpi'\n install_benchmarks = _InstallForOpenMpiLibrary\n\n vm.Install(mpilib)\n if not COMPILE_FROM_SOURCE.value:\n return\n logging.info('Installing Intel MPI benchmarks from source')\n vm.Install('build_tools')\n install_benchmarks(vm)",
"def _InstallForIntelMpiLibrary(\n vm) -> None:\n if intel_repo.UseOneApi():\n vm.InstallPackages('intel-oneapi-compiler-dpcpp-cpp')\n vm.InstallPackages('intel-oneapi-mpi-devel') # for mpi.h\n source_cmds = f'. {intel_repo.ONEAPI_VARS_FILE}'\n else:\n source_cmds = (f'. {_INTEL_DIR}/mkl/bin/mklvars.sh intel64; '\n f'. {_INTEL_COMPILER_DIR}/bin/compilervars.sh intel64')\n for compiler_dir in (_INTEL_COMPILER_DIR, _INTEL_COMPILER_DIR_2020):\n vm.RemoteCommand(\n _INTEL_FIX_TBBROOT_CMD.format(compiler_dir=compiler_dir),\n ignore_failure=True)\n vm.RemoteCommand(_GIT_CHECKOUT_CMD)\n vm.PushDataFile(_PATCH_FILE)\n vm.RemoteCommand(_GIT_PATCH_CMD)\n # Default make uses the Intel compiler (mpiicc) not available in repos\n # {source_cmds} filled in at runtime due to differences in 2018/19 vs 2021\n compile_benchmark_cmd = (\n f'cd {_MPI_BENCHMARK_DIR}; {source_cmds}; CC=mpicc CXX=mpicxx make')\n vm.RemoteCommand(compile_benchmark_cmd)\n vm.RemoteCommand(_ENABLE_VERBOSE_SEGFAULT_LOGS)",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def AptInstall(vm):\n vm.Install('wget')\n vm.InstallPackages('numactl libnuma-dev')\n vm.Install('cuda_toolkit')\n # HPCG CUDA 10 requires Open MPI 3.1 and HPCG CUDA 11 requires Open MPI 4.0\n vm.Install('openmpi')\n\n if FLAGS.cuda_toolkit_version not in CUDA_FILES:\n raise cuda_toolkit.UnsupportedCudaVersionError(\n f'HPCG only supports CUDA {sorted(CUDA_FILES)}')\n hpcg_tar, hpcg_binary = CUDA_FILES[FLAGS.cuda_toolkit_version]\n vm.InstallPreprovisionedPackageData(PACKAGE_NAME, [hpcg_tar],\n linux_packages.INSTALL_DIR)\n vm.RemoteCommand('rm -rf %s' % HPCG_DIR)\n vm.RemoteCommand('mkdir %s' % HPCG_DIR)\n vm.RemoteCommand(\n 'cd %s && tar xvf %s --directory=%s --strip-components=1' %\n (linux_packages.INSTALL_DIR, hpcg_tar, HPCG_DIR))\n # Create a symlink from the hpcg binary to 'hpcg'\n if FLAGS.cuda_toolkit_version == '11.0':\n # HPCG only release the binary that supports CUDA 11. Use the data from\n # HPCG CUDA 10 package.\n vm.InstallPreprovisionedPackageData(PACKAGE_NAME, [hpcg_binary], HPCG_DIR)\n vm.RemoteCommand(f'chmod +x {posixpath.join(HPCG_DIR, hpcg_binary)}')\n vm.RemoteCommand('cd %s && ln -s %s %s' % (HPCG_DIR, hpcg_binary, 'hpcg'))",
"def install():\r\n sudo('apt-get update -qq')\r\n sudo('apt-get -y -q install rubygems git')\r\n\r\n puppet_version = env.get('loom_puppet_version')\r\n sudo(_gem_install('puppet', version=puppet_version))\r\n\r\n librarian_version = env.get('loom_librarian_version')\r\n sudo(_gem_install('librarian-puppet', version=librarian_version))\r\n\r\n # http://docs.puppetlabs.com/guides/installation.html\r\n sudo('puppet resource group puppet ensure=present')\r\n sudo(\"puppet resource user puppet ensure=present gid=puppet shell='/sbin/nologin'\")\r\n execute(update_configs)",
"def _Install(vm):\n if vm.OS_TYPE not in MOFED_OS_MAPPING:\n raise ValueError('OS type {} not in {}'.format(vm.OS_TYPE,\n sorted(MOFED_OS_MAPPING)))\n driver = MLNX_OFED_DOWNLOAD_URL.format(version=FLAGS.mofed_version,\n os=MOFED_OS_MAPPING[vm.OS_TYPE])\n vm.InstallPackages('libdapl2 libmlx4-1')\n vm.RemoteCommand(f'wget --retry-connrefused --tries=3 --waitretry=5 {driver}')\n vm.RemoteCommand('tar zxvf MLNX_OFED_LINUX-*-x86_64.tgz')\n stdout, _ = vm.RemoteCommand('cd MLNX_OFED_LINUX-*-x86_64 && sudo '\n './mlnxofedinstall --force --skip-repo')\n if 'Installation passed successfully' not in stdout:\n raise errors.Benchmarks.PrepareException(\n 'Mellanox OpenFabrics driver isn\\'t installed successfully.')\n vm.RemoteCommand('sudo /etc/init.d/openibd restart')\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.EnableRDMA=y/\"\n \"OS.EnableRDMA=y/g' /etc/waagent.conf\")\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.UpdateRdmaDriver=y/\"\n \"OS.UpdateRdmaDriver=y/g' /etc/waagent.conf\")\n vm.Reboot()\n # Check IB status.\n stdout, _ = vm.RemoteCommand('sudo ibdev2netdev -v')\n if 'port 1 (ACTIVE) ==> ib0 (Up)' not in stdout:\n raise errors.Benchmarks.PrepareException('Infiniband is not up.')",
"def install(self):\n try:\n stdout, stderr, code = self.sshconnection.execute('apt-get install -y %s' % (self.package))\n if code != 0:\n logging.error(\"[%s] Error installing package: %s\" % (self.hostname, self.package))\n logging.error(\"%s\" % stderr)\n sys.exit(1)\n except Exception as e:\n print('Caught exception: %s: %s' % (e.__class__, e))\n traceback.print_exc()\n sys.exit(1)",
"def _Install(vm) -> None:\n vm.Install('build_tools')\n vm.Install('wget')\n vm.RemoteCommand(f'cd {linux_packages.INSTALL_DIR}; git clone {REDIS_GIT}')\n vm.RemoteCommand(\n f'cd {GetRedisDir()} && git checkout {_VERSION.value} && make')",
"def Install(vm):\n vm.InstallPackages('git')\n vm.RemoteCommand('git clone {}'.format(CLOUD_TPU_GIT))\n vm.RemoteCommand('cd tpu && git checkout {}'.format(\n FLAGS.cloud_tpu_commit_hash))\n vm.Install('pip')\n vm.RemoteCommand('sudo pip install absl-py')",
"def install_agent():\r\n execute(install)\r\n put(os.path.join(files_path, 'init/puppet.conf'), '/etc/init/puppet.conf', use_sudo=True)\r\n restart('puppet')",
"def install(self, req, cluster_id):\n proton_install_task = instl.ProtonInstallTask(req, cluster_id)\n proton_install_task.start()",
"def install():\n GlusterFSCharm.singleton.install()",
"def YumInstall(vm):\n vm.RemoteCommand('sudo setenforce 0')\n vm.InstallPreprovisionedPackageData(PACKAGE_NAME, PREPROVISIONED_DATA.keys(),\n linux_packages.INSTALL_DIR)\n vm.RemoteCommand('sudo rpm -ivh --force %s' %\n posixpath.join(linux_packages.INSTALL_DIR, MYSQL_RPM))\n vm.InstallPackages('mysql-server')\n vm.RemoteCommand('sudo service mysqld start')\n vm.RemoteCommand('/usr/bin/mysqladmin -u root password \"%s\"' % MYSQL_PSWD)",
"def install(self, *args, **kwargs):\n return self.run(self.venv_python, \"-m\", \"pip\", \"install\", *args, **kwargs)",
"def install_local(self) -> None:\n pass",
"def install(self):\n PiService.install(self)\n self.sudo('svn co https://svn.code.sf.net/p/mjpg-streamer/code /etc/mjpg-streamer')\n self.run('cd /etc/mjpg-streamer/mjpg-streamer && sudo make USE_LIB4VL=true clean all && sudo make DESTDIR=/usr install')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Installs the OpenMPI package on the VM.
|
def AptInstall(vm):
vm.RobustRemoteCommand('sudo apt-get {}'.format(REMOVE_MPI_CMD))
_Install(vm)
|
[
"def _Install(vm):\n version_to_install = FLAGS.openmpi_version\n if not version_to_install:\n return\n current_version = GetMpiVersion(vm)\n if current_version == version_to_install:\n return\n\n first_dot_pos = version_to_install.find('.')\n second_dot_pos = version_to_install.find('.', first_dot_pos + 1)\n major_version = version_to_install[0:second_dot_pos]\n mpi_tar = ('openmpi-{version}.tar.gz'.format(version=version_to_install))\n mpi_url = ('{mpi_url_base}/v{major_version}/{mpi_tar}'.format(\n mpi_url_base=MPI_URL_BASE, major_version=major_version, mpi_tar=mpi_tar))\n install_dir = posixpath.join(\n linux_packages.INSTALL_DIR,\n 'openmpi-{version}'.format(version=version_to_install))\n\n vm.Install('build_tools')\n vm.Install('wget')\n vm.RemoteCommand('wget %s -P %s' % (mpi_url, install_dir))\n vm.RemoteCommand('cd %s && tar xvfz %s' % (install_dir, mpi_tar))\n make_jobs = vm.NumCpusForBenchmark()\n\n config_options = []\n config_options.append('--enable-static')\n config_options.append('--prefix=/usr')\n config_options.append('--enable-shared' if FLAGS.openmpi_enable_shared\n else '--disable-shared')\n if FLAGS.openmpi_with_cuda_support:\n config_options.append('--with-cuda=/usr/local/cuda-{version}/'\n .format(version=FLAGS.cuda_toolkit_version))\n config_options.append('--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'\n .format(version=FLAGS.cuda_toolkit_version))\n if FLAGS.openmpi_configs:\n config_options.append(FLAGS.openmpi_configs)\n\n config_cmd = './configure {}'.format(' '.join(config_options))\n vm.RobustRemoteCommand(\n 'cd %s/openmpi-%s && %s && make -j %s && sudo make install' %\n (install_dir, version_to_install, config_cmd, make_jobs))",
"def _Install(vm):\n vm.Install('build_tools')\n vm.Install('wget')\n vm.RemoteCommand('wget %s -P %s' % (MPI_URL, INSTALL_DIR))\n vm.RemoteCommand('cd %s && tar xvfz %s' % (INSTALL_DIR, MPI_TAR))\n make_jobs = vm.NumCpusForBenchmark()\n shared_lib_command = ('--enable-shared' if FLAGS.openmpi_enable_shared\n else '--disable-shared')\n if FLAGS.openmpi_with_cuda_support:\n cuda_cmd = ('--with-cuda=/usr/local/cuda-{version}/ '\n '--with-cuda-libdir=/usr/local/cuda-{version}/lib64/'.format(\n version=FLAGS.cuda_toolkit_version))\n else:\n cuda_cmd = ''\n config_cmd = (\n './configure --enable-static {shared_lib_cmd} --prefix=/usr '\n '{cuda_cmd}'.format(shared_lib_cmd=shared_lib_command,\n cuda_cmd=cuda_cmd))\n vm.RobustRemoteCommand(\n 'cd %s && %s && make -j %s && sudo make install' %\n (MPI_DIR, config_cmd, make_jobs))",
"def AptInstall(vm):\n if not FLAGS.openmpi_version:\n return\n vm.RobustRemoteCommand(\n 'sudo apt-get {}'.format(REMOVE_MPI_CMD), ignore_failure=True)\n _Install(vm)",
"def Install(vm) -> None:\n if FLAGS.mpi_vendor == 'intel':\n mpilib = 'intelmpi'\n install_benchmarks = _InstallForIntelMpiLibrary\n elif FLAGS.mpi_vendor == 'openmpi':\n if not COMPILE_FROM_SOURCE.value:\n raise ValueError(\n f'--mpi_vendor=openmpi requires --{COMPILE_FROM_SOURCE.name}')\n mpilib = 'openmpi'\n install_benchmarks = _InstallForOpenMpiLibrary\n\n vm.Install(mpilib)\n if not COMPILE_FROM_SOURCE.value:\n return\n logging.info('Installing Intel MPI benchmarks from source')\n vm.Install('build_tools')\n install_benchmarks(vm)",
"def _InstallForIntelMpiLibrary(\n vm) -> None:\n if intel_repo.UseOneApi():\n vm.InstallPackages('intel-oneapi-compiler-dpcpp-cpp')\n vm.InstallPackages('intel-oneapi-mpi-devel') # for mpi.h\n source_cmds = f'. {intel_repo.ONEAPI_VARS_FILE}'\n else:\n source_cmds = (f'. {_INTEL_DIR}/mkl/bin/mklvars.sh intel64; '\n f'. {_INTEL_COMPILER_DIR}/bin/compilervars.sh intel64')\n for compiler_dir in (_INTEL_COMPILER_DIR, _INTEL_COMPILER_DIR_2020):\n vm.RemoteCommand(\n _INTEL_FIX_TBBROOT_CMD.format(compiler_dir=compiler_dir),\n ignore_failure=True)\n vm.RemoteCommand(_GIT_CHECKOUT_CMD)\n vm.PushDataFile(_PATCH_FILE)\n vm.RemoteCommand(_GIT_PATCH_CMD)\n # Default make uses the Intel compiler (mpiicc) not available in repos\n # {source_cmds} filled in at runtime due to differences in 2018/19 vs 2021\n compile_benchmark_cmd = (\n f'cd {_MPI_BENCHMARK_DIR}; {source_cmds}; CC=mpicc CXX=mpicxx make')\n vm.RemoteCommand(compile_benchmark_cmd)\n vm.RemoteCommand(_ENABLE_VERBOSE_SEGFAULT_LOGS)",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def AptInstall(vm):\n vm.Install('wget')\n vm.InstallPackages('numactl libnuma-dev')\n vm.Install('cuda_toolkit')\n # HPCG CUDA 10 requires Open MPI 3.1 and HPCG CUDA 11 requires Open MPI 4.0\n vm.Install('openmpi')\n\n if FLAGS.cuda_toolkit_version not in CUDA_FILES:\n raise cuda_toolkit.UnsupportedCudaVersionError(\n f'HPCG only supports CUDA {sorted(CUDA_FILES)}')\n hpcg_tar, hpcg_binary = CUDA_FILES[FLAGS.cuda_toolkit_version]\n vm.InstallPreprovisionedPackageData(PACKAGE_NAME, [hpcg_tar],\n linux_packages.INSTALL_DIR)\n vm.RemoteCommand('rm -rf %s' % HPCG_DIR)\n vm.RemoteCommand('mkdir %s' % HPCG_DIR)\n vm.RemoteCommand(\n 'cd %s && tar xvf %s --directory=%s --strip-components=1' %\n (linux_packages.INSTALL_DIR, hpcg_tar, HPCG_DIR))\n # Create a symlink from the hpcg binary to 'hpcg'\n if FLAGS.cuda_toolkit_version == '11.0':\n # HPCG only release the binary that supports CUDA 11. Use the data from\n # HPCG CUDA 10 package.\n vm.InstallPreprovisionedPackageData(PACKAGE_NAME, [hpcg_binary], HPCG_DIR)\n vm.RemoteCommand(f'chmod +x {posixpath.join(HPCG_DIR, hpcg_binary)}')\n vm.RemoteCommand('cd %s && ln -s %s %s' % (HPCG_DIR, hpcg_binary, 'hpcg'))",
"def install():\r\n sudo('apt-get update -qq')\r\n sudo('apt-get -y -q install rubygems git')\r\n\r\n puppet_version = env.get('loom_puppet_version')\r\n sudo(_gem_install('puppet', version=puppet_version))\r\n\r\n librarian_version = env.get('loom_librarian_version')\r\n sudo(_gem_install('librarian-puppet', version=librarian_version))\r\n\r\n # http://docs.puppetlabs.com/guides/installation.html\r\n sudo('puppet resource group puppet ensure=present')\r\n sudo(\"puppet resource user puppet ensure=present gid=puppet shell='/sbin/nologin'\")\r\n execute(update_configs)",
"def _Install(vm):\n if vm.OS_TYPE not in MOFED_OS_MAPPING:\n raise ValueError('OS type {} not in {}'.format(vm.OS_TYPE,\n sorted(MOFED_OS_MAPPING)))\n driver = MLNX_OFED_DOWNLOAD_URL.format(version=FLAGS.mofed_version,\n os=MOFED_OS_MAPPING[vm.OS_TYPE])\n vm.InstallPackages('libdapl2 libmlx4-1')\n vm.RemoteCommand(f'wget --retry-connrefused --tries=3 --waitretry=5 {driver}')\n vm.RemoteCommand('tar zxvf MLNX_OFED_LINUX-*-x86_64.tgz')\n stdout, _ = vm.RemoteCommand('cd MLNX_OFED_LINUX-*-x86_64 && sudo '\n './mlnxofedinstall --force --skip-repo')\n if 'Installation passed successfully' not in stdout:\n raise errors.Benchmarks.PrepareException(\n 'Mellanox OpenFabrics driver isn\\'t installed successfully.')\n vm.RemoteCommand('sudo /etc/init.d/openibd restart')\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.EnableRDMA=y/\"\n \"OS.EnableRDMA=y/g' /etc/waagent.conf\")\n vm.RemoteCommand(\"sudo sed -i -e 's/# OS.UpdateRdmaDriver=y/\"\n \"OS.UpdateRdmaDriver=y/g' /etc/waagent.conf\")\n vm.Reboot()\n # Check IB status.\n stdout, _ = vm.RemoteCommand('sudo ibdev2netdev -v')\n if 'port 1 (ACTIVE) ==> ib0 (Up)' not in stdout:\n raise errors.Benchmarks.PrepareException('Infiniband is not up.')",
"def install(self):\n try:\n stdout, stderr, code = self.sshconnection.execute('apt-get install -y %s' % (self.package))\n if code != 0:\n logging.error(\"[%s] Error installing package: %s\" % (self.hostname, self.package))\n logging.error(\"%s\" % stderr)\n sys.exit(1)\n except Exception as e:\n print('Caught exception: %s: %s' % (e.__class__, e))\n traceback.print_exc()\n sys.exit(1)",
"def _Install(vm) -> None:\n vm.Install('build_tools')\n vm.Install('wget')\n vm.RemoteCommand(f'cd {linux_packages.INSTALL_DIR}; git clone {REDIS_GIT}')\n vm.RemoteCommand(\n f'cd {GetRedisDir()} && git checkout {_VERSION.value} && make')",
"def Install(vm):\n vm.InstallPackages('git')\n vm.RemoteCommand('git clone {}'.format(CLOUD_TPU_GIT))\n vm.RemoteCommand('cd tpu && git checkout {}'.format(\n FLAGS.cloud_tpu_commit_hash))\n vm.Install('pip')\n vm.RemoteCommand('sudo pip install absl-py')",
"def install_agent():\r\n execute(install)\r\n put(os.path.join(files_path, 'init/puppet.conf'), '/etc/init/puppet.conf', use_sudo=True)\r\n restart('puppet')",
"def install(self, req, cluster_id):\n proton_install_task = instl.ProtonInstallTask(req, cluster_id)\n proton_install_task.start()",
"def install():\n GlusterFSCharm.singleton.install()",
"def YumInstall(vm):\n vm.RemoteCommand('sudo setenforce 0')\n vm.InstallPreprovisionedPackageData(PACKAGE_NAME, PREPROVISIONED_DATA.keys(),\n linux_packages.INSTALL_DIR)\n vm.RemoteCommand('sudo rpm -ivh --force %s' %\n posixpath.join(linux_packages.INSTALL_DIR, MYSQL_RPM))\n vm.InstallPackages('mysql-server')\n vm.RemoteCommand('sudo service mysqld start')\n vm.RemoteCommand('/usr/bin/mysqladmin -u root password \"%s\"' % MYSQL_PSWD)",
"def install(self, *args, **kwargs):\n return self.run(self.venv_python, \"-m\", \"pip\", \"install\", *args, **kwargs)",
"def install_local(self) -> None:\n pass",
"def install(self):\n PiService.install(self)\n self.sudo('svn co https://svn.code.sf.net/p/mjpg-streamer/code /etc/mjpg-streamer')\n self.run('cd /etc/mjpg-streamer/mjpg-streamer && sudo make USE_LIB4VL=true clean all && sudo make DESTDIR=/usr install')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Uninstalls the OpenMPI package on the VM.
|
def _Uninstall(vm):
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(MPI_DIR))
|
[
"def _Uninstall(vm):\n vm.RemoteCommand('cd {0} && sudo make uninstall'.format(GetMpiDir()))",
"def Uninstall(vm):\n vm.RemoteCommand('rm -rf tpu')",
"def Uninstall(_):\n # No clean way to uninstall everything. The VM will be deleted at the end\n # of the test.\n pass",
"def AptUninstall(vm):\n _Uninstall(vm)",
"def uninstall(self):\n os.system(uninstallSoftware)",
"def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")",
"def remove(self):\n self.model_or_sim.remove_package(self)",
"def uninstall_from_pip():\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"uninstall\", \"es-bgm\"])",
"def pip_uninstall(self, packages, env=None):\n raise NotImplementedError()",
"def remove_package(package, remote):\n flavor = remote.os.package_type\n if flavor == 'deb':\n pkgcmd = ['DEBIAN_FRONTEND=noninteractive',\n 'sudo',\n '-E',\n 'apt-get',\n '-y',\n 'purge',\n '{package}'.format(package=package)]\n elif flavor == 'rpm':\n # FIXME: zypper\n pkgcmd = ['sudo',\n 'yum',\n '-y',\n 'erase',\n '{package}'.format(package=package)]\n else:\n log.error('remove_package: bad flavor ' + flavor + '\\n')\n return False\n return remote.run(args=pkgcmd)",
"def uninstall_ubuntu_packages():\n package_clean('python-amqp')\n package_clean('neutron-server')\n package_clean('neutron-plugin-openvswitch')\n package_clean('python-pyparsing')\n package_clean('python-mysqldb')",
"def remove():\n run('pew rm {0}'.format(package_name()))",
"def _remove(self):\n self._system.remove(self.get_install_path())",
"def uninstall(self):\n pass",
"def remove(self):\n try:\n stdout, stderr, code = self.sshconnection.execute('apt-get purge -y %s' % (self.package))\n if code != 0:\n logging.error(\"[%s] Error removing package: %s\" % (self.hostname, self.package))\n logging.error(\"%s\" % stderr)\n sys.exit(1)\n except Exception as e:\n print('Caught exception: %s: %s' % (e.__class__, e))\n traceback.print_exc()\n sys.exit(1)",
"def PackageCleanup(self):\n for package_name in self._installed_packages:\n self.Uninstall(package_name)\n self.RemoteCommand('rm -recurse -force %s' % self.temp_dir)\n self.EnableGuestFirewall()",
"def uninstall(package):\n return G.DEVICE.uninstall_app(package)",
"def PluginUninstall(self, packageName):\n pass",
"def uninstall_pkgs() -> None:\n res = run(\"pip\", \"freeze\", capture=True)\n if any(\n ln.strip().startswith(TEST_DEMO_PIP_PACKAGE)\n for ln in res.out.splitlines()\n ):\n run(\"pip\", \"uninstall\", \"-y\", TEST_DEMO_PIP_PACKAGE)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method provides the ability to set the order book's deep on the fly. If any of deep's parameters (bid_count or ask_count) is < 0, the method raises the custom ChangeOrderBookDeepError exception.
|
def set_deep(self, deep: Deep) -> None:
    def is_deep_invalid(var: Deep) -> bool:
        # Invalid when it is not a Deep, has non-numeric fields,
        # or carries a negative bid/ask count.
        return not isinstance(var, Deep) \
            or not all(str(value).isdigit() for value in var.__dict__.values()) \
            or var.bid_count < 0 \
            or var.ask_count < 0
    # Exit rule: reject an invalid deep with the custom exception
    if is_deep_invalid(deep):
        raise ChangeOrderBookDeepError(deep)
    self.deep = deep
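
A short usage sketch, assuming book is an order-book instance and Deep is a simple container with bid_count and ask_count fields, as the code above implies; the counts are made up:

book.set_deep(Deep(bid_count=10, ask_count=10))     # accepted
try:
    book.set_deep(Deep(bid_count=-1, ask_count=5))  # rejected
except ChangeOrderBookDeepError as err:
    print(f"invalid deep rejected: {err}")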
|
[
"def test_overflow_bids_market_default_depth(new_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = new_order_book\n\n for _ in range(book.depth):\n book.add_offer('bids', 1, 1)\n\n assert book.depth == len(book.bids)\n assert not book.asks\n\n # try to put 21th lot into bids\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('bids', 1, 1)",
"def test_overflow_asks_market_default_depth(new_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = new_order_book\n\n for _ in range(book.depth):\n book.add_offer('asks', 1, 1)\n\n assert book.depth == len(book.asks)\n assert not book.bids\n\n # try to put 21th lot into asks\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('asks', 1, 1)",
"def update_order_book(self, symbol: str, updates: Dict):\n\n # TODO: easiest way to differentiate spots from perpetuals\n if '_' in symbol:\n book = self._order_books_perp[symbol]\n else:\n book = self._order_books[symbol]\n\n # initialize the order book.\n if len(book['asks']) == 0 and len(book['bids']) == 0:\n order_book = on_order_book(symbol)\n for (p, q) in order_book.asks:\n book['asks'][p] = q\n for (p, q) in order_book.bids:\n book['bids'][p] = q\n\n # update order book with incoming updates across each depth.\n asks, bids = updates['a'], updates['b']\n for ask in asks:\n p, q = Decimal(ask[0]), Decimal(ask[1])\n if q > 0:\n book['asks'][p] = q\n elif p in book['asks']:\n del book['asks'][p]\n\n for bid in bids:\n p, q = Decimal(bid[0]), Decimal(bid[1])\n if q > 0:\n book['bids'][p] = q\n elif p in book['bids']:\n del book['bids'][p]",
"def test_e2e_override_depth_amount_greater_than_from_order_book(self):\n\n cli = \"--balance 1 --override_depth_amount 0.5 offline --test -ob test_data/order_books.csv \"\n deal = self._run_bot_offine(cli)\n\n self.assertEqual(0.5, float(deal.data_row[\"_config_override_depth_amount\"]), 4)\n self.assertEqual(0.5, float(deal.data_row[\"start-qty\"]))\n self.assertEqual(float(deal.data_row[\"ob_result\"]), float(deal.data_row[\"result\"]))\n self.assertEqual(0.024282400000000093, float(deal.data_row[\"result-fact-diff\"]))\n\n # check if prices are from tickers\n self.assertEqual(float(deal.data_row[\"leg1-price\"]), float(deal.data_row[\"leg1-ob-price\"]))\n self.assertEqual(float(deal.data_row[\"leg2-price\"]), float(deal.data_row[\"leg2-ob-price\"]))\n self.assertEqual(float(deal.data_row[\"leg3-price\"]), float(deal.data_row[\"leg3-ob-price\"]))",
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def adjustOrderBook(newState, capacity=100):\n log = Logger('adjustOrderBook')\n adjustmentDict = dict()\n adjustmentDict['bids'] = list()\n adjustmentDict['asks'] = list()\n adjustmentDict['cancel'] = list()\n nBids = newState['orderbooks']['bids']\n nAsks = newState['orderbooks']['asks']\n oBids = newState['sisty']['orderbook']['bids'] #including orderId in each level\n oAsks = newState['sisty']['orderbook']['asks'] #including orderId in each level\n log.debug(\"oBids:\\n{oBids}\", oBids=oBids)\n oBids = sorted(oBids, key=itemgetter(0,1), reverse=True) #sort: price from high to low; amount from low to high\n log.debug(\"sorted_oBids:\\n{oBids}\", oBids=oBids)\n oAsks = sorted(oAsks, key=itemgetter(0,1)) #sort: price from low to high; amount from low to high\n mergedBids = mergeOrderBook(oBids, capacity=capacity) #including orderIdList in each level\n mergedAsks = mergeOrderBook(oAsks, capacity=capacity) #including orderIdList in each level\n log.debug(\"mergedBids:\\n{mergedBids}\", mergedBids=mergedBids)\n log.debug(\"mergedAsks:\\n{mergedAsks}\", mergedAsks=mergedAsks)\n notCuttedBids = list()\n notCuttedAsks = list()\n cancelBidsList = list() #including an oringin index list of each cancel level\n cancelAsksList = list() #including an oringin index list of each cancel level\n\n \"\"\"\n bids: price from high to low\n \"\"\"\n oBidPrices = list()\n for oBid in mergedBids:\n oBidPrices.append(oBid[PRICE])\n nBidPrices = list()\n for nBid in nBids:\n nBidPrices.append(nBid[PRICE])\n\n for n in range(len(nBids)):\n \"\"\"\n 1.若n中有 o中没有 的价格,则以相应数量和价格挂单;\n \"\"\"\n if nBidPrices[n] not in oBidPrices:\n notCuttedBids.append([nBids[n][PRICE], nBids[n][AMOUNT]])\n \"\"\"\n 2.若n中有 o中有 的价格,则比较o总量和n总量,以差量挂撤;\n \"\"\"\n if nBidPrices[n] in oBidPrices:\n oIndex = oBidPrices.index(nBidPrices[n])\n if nBids[n][AMOUNT] > mergedBids[oIndex][AMOUNT]:\n deltaAmount = nBids[n][AMOUNT] - mergedBids[oIndex][AMOUNT]\n notCuttedBids.append([nBids[n][PRICE], deltaAmount])\n elif nBids[n][AMOUNT] < mergedBids[oIndex][AMOUNT]:\n deltaAmount = mergedBids[oIndex][AMOUNT] - nBids[n][AMOUNT]\n cancelBidsList.append([nBids[n][PRICE], deltaAmount, mergedBids[oIndex][ORDERID]])\n \"\"\"\n 3.若n中无 o中有 的价格,则把全部价格等于此价格的单撤掉。\n \"\"\"\n for o in range(len(mergedBids)):\n if oBidPrices[o] not in nBidPrices:\n cancelBidsList.append([mergedBids[o][PRICE], mergedBids[o][AMOUNT], mergedBids[o][ORDERID]])\n\n \"\"\"\n 处理撤单\n DONE: sort cancelBidsList to let it: price from high to low\n \"\"\"\n log.debug(\"cancelBidsList:\\n{cancelBidsList}\", cancelBidsList=cancelBidsList)\n sortedCancelBids = sorted(cancelBidsList, key=itemgetter(0,1), reverse=True) \n log.debug(\"sortedCancelBids:\\n{sortedCancelBids}\", sortedCancelBids=sortedCancelBids)\n\n for i in range(len(sortedCancelBids)):\n decimal = sortedCancelBids[i][AMOUNT] % capacity\n remainAmount = sortedCancelBids[i][AMOUNT]\n log.debug(\"remainAmount:\\n{remainAmount}\", remainAmount=remainAmount)\n if decimal == 0:\n oBids = sorted(oBids, key=itemgetter(1), reverse=True)\n oBids = sorted(oBids, key=itemgetter(0), reverse=True)\n \"\"\"\n DONE: sort oBids to let it:\n 1.price from high to low\n 2.amount from high to low\n \"\"\"\n else:\n oBids = oBids\n \"\"\"\n DONE: sort oBids to let it:\n 1.price from high to low\n 2.amount from low to high\n \"\"\"\n for j in range(len(oBids)):\n if oBids[j][PRICE] == sortedCancelBids[i][PRICE]:\n log.debug(\"inner remainAmount:\\n{remainAmount}\", remainAmount=remainAmount)\n if remainAmount > 0:\n orderId = oBids[j][ORDERID]\n 
adjustmentDict['cancel'].append(orderId)\n remainAmount -= oBids[j][AMOUNT]\n elif remainAmount == 0:\n \n break\n elif remainAmount < 0 and abs(remainAmount) <= capacity:\n adjustmentDict['bids'].append([oBids[j][PRICE], abs(remainAmount)])\n \n break\n else:\n raise(\"handle sortedCancelBids error\")\n log.debug(\"notCuttedBids inner:\\n{notCuttedBids}\", notCuttedBids=notCuttedBids)\n cuttedBids = cutOrderBook(notCuttedBids, capacity=capacity)\n log.debug(\"cuttedBids inner:\\n{cuttedBids}\", cuttedBids=cuttedBids)\n adjustmentDict['bids'].extend(cuttedBids)\n\n \"\"\"\n asks: price from low to high\n \"\"\"\n oAskPrices = list()\n for oAsk in mergedAsks:\n oAskPrices.append(oAsk[PRICE])\n nAskPrices = list()\n for nAsk in nAsks:\n nAskPrices.append(nAsk[PRICE])\n\n for n in range(len(nAsks)):\n \"\"\"\n 1.若n中有 o中没有 的价格,则以相应数量和价格挂单;\n \"\"\"\n if nAskPrices[n] not in oAskPrices:\n notCuttedAsks.append([nAsks[n][PRICE], nAsks[n][AMOUNT]])\n \"\"\"\n 2.若n中有 o中有 的价格,则比较o总量和n总量,以差量挂撤;\n \"\"\"\n if nAskPrices[n] in oAskPrices:\n oIndex = oAskPrices.index(nAskPrices[n])\n if nAsks[n][AMOUNT] > mergedAsks[oIndex][AMOUNT]:\n deltaAmount = nAsks[n][AMOUNT] - mergedAsks[oIndex][AMOUNT]\n notCuttedAsks.append([nAsks[n][PRICE], deltaAmount])\n elif nAsks[n][AMOUNT] < mergedAsks[oIndex][AMOUNT]:\n deltaAmount = mergedAsks[oIndex][AMOUNT] - nAsks[n][AMOUNT]\n cancelAsksList.append([nAsks[n][PRICE], deltaAmount, mergedAsks[oIndex][ORDERID]])\n \"\"\"\n 3.若n中无 o中有 的价格,则把全部价格等于此价格的单撤掉。\n \"\"\"\n for o in range(len(mergedAsks)):\n if oAskPrices[o] not in nAskPrices:\n cancelAsksList.append([mergedAsks[o][PRICE], mergedAsks[o][AMOUNT], mergedAsks[o][ORDERID]])\n\n \"\"\"\n 处理撤单\n DONE: sort cancelAsksList to let it: price from low to high\n \"\"\"\n log.debug(\"cancelAsksList:\\n{cancelAsksList}\", cancelAsksList=cancelAsksList)\n sortedCancelAsks = sorted(cancelAsksList, key=itemgetter(0,1)) \n log.debug(\"sortedCancelAsks:\\n{sortedCancelAsks}\", sortedCancelAsks=sortedCancelAsks) \n for i in range(len(sortedCancelAsks)):\n decimal = sortedCancelAsks[i][AMOUNT] % capacity\n remainAmount = sortedCancelAsks[i][AMOUNT]\n log.debug(\"remainAmount:\\n{remainAmount}\", remainAmount=remainAmount)\n if decimal == 0:\n oAsks = sorted(oAsks, key=itemgetter(1), reverse=True)\n oAsks = sorted(oAsks, key=itemgetter(0))\n log.debug(\"decimal == 0 oAsks:\\n{oAsks}\", oAsks=oAsks) \n \"\"\"\n DONE: sort oAsks to let it:\n 1.price from low to high\n 2.amount from high to low\n \"\"\"\n else:\n oAsks = oAsks\n log.debug(\"decimal != 0 oAsks:\\n{oAsks}\", oAsks=oAsks) \n \"\"\"\n DONE: sort oAsks to let it:\n 1.price from low to high\n 2.amount from low to high\n \"\"\"\n for j in range(len(oAsks)):\n if oAsks[j][PRICE] == sortedCancelAsks[i][PRICE]:\n log.debug(\"inner remainAmount:\\n{remainAmount}\", remainAmount=remainAmount)\n if remainAmount > 0:\n orderId = oAsks[j][ORDERID]\n adjustmentDict['cancel'].append(orderId)\n remainAmount -= oAsks[j][AMOUNT]\n elif remainAmount == 0:\n \n break\n elif remainAmount < 0 and abs(remainAmount) <= capacity:\n adjustmentDict['asks'].append([oAsks[j][PRICE], abs(remainAmount)])\n \n break\n else:\n raise(\"handle sortedCancelAsks error\")\n\n log.debug(\"notCuttedAsks inner:\\n{notCuttedAsks}\", notCuttedAsks=notCuttedAsks)\n log.debug(\"sortedCancelAsks inner:\\n{sortedCancelAsks}\", sortedCancelAsks=sortedCancelAsks)\n cuttedAsks = cutOrderBook(notCuttedAsks, capacity=capacity)\n adjustmentDict['asks'].extend(cuttedAsks)\n\n return adjustmentDict",
"def test_e2e_override_depth_amount_less_than_from_order_book(self):\n\n cli = \"--balance 1 --override_depth_amount 0.03 offline --test -ob test_data/order_books.csv \"\n deal = self._run_bot_offine(cli)\n\n self.assertAlmostEqual(0.06000734789047485, float(deal.data_row[\"start-qty\"]), 4)\n self.assertEqual(0.002407822109525136, float(deal.data_row[\"result-fact-diff\"]))\n\n # prices from order book\n self.assertNotEqual(float(deal.data_row[\"leg1-price\"]), float(deal.data_row[\"leg1-ob-price\"]))",
"def too_deep(self, level):\n return level > self.settings.max_depth",
"def test_too_deeply_nested(self) -> None:\n nested_action = TestNestedMenuAction()\n nested2_action = TestNested2MenuAction()\n nested3_action = TooDeeplyNestedAction()\n\n actions_registry.register(self.test_menu_action)\n actions_registry.register(nested_action)\n actions_registry.register(nested2_action)\n\n with self.assertRaises(DepthLimitExceededError):\n actions_registry.register(nested3_action)",
"def test_get_bid_offer_data(filled_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = filled_order_book\n\n bid_keys = list(book.bids.keys())\n offer_key = choice(bid_keys)\n\n received_offer = book.get_offers_data(offer_key)\n\n assert isinstance(received_offer, dict)\n\n try:\n offer_price = received_offer['price']\n offer_quantity = received_offer['quantity']\n\n except KeyError:\n pytest.fail('While parsing received_offer KeyError occured')\n\n assert isinstance(offer_price, int)\n assert isinstance(offer_quantity, int)\n\n try:\n bid_offer = book.bids[offer_key]\n\n except KeyError:\n pytest.fail('While parsing book.bids KeyError occured')\n\n assert bid_offer == received_offer\n\n with pytest.raises(TradeTypeOverflowedException):\n book.add_offer('bids', 1, 1)",
"def test_serialize_related_deep(self):\n\n s = serialize(self.author, include=[\n ('books', dict(\n include=[('publisher', dict())]\n ))\n ])\n\n self.assertEqual(s['name'], 'User Foo')\n self.assertEqual(len(s['books']), len(self.books))\n for b in s['books']:\n self.assertTrue(b['title'].startswith('Book '))\n self.assertEqual(b['publisher']['name'], 'Publisher')",
"def test_set_with_extra_branch_paths(self, test_param_tree):\n branch_data = deepcopy(test_param_tree.nested_dict['branch'])\n branch_data['extraParam'] = 'oops'\n\n with pytest.raises(ParameterTreeError) as excinfo:\n test_param_tree.complex_tree.set('branch', branch_data)\n\n assert 'Invalid path' in str(excinfo.value)",
"def check_depth(self, depth):\n if depth > conf.DEPTH_LIMIT:\n raise StopCrawlingException(\n \"Current depth %d from root node is greater than maximum \"\n \"allowed depth %d.\" % (depth, conf.DEPTH_LIMIT))",
"def slot_fulldepth(self, dummy_sender, data):\r\n (depth) = data\r\n self.debug(\"### got full depth, updating orderbook...\")\r\n self.bids = []\r\n self.asks = []\r\n self.total_ask = 0\r\n self.total_bid = 0\r\n if \"error\" in depth:\r\n self.debug(\"### \", depth[\"error\"])\r\n return\r\n for order in depth[\"data\"][\"asks\"]:\r\n price = int(order[\"price_int\"])\r\n volume = int(order[\"amount_int\"])\r\n self._update_total_ask(volume)\r\n self.asks.append(Level(price, volume))\r\n for order in depth[\"data\"][\"bids\"]:\r\n price = int(order[\"price_int\"])\r\n volume = int(order[\"amount_int\"])\r\n self._update_total_bid(volume, price)\r\n self.bids.insert(0, Level(price, volume))\r\n\r\n # update own volume cache\r\n for order in self.owns:\r\n self._update_level_own_volume(\r\n order.typ, order.price, self.get_own_volume_at(order.price, order.typ))\r\n\r\n if len(self.bids):\r\n self.bid = self.bids[0].price\r\n if len(self.asks):\r\n self.ask = self.asks[0].price\r\n\r\n self._valid_ask_cache = -1\r\n self._valid_bid_cache = -1\r\n self.ready_depth = True\r\n self.signal_fulldepth_processed(self, None)\r\n self.signal_changed(self, None)",
"def setNested(self, *args):\n return _yarp.Bottle_setNested(self, *args)",
"def test_depth_limit(self):\n with self.assertRaisesRegexp(\n RemoteException,\n r'.*DepthLimitExceeded: Depth limit of 2 ' +\n 'exceeded at localhost -> localhost -> localhost'):\n recursive()",
"def check(self):\n super().check()\n try:\n value = self.raw_value\n except (AttributeError, KeyError) as err:\n self._reraise_if_required(err)\n else:\n errors = [] # type: List[str]\n for index, item in enumerate(value):\n try:\n self.inner_setting.nested_list_index = index\n self.inner_setting.check()\n except ValidationError as error:\n errors.extend(error.messages)\n if errors:\n raise ValidationError(errors)",
"def test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )",
"def _computeDownwardChainsRecurse(self, results, drawTuple, count):\n prevCount = count.get(drawTuple, 0)\n count[drawTuple] = 1 + prevCount\n results.append(([drawTuple], dict(count)))\n for child in self._relationships[drawTuple][1].keys():\n if count.get(child, 0) < _RECURSIVE_LIMIT:\n oldSize = len(results)\n self._computeDownwardChainsRecurse(results, child, count)\n for k in range(oldSize, len(results)):\n results[k][0].insert(0, drawTuple)\n count[drawTuple] -= 1 # decrement count to avoid lasting effect\n if count[drawTuple] == 0:\n del count[drawTuple]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Private method that provides the ability to sort the orders by price in descending order
|
def __sort_orders_by_price(self):
self.orders = sorted(self.orders, key=lambda o: o.price, reverse=True)
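# A minimal usage sketch of the descending sort above. The Order and OrderList
# names here are assumptions for illustration; only the sort-key logic comes
# from the private method itself.
from dataclasses import dataclass, field
from typing import List


@dataclass
class Order:
    price: float


@dataclass
class OrderList:
    orders: List[Order] = field(default_factory=list)

    def __sort_orders_by_price(self):
        # Highest price first, matching the private method above.
        self.orders = sorted(self.orders, key=lambda o: o.price, reverse=True)

    def prices_high_to_low(self) -> List[float]:
        # Public wrapper that exercises the private (name-mangled) sort.
        self.__sort_orders_by_price()
        return [o.price for o in self.orders]


book = OrderList([Order(10.5), Order(12.0), Order(9.9)])
assert book.prices_high_to_low() == [12.0, 10.5, 9.9]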
|
[
"def test_sorting_descending_by_price():",
"def test_ordering_by_price_desc(self):\n request = self.factory.get('/api/v1/cars', {'distance': 10000,\n 'ordering': '-price'})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n cars = response.data['results'][0:2]\n self.assertGreater(cars[0]['price'], cars[1]['price'])\n self.assertNotEqual(cars[0], cars[1])",
"def test_sorting_ascending_by_price_and_area():",
"def select_sort_by_price_ascendant(self):\n msg = \"The new order of the items is by ascendant price\"\n with self.allure.step(msg):\n self.__product_sort.select_by_text('Price (low to high)')\n self.allure.attach_image(self.driver, msg)",
"def test_sorting_descending_by_price_and_area():",
"def _sort_by_price(self, data):\n # Separate the data by currency\n alch = []\n fusing = []\n chaos = []\n exalted = []\n \n for item in data:\n price = item['price']\n if \"alchemy\" in price:\n alch.append(item)\n elif \"fusing\" in price:\n fusing.append(item)\n elif \"chaos\" in price:\n chaos.append(item)\n elif \"exalted\" in price:\n exalted.append(item)\n \n alch = natsorted(alch, key=lambda item: item['price'])\n fusing = natsorted(fusing, key=lambda item: item['price'])\n chaos = natsorted(chaos, key=lambda item: item['price'])\n exalted = natsorted(exalted, key=lambda item: item['price'])\n \n result = []\n result.extend(alch)\n result.extend(fusing)\n result.extend(chaos)\n result.extend(exalted)\n return result",
"def sort_price_items():\n\n items = Item.objects.order_by('-price')\n\n return render_template(\"item/home.html\" , items = items)",
"def set_bid_as_sort_price(self):\n self.sort_price_ = self.bid_price_\n return self",
"def _get_open_orders_by_price(self):\n log.debug(\"Getting open orders sorted by price-based priority\")\n\n # time_created ordering will be preserved as the secondary sort key.\n unprioritized_open_orders = list(Order.objects.filter(\n status=Order.STATUS_OPEN).order_by('time_created'))\n fulfilled_orders = list(Order.objects.filter(\n status=Order.STATUS_FULFILLED))\n\n sorted_orders_dict = self._sort_open_orders_by_price(\n unprioritized_open_orders,\n fulfilled_orders)\n return sorted_orders_dict['sorted_open_orders']",
"def sort_price(self):\n return Decimal(self.sort_price_)",
"def get_sorted_products_by_price(reverse=False):\n return {\n \"products\":\n sorted(\n data[\"products\"],\n key=lambda x: float(x[\"price\"].strip(\".\").replace(\",\", \".\")),\n reverse=reverse\n )\n }",
"def select_sort_by_price_descendant(self):\n msg = \"The new order of the items is by descendant price\"\n with self.allure.step(msg):\n self.__product_sort.select_by_text('Price (high to low)')\n self.allure.attach_image(self.driver, msg)",
"def set_ask_as_sort_price(self):\n self.sort_price_ = self.ask_price_\n return self",
"def sort_by_price(children_events_options_list: List[ChildrenEventOption], sort_type: SortType = SortType.ASCENDING):\n return _sort_by(children_events_options_list, sort_type, key=attrgetter('price_in_uah'))",
"def _sort_open_orders_by_price(self,\n open_orders,\n fulfilled_orders):\n priorities_stats = self._compute_order_priorities_stats(\n open_orders + fulfilled_orders)\n median_demand, order_prices, tab_limits, total_fulfilled_prices = \\\n (priorities_stats['median_demand'],\n priorities_stats['order_prices'],\n priorities_stats['tab_limits'],\n priorities_stats['total_fulfilled_prices'])\n\n # The get_priority function also does a write to the database to update\n # tab_based_priority field for each order. This is because we use that\n # as a cached field to show the user the order's last known priority.\n # This is a side-effect of the function\n def get_priority(open_order):\n \"\"\"Compute an open order's price-based priority.\n\n The floor and 20% fudge keep FIFO as a small component of priority\n instead of severely penalizing people who ordered early but want\n just a bit more than average demand.\n\n Maintenance orders are a special case and always priced at 0.0\n to be processed early.\n \"\"\"\n priority = 0.0\n if not open_order.maintenance:\n order_price = order_prices[open_order.sid]\n tab = open_order.tab\n owner_total_fulfilled_price = \\\n total_fulfilled_prices.get(tab.id, 0.0)\n tab_limit = tab_limits[tab.sid]\n priority = floor(\n ((order_price + owner_total_fulfilled_price) / tab_limit) /\n (1.2 * median_demand))\n\n open_order.tab_based_priority = priority\n open_order.save(update_fields=['tab_based_priority'])\n\n return priority\n\n order_priorities = {\n order.sid: get_priority(order) for order in open_orders\n }\n\n log.debug('Open order price-based priorities: %s' % order_priorities)\n\n sorted_open_orders = \\\n sorted(open_orders,\n key=lambda o: order_priorities[o.sid])\n\n return {\n 'sorted_open_orders': sorted_open_orders,\n 'open_order_priorities': order_priorities\n }",
"def cart(self, **post):\n sort_attr = []\n order_lines = []\n sort_dict = {}\n order = request.website.sale_get_order()\n if order:\n order_lines = [line for line in order.order_line]\n from_currency = order.company_id.currency_id\n to_currency = order.pricelist_id.currency_id\n compute_currency = lambda price: from_currency.compute(price, to_currency)\n else:\n compute_currency = lambda price: price\n # apply sorting on shopping cart\n attribs_with_sequence = ['cut', 'cl', 'col', 'pol', 'sym', 'fc', 'fl', 'mk']\n if post.get('SortList1', False):\n for key, val in post.items():\n if not val.startswith(\"choose\"):\n if val in attribs_with_sequence:\n val = str(val) + '.sequence'\n sort_dict[key] = val\n if 'SortList1' in sort_dict.keys():\n sort_attr.append(sort_dict['SortList1'])\n if 'SortList2' in sort_dict.keys():\n sort_attr.append(sort_dict['SortList2'])\n if 'SortList3' in sort_dict.keys():\n sort_attr.append(sort_dict['SortList3'])\n if 'SortList4' in sort_dict.keys():\n sort_attr.append(sort_dict['SortList4'])\n if sort_attr:\n cr = request._cr\n order_by = u\",\".join(sort_attr)\n where_clause = tuple([line.product_id.diamond_id.id for line in order.order_line])\n where_len = len(where_clause)\n where_clause = str(where_clause)\n if where_len <= 1:\n where_clause = where_clause.replace(',', '')\n cr.execute(\"\"\"\n SELECT ad.id\n FROM avalon_diamonds as ad LEFT JOIN avalon_diamonds_cut as cut ON cut.id = ad.cut LEFT JOIN avalon_diamonds_color as col ON col.id = ad.col LEFT JOIN avalon_diamonds_mk as mk ON mk.id = ad.mk LEFT JOIN avalon_diamonds_pol as pol ON pol.id = ad.pol LEFT JOIN avalon_diamonds_sym as sym ON sym.id = ad.sym LEFT JOIN avalon_diamonds_fl as fl ON fl.id = ad.fl LEFT JOIN avalon_diamonds_fc as fc ON fc.id = ad.fc LEFT JOIN avalon_diamonds_cl as cl ON cl.id = ad.cl WHERE ad.id in \"\"\" + where_clause + \"\"\"\n ORDER BY \"\"\" + str(order_by))\n sorted_dia_ids = [i[0] for i in cr.fetchall()]\n order_lines = [line for dia_id in sorted_dia_ids for line in order.order_line if line.product_id.diamond_id.id == dia_id]\n\n values = {\n 'website_sale_order': order,\n 'website_order_line': order_lines, # pass sorted order lines to website shopping cart\n 'compute_currency': compute_currency,\n 'suggested_products': [],\n 'sortlist1': post.get('SortList1', False),\n 'sortlist2': post.get('SortList2', False),\n 'sortlist3': post.get('SortList3', False),\n 'sortlist4': post.get('SortList4', False)\n }\n if order:\n _order = order\n if not request.env.context.get('pricelist'):\n _order = order.with_context(pricelist=order.pricelist_id.id)\n values['suggested_products'] = _order._cart_accessories()\n\n if post.get('type') == 'popover':\n return request.render(\"website_sale.cart_popover\", values)\n\n if post.get('code_not_available'):\n values['code_not_available'] = post.get('code_not_available')\n\n return request.render(\"website_sale.cart\", values)",
"def test_get_order(self):\n pass",
"def sort_orders(self):\n return self._sort_orders",
"def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method provides the ability to find an order by its id and reject it.
|
def reject_order(self, order: Order) -> None:
order = self.get_order_by_id(order.id)
order.status = OrderStatus.REJECT
|
[
"def _on_order_not_found(self, msg):\r\n parts = msg[\"id\"].split(\":\")\r\n oid = parts[1]\r\n self.debug(\"### got 'Order not found' for\", oid)\r\n # we are now going to fake a user_order message (the one we\r\n # obviously missed earlier) that will have the effect of\r\n # removing the order cleanly.\r\n fakemsg = {\"user_order\": {\"oid\": oid, \"reason\": \"requested\"}}\r\n self._on_op_private_user_order(fakemsg)",
"def test_get_non_existing_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.get_order(2),\n \"order does not exist\")",
"def test_non_existing_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.change_status(\n 2, \"pending\"), \"order not found\")",
"def test_order_find(self):\n pass",
"def test_get_order_report_by_order_id(self):\n pass",
"def test_delete_order_by_id(self):\n resp = self.barb_behaviors.create_order_from_config(\n use_expiration=False)\n self.assertEqual(resp.status_code, 202,\n 'Returned unexpected response code')\n\n self.cl_behaviors.delete_order_by_id(resp.id)\n # Deleting here because using two different behaviors\n self.barb_behaviors.remove_from_created_orders(\n order_id=resp.id)\n\n get_resp = self.barb_client.get_order(resp.id)\n self.assertEqual(get_resp.status_code, 404,\n 'Should have failed with 404')",
"def test_get_order_by_id(self):\n resp = self.barb_behaviors.create_order_from_config(\n use_expiration=False)\n self.assertEqual(resp.status_code, 202,\n 'Barbican returned unexpected response code')\n\n order = self.cl_client.get_order_by_id(resp.id)\n self.assertIsNotNone(order)",
"def cancelorder(self, id, **args ):\n\n if id not in self._position.order.order_dict :\n self._logger.info(\"ID:{} is already filled or canceld or expired\".format(id) )\n return {'stat':-999 , 'msg':\"Order is already filled or canceld or expired\"}\n\n args['orderId']=id\n\n self._logger.debug(\"[cancelorder] : {}\".format(args) )\n\n self._position.order.mark_as_invalidate( id )\n\n try:\n res = self._request(\"/v1/cancelOrder\", \"POST\", params=args, private=True )\n except Exception as e:\n return {'stat': -999, 'msg': str(e)}\n\n if res.get('status')!=0 : \n self._logger.error(\"Error response [cancelorder] : {}\".format(res) )\n\n # すでにオーダーがなくなっている\n # [{'message_code': 'ERR-5122', 'message_string': 'The request is invalid due to the status of the specified order.'}]\n if res.get('messages',[{}])[0].get('message_code')=='ERR-5122' :\n order_info = self._position.order.mark_as_invalidate( id, timeout=3 ) # 3秒後にオーダーリストから削除(キャンセルと約定が同時になったときには約定を優先させるため)\n\n # 決済オーダーが無くなっていたら 当該のポジション情報のCloseorder発注済みフラグを削除\n if order_info :\n closeid = order_info.get('closeid')\n if closeid :\n if order_info['side']=='BUY' :\n self._position.position._short_position[closeid]['closeorder'] = False\n self._logger.debug(\"Flag Off invalidate : {}\".format(self._position.position._short_position[closeid]) )\n else:\n self._position.position._long_position[closeid]['closeorder'] = False\n self._logger.debug(\"Flag Off invalidate : {}\".format(self._position.position._long_position[closeid]) )\n\n return {'stat':res.get('status') , 'msg':res.get('messages')}\n\n return {'stat': 0, 'msg': \"\"}",
"def _order_not_found():\n pecan.abort(404, u._('Order not found.'))",
"def test_order_id(self):\n\n u1 = User.objects.get(id='1')\n u2 = User.objects.get(id='2')\n\n ord1 = Orders.objects.get(user_id=u1)\n ord2 = Orders.objects.get(user_id=u2, active='Y')\n ord3 = Orders.objects.get(user_id=u2, active=\"N\")\n\n ord4 = Orders(user_id=u1)\n ord4.save()\n\n self.assertFalse(ord1.order_id == None)\n self.assertFalse(ord2.order_id == None)\n self.assertFalse(ord3.order_id == None)\n self.assertFalse(ord4.order_id == None)\n self.assertFalse(ord1.pk == None)\n self.assertFalse(ord2.pk == None)\n self.assertFalse(ord3.pk == None)\n self.assertFalse(ord4.pk == None)",
"def test_fetch_specific_order_when_does_not_exist(self):\n response = self.api_test_client.get(\n '{}/orders/100'.format(self.BASE_URL))\n self.assertEqual(response.status_code, 404)\n self.assertEqual(\n 'Order with id 100 not found', response_as_json(\n response)['message'])",
"def test_delete_non_existing_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.deletes_order(2),\n \"order not found\")\n self.assertEqual(len(self.orders_list.get_orders()), 2)",
"def cancel(self, order_id: int):",
"def test_cancel_order(self):\n pass",
"def cancel_order(self, order_id):\n for order in orders:\n if order_id == order['order_id']:\n order['status'] = canceled\n return order",
"def cancelOrder(self, order):\n raise NotImplementedError()",
"async def get_order_by_id(request: web.Request, order_id) -> web.Response:\n return web.Response(status=200)",
"def test_order_cannot_be_deleted_if_dont_exist(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/5',\n\t\t\theaders={\"x-access-token\": access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 404)\n\t\tself.assertEqual(result[\"message\"], \"That order is not available\")",
"def is_order_exist(self, id):\n\n if len(self.orders_list) > id and id >= 0:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method provides the ability to fill an order in the order book; filling means the order is completed.
|
def fill_order(self, order: Order) -> None:
order = self.get_order_by_id(order.id)
order.status = OrderStatus.FILL
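# A minimal sketch of the lookup-then-set-status pattern shared by reject_order
# above and fill_order here, assuming a hypothetical in-memory order book with
# get_order_by_id and an OrderStatus enum (these names are illustrative only).
from dataclasses import dataclass
from enum import Enum, auto
from typing import Dict


class OrderStatus(Enum):
    NEW = auto()
    FILL = auto()
    REJECT = auto()


@dataclass
class Order:
    id: int
    status: OrderStatus = OrderStatus.NEW


class OrderBookSketch:
    def __init__(self) -> None:
        self._orders: Dict[int, Order] = {}

    def place_order(self, order: Order) -> None:
        self._orders[order.id] = order

    def get_order_by_id(self, order_id: int) -> Order:
        return self._orders[order_id]

    def fill_order(self, order: Order) -> None:
        # Re-fetch by id so the stored instance, not the argument, is mutated.
        order = self.get_order_by_id(order.id)
        order.status = OrderStatus.FILL


book = OrderBookSketch()
book.place_order(Order(id=1))
book.fill_order(Order(id=1))
assert book.get_order_by_id(1).status is OrderStatus.FILL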
|
[
"def onOrderFilled(self, broker_, order):\n pass",
"def sync_completed_order(self):\n self.completed_order = yield get_private('complete_order', {'currency': self.currency})",
"def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n self.order_id = order_id\n print(\"Order completed\")\n self.save()",
"def fillMarketOrder(self, broker_, order, bar):\n raise NotImplementedError()",
"def fulfill_order(self, **kwargs):\n return self.client.execute(\"order/fulfill-one\", \"POST\", kwargs)",
"def place_order(self):\n logger.info(\"\\n\\nPlacing Order.......\\n\\n\")\n self.place_order_button.click()\n time.sleep(5)",
"def complete_order(self):\n order_no = self.sendy_info.sendy_order_no\n self.state = 'shipping'\n self.save()\n return sendy.complete_delivery(order_no)",
"def test_create_confirm_order_details(self):\n pass",
"def fill_details(self):\n logger.info(\"\\n\\nFilling Order Details Form.......\\n\\n\")\n self.form_details()",
"def _is_order_filled(self):\r\n if self.filled_quantity == self.quantity:\r\n self.order_finish()",
"def _execute_order(self, order: Order, fill_price: float, fill_volume: int):\n timestamp = self._timer.now()\n commission = self._commission_model.calculate_commission(fill_volume, fill_price)\n\n transaction = Transaction(timestamp, order.ticker, fill_volume, fill_price, commission)\n\n self._monitor.record_transaction(transaction)\n self._portfolio.transact_transaction(transaction)",
"def test_poi_order_sync_post(self):\n pass",
"def execute_order(self, ib_order):\n # send the order to IB\n #self.create_fill_dict_entry(self.valid_id, ib_order)\n self.ib_conn.placeOrder(\n self.valid_id, self.contract, ib_order\n )\n\n # order goes through!\n time.sleep(1)\n\n # Increment the order ID TODO not sure we need to instanciate there\n self.valid_id += 1",
"def openOrderEnd(self):\n\n self._my_open_orders.put(FINISHED)",
"def complete_trade(self):\n # Obtain the row index that the incomplete order exists on\n pending_trade_index = self.trading_history.index[self.trading_history['Buy Completed Time'].isnull()].tolist()\n # If there are pending trades & a row exists for completion\n if self.get_pending_purchase() and pending_trade_index is not None:\n # Obtain the stock ticker at that row index. Values will return a list, so we use 0 index for the value\n pending_stock = self.trading_history.iloc[pending_trade_index]['Stock Ticker'].values[0]\n # Obtain the invested amount at that row index. Values will return a list, so we use 0 index for the value\n invested_capital = self.trading_history.iloc[pending_trade_index]['Buy Invested Amount'].values[0]\n current_stock_price = get_stock_price(pending_stock) # A function that obtains the current stock price\n # Obtain the buy time at that row index. Values will return a list, so we use 0 index for the value\n buy_time = self.trading_history.iloc[pending_trade_index]['Buy Submission Time'].values[0]\n current_time = pd.Timestamp.now() # Get the current time with the pandas method\n elapsed_time = (current_time - buy_time).total_seconds() # Delta time for elapsed time in seconds\n\n if elapsed_time > 60 * 10: # If it has been ten minutes since purchase initiation\n shares_holding = invested_capital / current_stock_price # Calculate shares holding\n self.trading_history.at[pending_trade_index, 'Buy Completed Time'] = current_time # Add completed time\n # Insert Completed Order Price\n self.trading_history.at[pending_trade_index, 'Completed Order Price'] = current_stock_price\n # Shares holding is invested capital divided by current stock price, insert into record\n self.trading_history.at[pending_trade_index, 'Shares Holding'] = shares_holding\n self.add_cash(-invested_capital) # Adjust cash balance for the trade\n self.set_invested_capital(invested_capital) # Set the transaction as the attribute\n self.set_pending_purchase(False) # Change attribute to close pending trade\n self.set_current_stock_holding(pending_stock) # Set the attribute to the current stock\n self.set_current_stock_purchase_price(current_stock_price) # Set the attribute as the purchase price\n self.set_current_number_of_shares(shares_holding) # Set the current number of shares holding\n self.dump_profile_to_pickle() # Save changes to profile\n return True # Return true, it was successful\n else: # No trades pending\n self.dump_profile_to_pickle() # Save changes to profile\n return False # Return false",
"def test_order_complete_order_completed(self):\n u = User.objects.get(username=\"test1\")\n u.userplan.expire = date.today() + timedelta(days=50)\n u.userplan.active = False\n u.userplan.save()\n plan_pricing = PlanPricing.objects.get(plan=u.userplan.plan, pricing__period=30)\n order = Order.objects.create(\n user=u,\n pricing=plan_pricing.pricing,\n amount=100,\n plan=plan_pricing.plan,\n completed=date(2010, 10, 10),\n )\n self.assertFalse(order.complete_order())",
"def complete_order(order_id, filename, url, proxy):\n order_json = read_work_order(order_id, filename)\n if order_json is None:\n raise LookupError(\"No order found with id %r\"%order_id)\n order = json.loads(order_json)\n message = make_complete(order)\n data = json.dumps(message, indent=4)\n print data\n if url:\n send_request(data, url, proxy)\n else:\n print \"\\nUse the --url argument to specify destination\"",
"def order_ready(request):\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\tif cs.current_order is not None:\n\t\tcs.current_order.status = 'ready-to-serve'\n\t\tcs.current_order.save()\n\t\tcs.current_order = None\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")",
"def test_orders_complete_post(self):\n body = OrdersCompletePostRequest()\n response = self.client.open(\n '/orders/complete',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts the auto-incremented id of a ShortURL object into a short-URL hash.
|
def encode(shorturl_id: int) -> str:
short_resource = []
while shorturl_id > 0:
character_index = shorturl_id % BASE
short_resource.append(CHARACTER_SPACE[character_index])
shorturl_id //= BASE
return "".join(short_resource[::-1])
|
[
"def _create_url_hash(url):\n try:\n url_obj = Urlshort.objects.create(hash_value=\"\", original_url=\"\")\n url_obj.save()\n url_obj = Urlshort.objects.latest(\"id\")\n\n url_hash = _url_id_encode(url_obj.id)\n url_encoded = url.encode(\"utf8\")\n Urlshort.objects.filter(id=url_obj.id).update(hash_value=url_hash, original_url=url_encoded)\n except :\n raise \n \n return url_hash",
"def shortURLToId(self, shortURL):\n id = 0\n for i in shortURL: \n val_i = ord(i) \n if(val_i >= ord('a') and val_i <= ord('z')): \n id = id*62 + val_i - ord('a') \n elif(val_i >= ord('A') and val_i <= ord('Z')): \n id = id*62 + val_i - ord('Z') + 26\n else: \n id = id*62 + val_i - ord('0') + 52\n return id",
"def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))",
"def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url",
"def short_hash():\n return hashlib.sha1(uuid.uuid1().bytes).hexdigest()",
"def self_assign_short_url(self):\n self.image_short_url = short_url.encode_url(self.id)\n return self.image_short_url",
"def encode(self, longUrl):\n shortUrl = \"http://tinyurl.com/\" + str(hash(longUrl))\n self.decode_map[shortUrl] = longUrl\n return shortUrl",
"def shortened_id(self):\n return str(self.id)[-8:]",
"def long_to_short(self, url, url_mobile=None, url_tablet=None):\n\n temp_short = uuid4() #temporary short code so we can get lastworid after insert\n query = 'INSERT into urls(short,default_url,mobile_url,tablet_url) VALUES (\"{short}\",\"{url}\",\"{mobile}\",\"{tablet}\");'.\\\n format(short=temp_short, url=url,\n mobile=url_mobile, tablet=url_tablet)\n with sq.connect(self.DB) as conn:\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n url_id = cursor.lastrowid + 1\n based_id = base36.encode(url_id)\n #Update to the definitive short url\n update_query = 'UPDATE urls SET short = \"{new_short}\" WHERE short = \"{temp_uuid}\";'.\\\n format(new_short=based_id, temp_uuid=temp_short)\n cursor.execute(update_query)\n return based_id\n except sq.OperationalError:\n print(\"ERROR\")\n return False\n except ValueError:\n return False",
"def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]",
"async def hash_link(link):\n return str(uuid.uuid3(uuid.NAMESPACE_URL, link))",
"def track_to_hash(track):\n return hashlib.sha1(track.encode('utf-8')).hexdigest()",
"def banner_hash(self) -> undefined.UndefinedNoneOr[str]:",
"def get_short_url(self, long_url):\n integer_value = self.dbconnections.dbhandler.insert_data_in_mysql(long_url)\n short_url = self.get_key_from_index(integer_value)\n if short_url:\n self.dbconnections.dbhandler.set_data_in_redis(short_url, long_url)\n return short_url\n return None",
"def _calculate_hash(self, entry):\n entry.pop('id', None)\n return hashlib.sha224(json.dumps(\n entry, cls=DjangoJSONEncoder).encode('utf-8')).hexdigest()",
"def short_id(self):\n return self._short_id",
"def encode(self, longUrl: str) -> str:\n hash_string = self.get_hash()\n self.url_mappings[hash_string] = longUrl\n return \"http://tinyurl.com/\" + hash_string",
"def createShortenedUrl():\n data = request.get_json(force=True)\n\n # checking the validity of the request body\n if not validateRequestBody(data):\n raise BadRequest('Request payload is malformed')\n\n # validate the provided slug is not in use\n if 'slug' in data:\n slug = data['slug']\n if ShortenedUrl.query.get(slug) != None:\n raise BadRequest('Slug is not unique')\n else:\n slug = uuid.uuid4().hex[:6].lower()\n # validate the generated slug is not in use\n while ShortenedUrl.query.get(slug) != None:\n slug = uuid.uuid4().hex[:6].lower()\n\n url = data['url']\n response = Response()\n returnObj = {\n 'url': url,\n 'slug': slug,\n 'shortened_url': '{}r/{}'.format(request.url_root, slug)\n }\n response.headers['location'] = '/r/{}'.format(slug)\n response.headers['Content-Type'] = 'application/json'\n response.status_code = 201\n response.data = json.dumps(returnObj)\n\n # create object and write to db\n shortenedUrl = ShortenedUrl(slug=slug, url=url)\n\n db.session.add(shortenedUrl)\n db.session.commit()\n\n return response",
"def save_shorten_url(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Overrides save() to first save and obtain the id, then computes the short-URL hash and saves it to the model, all within a transaction context.
|
def save(self, **kwargs):
res = super().save(**kwargs)
short_path_component = encode(res.id)
self.validated_data["short_path_component"] = short_path_component
return super().save(**kwargs)
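# The serializer above leans on framework machinery (validated_data and the two
# super().save() calls). Below is a framework-free sketch of the same two-phase
# pattern using sqlite3: insert to obtain the auto-incremented id, then derive
# the short path from that id and persist it, inside one transaction. The table
# and column names are made up, and the encode() helper shown earlier is assumed
# to be in scope.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE shorturl (id INTEGER PRIMARY KEY, url TEXT, short_path TEXT)")

with conn:  # both statements commit together, or neither does
    cur = conn.execute("INSERT INTO shorturl (url) VALUES (?)", ("https://example.com",))
    new_id = cur.lastrowid
    # Compute the short path from the freshly assigned id and store it.
    conn.execute("UPDATE shorturl SET short_path = ? WHERE id = ?", (encode(new_id), new_id))

row = conn.execute("SELECT short_path FROM shorturl WHERE id = ?", (new_id,)).fetchone()
assert row[0] == encode(new_id)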
|
[
"def save(self,\n force_insert=False,\n force_update=False,\n using=None,\n update_fields=None):\n # If the short url wasn't specified\n if not self.short_url:\n # We pass the model instance that is being saved\n self.short_url = create_shortened_url(self)\n\n super().save(force_insert, force_update, using, update_fields)",
"def save(self, force_insert=False, force_update=False, using=None):\n hash = hashlib.md5(str(self.id)+self.created.isoformat())\n self.vendor_tx_code = hash.hexdigest()\n super(SagePayTransaction, self).save(force_update, force_update, using)",
"def save(self, *args, **kwargs):\n if not self.unique_id:\n self.unique_id = self._generate_unique_id()\n if not self.price:\n self.price = self._update_price()\n super().save(*args, **kwargs)",
"def _create_url_hash(url):\n try:\n url_obj = Urlshort.objects.create(hash_value=\"\", original_url=\"\")\n url_obj.save()\n url_obj = Urlshort.objects.latest(\"id\")\n\n url_hash = _url_id_encode(url_obj.id)\n url_encoded = url.encode(\"utf8\")\n Urlshort.objects.filter(id=url_obj.id).update(hash_value=url_hash, original_url=url_encoded)\n except :\n raise \n \n return url_hash",
"def save_shorten_url(self):\n pass",
"def save(self, *args, **kwargs):\n\n if not self.id:\n self.date = timezone.now()\n\n # Could be done in templates. However, perhaps better to save\n # once, rather than evaluating it over and over again upon\n # accessing the template?\n if not self.steps:\n self.steps = 'No steps provided. Time to get creative!'\n if not self.description:\n self.description = 'No description provided.'\n\n i = 2 # user-friendly; if we find something, there are 2 instances\n slug = slugify(self.title)\n while Recipe.objects.filter(slug=slug):\n slug = f'{slug}-{i}'\n i += 1\n self.slug = slug\n self.title = capwords(self.title)\n\n return super(Recipe, self).save(*args, **kwargs)",
"def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)",
"def save(self, *args, **kwargs):\n if not self.tracking_number:\n self.tracking_number = self._generate_tracking_number()\n super().save(*args, **kwargs)",
"def post_save(sender, instance, created, **kwargs):\n if created and not instance.hash_key:\n instance.hash_key = hashlib.md5(\n str(instance.testing_center).encode('utf-8')\n + str(instance.course.course_run).encode('utf-8')\n + str(instance.course_event_id).encode('utf-8')\n + str(instance.proctor.pk).encode('utf-8')\n + str(instance.start_date).encode('utf-8')).hexdigest()\n instance.save()",
"def save(self, *args, **kwargs):\r\n\r\n if not self.trackerid:\r\n self.trackerid = generate_trackerid()\r\n super(Profile, self).save(*args, **kwargs)",
"def save(self):\n if self.id is None:\n self._insert()\n else:\n self._update()",
"def save(self, *args, **kwargs):\n\n if not self.trackerid:\n self.trackerid = generate_trackerid()\n super(Profile, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()",
"def save(self, update_site=False, *args, **kwargs):\n if update_site or not self.id:\n self.site_id = current_site_id()\n super(SiteRelated, self).save(*args, **kwargs)",
"def _save(self, **kwargs): #signal, sender, instance):\r\n tags = self._get_instance_tag_cache(kwargs['instance'])\r\n Tag.objects.update_tags(kwargs['instance'], tags)",
"def save(self, *args, **kwargs):\n if not self.primary_key:\n self.primary_key = slugify(self.title)\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.set_slug()\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n if self.pk is None:\n self.created = timezone.now()\n self.updated = timezone.now()\n super(Base, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.pk = None\n super(AbstractRevision, self).save(*args, **kwargs)\n self.tracked_model.current_revision = self\n self.tracked_model.save()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Attempts to place the mover into this room's contents. Returns a Boolean indicating success.
|
def contain(self, mover):
# Check if mover can exit old location
old_location = mover.location
if(not old_location):
return False
if(not old_location.allow_exit(mover)):
return False
# Check if mover can enter current location
if(not self.allow_entry(mover)):
return False
# Set new location
if(not self.contents):
self.contents = []
self.contents.append(mover)
mover.location = self
# Inform both locations of movement
if(old_location):
old_location.exited(mover)
self.entered(mover)
return True
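# A runnable usage sketch of the movement protocol above. Mover and the Room
# hook methods (allow_entry, allow_exit, entered, exited) are minimal
# assumptions; contain() is reproduced (with minor style cleanup) so the
# sketch runs standalone.
class Mover:
    def __init__(self, location=None):
        self.location = location


class Room:
    def __init__(self):
        self.contents = []

    def allow_entry(self, mover):
        return True

    def allow_exit(self, mover):
        return True

    def entered(self, mover):
        pass

    def exited(self, mover):
        pass

    def contain(self, mover):
        old_location = mover.location
        if not old_location:
            return False
        if not old_location.allow_exit(mover):
            return False
        if not self.allow_entry(mover):
            return False
        if not self.contents:
            self.contents = []
        self.contents.append(mover)
        mover.location = self
        if old_location:
            old_location.exited(mover)
        self.entered(mover)
        return True


lobby, vault = Room(), Room()
player = Mover(location=lobby)
lobby.contents.append(player)
assert vault.contain(player)        # both hooks allow the move
assert player.location is vault and player in vault.contents
assert not Room().contain(Mover())  # a mover with no location cannot move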
|
[
"def mover(self, mapa):\n # Obter nova posicao da cabeça \n prox_posicaoX = (self.corpo[CABECA][X] + self.vel[X]) % mapa.tamanho[X]\n prox_posicaoY = (self.corpo[CABECA][Y] + self.vel[Y]) % mapa.tamanho[Y]\n prox_posicao = [[prox_posicaoX, prox_posicaoY]]\n # Adicionar cabeca ao inicio da lista [Que representa neste caso a cobra]\n self.corpo = prox_posicao + self.corpo\n\n if prox_posicao == [mapa.maca]:\n # A posicao da cabeça vai ser a maca [Cobra comeu a maça]\n mapa.mapa[prox_posicao[0][X]][prox_posicao[0][Y]] = snake_sprite\n self.tamanho += 1\n\n self.tempo_comeu_maca2 = time()\n\n if (self.tempo_comeu_maca2 - self.tempo_comeu_maca) < TEMPO_BONUS:\n bonus = 2\n else:\n bonus = 1\n\n self.tempo_comeu_maca = self.tempo_comeu_maca2\n self.tempo_comeu_maca2 = 0\n\n self.score += (100 + self.tamanho * 5) * bonus\n\n mapa.nova_maca()\n\n elif self.morreu(mapa.paredes):\n # A posicao da cabeca vai ser o corpo da cobra\n return False\n else:\n # A posicao da cabeça vai vser um espaço vazio\n ultimo_el = self.corpo.pop(-1)\n mapa.update(prox_posicao[0], ultimo_el)\n return True",
"def allow_entry(self, mover):\n return True",
"def _move_misplaced_objects(self, broker, src_broker=None,\n src_bounds=None):\n self.debug(broker, 'Looking for misplaced objects')\n self._increment_stat('misplaced', 'attempted')\n src_broker = src_broker or broker\n if src_bounds is None:\n src_bounds = self._make_misplaced_object_bounds(broker)\n # (ab)use ShardRange instances to encapsulate source namespaces\n src_ranges = [ShardRange('dont/care', Timestamp.now(), lower, upper)\n for lower, upper in src_bounds]\n self.debug(broker, 'misplaced object source bounds %s', src_bounds)\n policy_index = broker.storage_policy_index\n success = True\n num_placed = num_unplaced = 0\n for src_shard_range in src_ranges:\n part_success, part_placed, part_unplaced = self._move_objects(\n src_broker, src_shard_range, policy_index,\n self._make_shard_range_fetcher(broker, src_shard_range))\n success &= part_success\n num_placed += part_placed\n num_unplaced += part_unplaced\n\n if num_placed or num_unplaced:\n # the found stat records the number of DBs in which any misplaced\n # rows were found, not the total number of misplaced rows\n self._increment_stat('misplaced', 'found', statsd=True)\n self.debug(broker, 'Placed %s misplaced objects (%s unplaced)',\n num_placed, num_unplaced)\n self._increment_stat('misplaced', 'success' if success else 'failure',\n statsd=True)\n self.debug(broker, 'Finished handling misplaced objects')\n return success",
"def test_object_move(self):\n self.assertTrue(self.obj1 in self.room1.contents)\n # use move_to hook\n self.obj1.move_to(self.room2)\n self.assertFalse(self.obj1 in self.room1.contents)\n self.assertTrue(self.obj1 in self.room2.contents)\n\n # move back via direct setting of .location\n self.obj1.location = self.room1\n self.assertTrue(self.obj1 in self.room1.contents)\n self.assertFalse(self.obj1 in self.room2.contents)",
"def allow_exit(self, mover):\n return True",
"def entered(self, mover):\n pass",
"def move_atoms(self):\n return self.abivars.ionmov != 0",
"def can_promote(self, location):\n piece = self.pieces[location]\n if isinstance(piece, King) or isinstance(piece, GoldGeneral) or piece.promoted:\n raise Exception(self.name, 'illegal move', 'Cannot promote {0}'.format(piece))",
"def has_moved(self):\n return self.move_count > 0",
"def can_be_moved(self, card):\n return is_one_rank_apart(self.waste_card, card)",
"def is_over(self):\n\t\treturn self.move_limit_reached() or not self.get_possible_moves()",
"def test_verify_move(self):\n self._verify([self.applied_commands['move']])",
"def move_and_sew():\r\n pass",
"def test_handle_move_side_effect(self, castle_mock, passant_mock):\n # TODO",
"def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):\n if len(source_chunk.paths) <= 1:\n return False\n\n move_time = source_chunk.paths[path_index].time\n\n new_source_badness = self._badness(source_chunk.time - move_time)\n new_target_badness = self._badness(target_chunk.time + move_time)\n\n delta_badness = ((new_source_badness + new_target_badness) -\n (source_chunk.badness + target_chunk.badness))\n if delta_badness < 0:\n move_func()\n return True\n\n return False",
"def game_over_animation(self):\r\n try:\r\n coord, block = self.landed.popitem()\r\n self.delete_block(block)\r\n del block\r\n return True\r\n except KeyError:\r\n return False",
"def legal_move(self, new_op):\n\n curr_node = self\n # ToDO: Make sure all agents are safe, i.e we went so far back that their max time is less than the min time of\n # The operation being tested.\n safe_agents = {new_op.agent} # Agents who cannot conflict with the new operation. Starts with only the moving agent\n\n while curr_node:\n # We aren't interested in nodes that were reached by operations performed by the current moving agent\n if curr_node.prev_op and curr_node.prev_op.agent == new_op.agent:\n curr_node = curr_node.prev_node\n continue\n\n if curr_node.creates_vertex_conflict(new_op):\n return False\n\n last_op = curr_node.prev_op\n if last_op and curr_node.creates_edge_conflict(last_op, new_op):\n return False\n\n if last_op and last_op.time[1] < new_op.time[0]:\n safe_agents.add(last_op.agent)\n if len(safe_agents) == len(self.curr_positions):\n return True\n curr_node = curr_node.prev_node\n\n return True",
"def perform_is_relocatable(self) -> bool:\n\t\treturn False",
"def move_piece(self, addr_from: str, addr_to: str) -> bool:\n\n pos_from = self.get_pos(addr_from)\n pos_to = self.get_pos(addr_to)\n piece = pos_from.piece\n\n if isinstance(piece, Piece):\n res = piece.move(pos_to)\n # print(f\"moved {piece} from {addr_from} to {addr_to}\")\n if res:\n self._push_move(res)\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determines if the mover is permitted to enter the room.
|
def allow_entry(self, mover):
return True
|
[
"def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n ToontownGlobals.GoofySpeedway,\n ):\n # trialer going to TTC/Estate/Goofy Speedway, let them through\n return True\n return False",
"def can_enter(self, direction):\n\n # Make a direction with all of the compass directions inverted so that we can see if the current room has an\n # Entrance that allows player to enter\n inverse_direction = {\"n\": \"s\", \"s\": \"n\", \"w\": \"e\", \"e\": \"w\"}\n direction = inverse_direction[direction]\n\n # Checks if the inverted direction is in the list of possible entrances\n if direction in self.entrances:\n return True",
"def is_legal(self, move):\n pass",
"def _roomVacant(self, room, timeslot):\n if room in [self._chromosome[x][1] for x in range(self._courseCount) if len(self._chromosome[x]) > 1 and self._chromosome[x][0] == timeslot]:\n return False\n return True",
"def allow_exit(self, mover):\n return True",
"def validate_can_enter(self, user, contest_pool):\n\n # the contest attempting to be joined\n target_skill_level = contest_pool.skill_level\n if target_skill_level.enforced == False:\n return # the skill level of this contest is not enforced -- anyone can join no matter what\n\n # find any enforced skill_levels we have an entry in not matching our target.\n # if any are found, that means we cant join and must raise exception\n entries = Entry.objects.filter(\n user=user,\n contest_pool__draft_group=contest_pool.draft_group,\n contest_pool__skill_level__enforced=True\n ).exclude(contest_pool__skill_level=target_skill_level)\n\n if entries.count() > 0:\n raise self.CanNotEnterSkillLevel()",
"def _user_has_room_access(user, room, manage=False):\n if user.is_superuser or user.is_admin_for(room.org):\n return True\n elif manage:\n return user.manage_rooms.filter(pk=room.pk).exists()\n else:\n return user.rooms.filter(pk=room.pk).exists()",
"def _checkPlayer(self):\r\n pawn = self.startCell.getPawn()\r\n if(not pawn.owner == self.player):\r\n message = (\"Player (%r) is not allowed to move that pawn (%r)\" %\r\n (self.player, pawn))\r\n raise IllegalMoveException(message)",
"def can_exist_outside_of_game(self):\n return True",
"def can_agent_occupy(self, r, c, agent):\n if not self.is_location_accessible(r, c, agent):\n return False\n if self.maps.unoccupied[r, c]:\n return True\n return False",
"def check_allowed(self):\n if self.state_model.op_state in [DevState.FAULT, DevState.UNKNOWN]:\n tango.Except.throw_exception(\n f\"Command TelescopeStandby is not allowed in current state {self.state_model.op_state}.\",\n \"Failed to invoke Standby command on CspMasterLeafNode.\",\n \"CspMasterLeafNode.TelescopeStandby()\",\n tango.ErrSeverity.ERR,\n )\n\n return True",
"def contain(self, mover):\n # Check if mover can exit old location\n old_location = mover.location\n if(not old_location):\n return False\n if(not old_location.allow_exit(mover)):\n return False\n # Check if mover can enter current location\n if(not self.allow_entry(mover)):\n return False\n # Set new location\n if(not self.contents):\n self.contents = []\n self.contents.append(mover)\n mover.location = self\n # Inform both locations of movement\n if(old_location):\n old_location.exited(mover)\n self.entered(mover)\n return True",
"def room(self, rooms):\n return self.state.current_room in rooms",
"def check_moveable(self, station):\n if len(station.trains) < 1 or station.station_type == \"E\":\n return True\n return False",
"def can_assign(self):\r\n return False",
"async def check_role_full(op: dict, role: str) -> bool:\n if role == \"Reserve\" or role == \"Any\":\n return False\n elif len(await Operations.find_role(op, role)) >= op[\"Size\"][1].get(role, 0):\n return True\n return False",
"def is_occupied(self):\n if (self.occupant != None):\n return True\n else:\n return False",
"def __valid_arguments(self):\n return len(self.rooms) == self.room_num",
"def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Determines if the mover is permitted to exit the room.
|
def allow_exit(self, mover):
return True
|
[
"def check_exit(self, position, direction):\n if self.get_room((position[0] + direction[0], position[1] + direction[1])):\n return True\n return False",
"def is_exit(self, state):\n if state.cell.x == self.maze.exit_x and state.cell.y == self.maze.exit_y:\n return True\n else:\n return False",
"def is_current_level_unlocked(self):\n return self.current_level.level_exit_unlocked",
"def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n ToontownGlobals.GoofySpeedway,\n ):\n # trialer going to TTC/Estate/Goofy Speedway, let them through\n return True\n return False",
"def check_stop_game(self):\n if len(self.actions) < 15 and self.snake_parts[-1] == self.special_coord:\n return True\n return False",
"def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame",
"def endState(self):\n return not(self.state.winner() == None and len(self.state.get_actions()) > 0)",
"def can_exist_outside_of_game(self):\n return True",
"def _check_whether_game_end(self):\n flag = False\n if len(self.asteroids_list) == 0:\n self.__screen.show_message(\"Congratulations\", \"You won, no more \" \n \"asteroids\")\n flag = True\n if self.__ship.life == 0:\n self.__screen.show_message(\"Game over!\", \"no more life\")\n flag = True\n if self.__screen.should_end():\n self.__screen.show_message(\"End of the game\", \"You choose to exit\")\n flag = True\n if flag:\n self.__screen.end_game()\n sys.exit()",
"def _reached_exit(self):\n for exit_neighbour in self.model.grid.get_neighborhood(self._goal, moore=True):\n if self.pos == exit_neighbour:\n return True\n return False",
"def _is_last_admin_leaving(\n event: EventBase,\n power_level_content: dict,\n state_events: StateMap[EventBase],\n) -> bool:\n # Get every admin user defined in the room's state\n admin_users = {\n user\n for user, power_level in power_level_content[\"users\"].items()\n if power_level >= 100\n }\n\n if event.sender not in admin_users:\n # This user is not an admin, ignore them\n return False\n\n if any(\n event_type == EventTypes.Member\n and event.membership in [Membership.JOIN, Membership.INVITE]\n and state_key in admin_users\n and state_key != event.sender\n for (event_type, state_key), event in state_events.items()\n ):\n # There's another admin user in, or invited to, the room\n return False\n\n return True",
"def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)",
"def reservation_mark_exit(user: User, reservation: Reservation):\n owns_restaurant = reservation.restaurant.operator == user\n if owns_restaurant and reservation.status is ReservationState.SEATED:\n #Might want to add user notification\n reservation.exit_time = datetime.datetime.now()\n reservation.status = ReservationState.DONE\n db.session.commit()\n return True\n\n return False",
"def level_unlocked(self) -> bool:\r\n return self.player_profile.is_level_unlocked(self.level_num)",
"def checkMissionEnd(self) -> bool:\n if getTimestamp() - self.mission['timestamp'] < self.TAKE_OFF_DELAY:\n return False\n drone: Drone\n for drone in self.dronesSet.getDrones().values():\n if drone['state'] != 'onTheGround' and drone['state'] != 'crashed':\n return False\n\n self.endMission()\n return True",
"def exited(self, mover):\n pass",
"def allow_entry(self, mover):\n return True",
"def unlock(self) -> bool:\n if not self.can_unlock(False):\n self.locked = False\n return True\n return False",
"def __is_exit_key_valid(rooms, exit_door, key):\n if not isinstance(exit_door, tuple) or not isinstance(key, tuple):\n return False\n if len(exit_door) != 2 or len(key) != 2 or exit_door == key:\n if len(key) == 0:\n return True\n return False\n\n rooms_info = {}\n for room in rooms:\n rooms_info.update(room.convert_coord_to_map_dic())\n if len(key) != 0 and rooms_info.get(exit_door) == Tile.Empty and rooms_info.get(key) == Tile.Empty:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called after the mover has entered the room.
|
def entered(self, mover):
pass
|
[
"def exited(self, mover):\n pass",
"def after_turn(self):\n pass",
"def game_over(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_lose_label)\n Clock.schedule_once(self.goto_menu, 5)",
"def leaving(self):\n pass",
"def complete_room(self):\n assert not self.complete, 'Trying to complete an already complete room!'\n self.complete = True\n self.reveal_exit()",
"def game_ended(self):\n\t\tpass",
"def player_joined(self):\n\n self.in_game = True",
"def leave(self):\n pass",
"def event_game_over(self):\n print('Game over!')\n self._cmd_exit()",
"def out_of_canvas(self, event):\n if not self.__game.is_game_over():\n if self.__list_of_items:\n self._delete_cursor()",
"def when_enter(self, Actor):\n pass",
"def state_finish_enter(cfg, app, win):",
"def _handle_disconnected(self, event):\n self.roster = {}",
"def at_after_move(self, source_location):\n pass",
"def end_ride(self):\n # TODO\n self.location = self.destination\n self.is_idle = True\n self.destination = None",
"def on_leave(self, room, user):\n pass",
"def leave(self):\n p = GameOverPopup(self)\n p.open()",
"def finished(self):\n pass",
"def postloop(self):\n # print \"exiting tournament command loop\"\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called after the mover has exited the room.
|
def exited(self, mover):
pass
|
[
"def leave(self):\n pass",
"def leaving(self):\n pass",
"def end_game(self) -> None:\n pass",
"def game_ended(self):\n\t\tpass",
"def after_turn(self):\n pass",
"def entered(self, mover):\n pass",
"def on_leave(self, room, user):\n pass",
"def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None",
"def post_leave_channel(self, leaver):\n self.msg(\"%s has left this channel.\" % leaver.name)\n pass",
"async def on_room_deinit(self, room_obj):\n pass",
"def _handle_disconnected(self, event):\n self.roster = {}",
"def game_over(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_lose_label)\n Clock.schedule_once(self.goto_menu, 5)",
"def complete_room(self):\n assert not self.complete, 'Trying to complete an already complete room!'\n self.complete = True\n self.reveal_exit()",
"def leave(self):\n p = GameOverPopup(self)\n p.open()",
"def endCompetition(self):\n self.robot_exit = True",
"def end_ride(self):\n # TODO\n self.location = self.destination\n self.is_idle = True\n self.destination = None",
"def event_game_over(self):\n print('Game over!')\n self._cmd_exit()",
"def postloop(self):\n # print \"exiting tournament command loop\"\n pass",
"def exit_game(self):\n state.set_new(EXIT)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a tree from a list. The first element is the root value; the remaining elements are child nodes (values or subtrees).
|
def construct(lst):
t = Tree()
t.root = lst[0]
for node in lst[1:]:
if isinstance(node, list):
t.nodes.append(construct(node))
else:
t.nodes.append(node)
return t
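# Worked example of construct(), assuming a Tree with a root value and a flat
# nodes list (the original Tree class is not shown, so this shape is assumed).
class Tree:
    def __init__(self):
        self.root = None
        self.nodes = []


def construct(lst):
    t = Tree()
    t.root = lst[0]
    for node in lst[1:]:
        if isinstance(node, list):
            t.nodes.append(construct(node))
        else:
            t.nodes.append(node)
    return t


t = construct(["a", "b", ["c", "d", "e"], "f"])
assert t.root == "a"
assert t.nodes[0] == "b" and t.nodes[2] == "f"
assert isinstance(t.nodes[1], Tree) and t.nodes[1].root == "c"
assert t.nodes[1].nodes == ["d", "e"]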
|
[
"def _make_tree(pddl_list):\n\n root = PDDL_Tree(pddl_list[0])\n\n for child in pddl_list[1:]:\n if isinstance(child, list):\n if len(child) == 0:\n root.add_child(PDDL_Tree(PDDL_Tree.EMPTY))\n else:\n subtree = PDDL_Tree._make_tree(child)\n root.add_child(subtree)\n else:\n root.add_child(PDDL_Tree(child))\n\n return root",
"def create_tree_from_list(values: List[int], root: BinaryTreeNode, parent: BinaryTreeNode,\r\n i: int, length: int) -> BinaryTreeNode:\r\n if i < length:\r\n if values[i] is not None:\r\n root = BinaryTreeNode(data=values[i], parent=parent)\r\n root.left = create_tree_from_list(values, root.left, root, 2 * i + 1, length)\r\n root.right = create_tree_from_list(values, root.right, root, 2 * i + 2, length)\r\n\r\n return root",
"def build_from_list(cls, inlist=[]):\n\n tree = BinaryTree()\n for x in inlist:\n tree.build(val=x)\n\n return tree",
"def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n # Tree indices work as follows:\n # 0 is the root\n # 2n+1 is the left child of n\n # 2n+2 is the right child of n\n # So we now rearrange `values` into that format...\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[:2 * len_ragged_row:2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1:2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree",
"def make_tree(list, category):\n\t#takes in a list separated by + and - and puts it into a very left-heavy tree\n\ttree = list[0]\n\tlist.pop(0)\n\tfor i in range(0, len(list)):\n\t\tif list[i] in category:\n\t\t\tsubtree = list[i+1]\n\t\t\tif subtree.count('*') or subtree.count('/'):\n\t\t\t\tsubtree = split_ops(subtree, ['*', '/'])\n\t\t\t\tsubtree = make_tree(subtree, ['*', '/'])\n\t\t\ttree = [list[i], tree, subtree] \n\treturn tree",
"def build_tree_from_preorder(values): \r\n \r\n if len(values) == 0 or values[0] == None:\r\n return None\r\n root = TreeNode(values[0])\r\n if len(values) == 1:\r\n return root\r\n root.left = build_tree_from_preorder(values[1:((len(values)-1) // 2 + 1)])\r\n root.right = build_tree_from_preorder(values[((len(values)-1) // 2 + 1):]) \r\n if root.left != None:\r\n root.left.parent = root\r\n if root.right != None:\r\n root.right.parent = root\r\n \r\n return root",
"def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[: 2 * len_ragged_row : 2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1 : 2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree",
"def genTree(lst, i=1):\n if lst and i <= len(lst) and lst[i-1] is not None:\n node = TreeNode(lst[i-1])\n node.left = genTree(lst, i*2)\n node.right = genTree(lst, i*2+1)\n return node",
"def list2tree(node):\n if isinstance(node, list):\n tree = []\n for child in node:\n tree.append(list2tree(child))\n return nltk.Tree('<l>', tree)\n elif isinstance(node, dict):\n return nltk.Tree(node['tag'], [node['word']])",
"def make_node(value, left=None, right=None):\n return [value, left, right]",
"def makeMinTree(lst):\n if len(lst) == 0:\n return None\n root = Node(lst[0])\n needs_children = [root]\n for i in xrange(1, len(lst), 2):\n n = needs_children[0]\n needs_children = needs_children[1:]\n n.left = Node(lst[i])\n if i+1 == len(lst):\n break\n n.right = Node(lst[i+1])\n needs_children.append(n.left)\n needs_children.append(n.right)\n return root",
"def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root",
"def _sorted_list_to_bst(cls, items=[], start=None, end=None, parent=None):\n if start > end:\n return None\n mid = start + (end - start) // 2\n node = Node(items[mid], parent)\n node.left = cls._sorted_list_to_bst(items, start, mid - 1, node)\n node.right = cls._sorted_list_to_bst(items, mid + 1, end, node)\n return node",
"def create_large_tree():\n value_of_nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'a', 'b', 'c', 'd', 'e']\n tree = ''\n depth = 0\n count = 0\n\n while depth < 4:\n if depth == 0:\n tree = [value_of_nodes[0], [], []]\n depth += 1\n count += 1\n elif depth == 1:\n for i in [1,2]:\n tree[i] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n elif depth == 2:\n for i,j in itertools.product([1,2], repeat=depth):\n tree[i][j] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n elif depth == 3:\n for i, j, k in itertools.product([1,2], repeat=depth):\n tree[i][j][k] = [value_of_nodes[count], [], []]\n count += 1\n depth += 1\n return tree",
"def from_value(cls, value: Any) -> MappingTree:\n if isinstance(value, MappingTree):\n return value\n elif isinstance(value, list):\n return ListNode([\n MappingTree.from_value(child)\n for child in value\n ])\n elif isinstance(value, dict):\n return DictNode({\n key: MappingTree.from_value(child)\n for key, child in value.items()\n })\n else:\n return ValueNode(value)",
"def __init__(self, value: T):\n self.value = value\n self.children: List[Tree] = []",
"def make_tree(arr):\n\n for i in range(len(arr)):\n arr, val = mid(arr)\n\n if i == 0: \n binary = BinaryNode(val)\n\n else:\n binary.insert(val)\n\n return binary",
"def build_tree(elements):\n print(\"Building tree with these elements:\",elements)\n root = BinarySearchTreeNode(elements[0])\n\n for i in range(1, len(elements)):\n root.add_child(elements[i])\n\n return root",
"def build_simple_tree():\n node = TreeNode(1)\n node.left = TreeNode(2)\n node.right = TreeNode(3)\n node.right.left = TreeNode(4)\n node.right.right = TreeNode(5)\n return node"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a list of dicts to an ndarray of type np.float32.
|
def dicts2ndarray(data_dicts):
# NEVER make any assumption about the order of .keys() return
aps = [ap for ap in data_dicts[0].keys() if ap != 'tag']
aps.sort()
data_num = len(data_dicts)
data_len = len(data_dicts[0][aps[0]])
ndary = np.zeros([data_num, len(aps), data_len], dtype=np.float32)
for idx, d in enumerate(data_dicts):
for aidx, ap in enumerate(aps):
ndary[idx, aidx, :] = d[ap]
return ndary
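# A minimal usage sketch; assumes `import numpy as np` (not shown in the
# snippet) and that every dict shares the same non-'tag' keys, each mapping to
# an equal-length sequence.
import numpy as np

sample = [
    {'tag': 'a', 'ap0': [1.0, 2.0], 'ap1': [3.0, 4.0]},
    {'tag': 'b', 'ap0': [5.0, 6.0], 'ap1': [7.0, 8.0]},
]
arr = dicts2ndarray(sample)  # shape (2, 2, 2), dtype float32, APs in sorted order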
|
[
"def dict2array(datadict):\n data = np.zeros(len(datadict.keys()), len(datadict[datadict.keys()[0]]))\n idx = 0\n for key in datadict.keys():\n data[i] = np.asarray(datadict[key], dtype=float32)\n\n return data",
"def translate_pandas_to_numpy(data_list:list) -> list:\n list_size = len(data_list)\n for i in range(list_size):\n data_list[i] = data_list[i].values.astype('float32')\n return data_list",
"def list_dict_to_float(l):\n for d in l:\n d = dict_to_float(d)\n\n return l",
"def convert_dict_to_ndarray(*dictionaries):\n\n array_list = []\n\n # Loop all dicts\n for dictionary in dictionaries:\n # Loop all keys\n for key in dictionary.keys():\n # Skip non-ndarray types\n if not isinstance(dictionary[key], np.ndarray):\n continue\n # Append each item to a list\n array_list.append(dictionary[key])\n\n # Check non-uniform length between arrays\n for item in array_list:\n assert len(item) == len(array_list[0]), 'All arrays must have the same length'\n\n return np.vstack(array_list) # .swapaxes(0, 1)",
"def dict_list_to_dict_np(input_dict):\n input_dict = copy.copy(input_dict)\n if type(input_dict) is dict:\n for name in list(input_dict.keys()):\n input_dict.update({name: np.array(input_dict[name])})\n return input_dict\n else:\n return np.array(input_dict)",
"def tensor_dict_to_ndarray_dict(tensor_dict): # -> dict[Unknown, Unknown]:\n ...",
"def _concat_dict(xlist: List[TData]) -> TData:\n out_dict = {}\n for k, v in xlist[0].items():\n if isinstance(v, np.ndarray):\n out_dict[k] = np.concatenate([di[k] for di in xlist], axis=0)\n else:\n out_dict[k] = _concat_dict([di[k] for di in xlist])\n return out_dict",
"def stack_ndarray_dicts(lst, axis=0):\n res = {}\n for k in lst[0].keys():\n res[k] = np.stack([d[k] for d in lst], axis=axis)\n return res",
"def to_ndarray(item):\n \n return type(item), sp.array(item, sp.float64, ndmin=1)",
"def dict_to_array(self, d):\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n n_wc = len(self.fit_wc_names)\n arr = np.zeros(n_fit_p + n_nui_p + n_wc)\n arr[:n_fit_p] = [d['fit_parameters'][p] for p in self.fit_parameters]\n arr[n_fit_p:n_fit_p+n_nui_p] = [d['nuisance_parameters'][p] for p in self.nuisance_parameters]\n arr[n_fit_p+n_nui_p:] = [d['fit_wc'][c] for c in self.fit_wc_names]\n return arr",
"def flatten(data, name_list):\n if 'ip3d_ntrk' in name_list:\n tens = np.ones_like(data['ip3d_ntrk']) * 10.0\n print(data['ip3d_ntrk'].shape)\n data['ip3d_ntrk'] = tens\n print('hacking')\n ftype = [(name, float) for name in name_list]\n flat = data.astype(ftype).view(float).reshape(data.shape + (-1,))\n flat = flat.swapaxes(1, len(data.shape))\n if len(flat.shape) > 2:\n flat = np.reshape(flat, (flat.shape[0], flat.shape[1] * flat.shape[2]))\n return flat",
"def array_from_dict_values(dct, sorted_keys=None, flat_output=False, dtype=np.float):\n if sorted_keys is None:\n sorted_keys = sorted(dct)\n iterable_values = isinstance(dct[sorted_keys[0]], collections.abc.Iterable)\n if iterable_values:\n it = itertools.chain.from_iterable(dct[key] for key in sorted_keys)\n else:\n it = (dct[key] for key in sorted_keys)\n\n flat_arr = np.fromiter(it, dtype=dtype)\n if flat_output:\n return flat_arr\n return flat_arr.reshape((len(dct), -1))",
"def unwrap(list_of_datastorages):\n retout = DataStorage()\n for key in list_of_datastorages[0].keys():\n retout[key] = np.asarray([r[key] for r in list_of_datastorages])\n return retout\n useful",
"def dict_to_numpyarr_extractFeature( idict, ifeaturelist):\r\n\r\n result_list = []\r\n \r\n for key in sorted(idict.keys()):\r\n tmp_list = []\r\n for feature in ifeaturelist:\r\n value = idict[key][feature]\r\n tmp_list.append( float(value) )\r\n\r\n # Add the value to the result array\r\n output = True\r\n # Exclude the first feature, if the feature is a classification label\r\n if ifeaturelist[0] == 'champs':\r\n test_list = tmp_list[1:]\r\n else:\r\n test_list = tmp_list\r\n \r\n if output:\r\n result_list.append( np.array(tmp_list) )\r\n\r\n return np.array(result_list)",
"def list_to_numpy_f32(\n long_list: list\n) -> np.ndarray:\n np_array = (\n np.zeros(\n [len(max(long_list, key=lambda x: len(x))), len(long_list)],\n dtype=np.float32,\n )\n - 1\n )\n for i, j in enumerate(long_list):\n np_array[0 : len(j), i] = j\n\n return np_array",
"def dict_to_array(dicti):\n vals = [i for i in dicti.keys()]\n counts = [i for _, i in dicti.items()]\n return np.repeat(vals, counts)",
"def _construct_data_array(data, var_names_list):\n # It considers the argument data as a structured array\n\n if isinstance(data, np.ndarray) and isinstance(data.dtype.names, tuple):\n return data.view(float)\n\n else:\n return data",
"def arr_from_dict(dct, datatype):\n tpl = tuple(dct[name] for name in datatype.names)\n return np.array([tpl], dtype=datatype)",
"def convert_dict_to_numpy(dict_data):\n\tdata, target, labels = [], [], []\n\tfor county in dict_data:\n\t\tdata.append(dict_data[county][:-1])\n\t\ttarget.append(dict_data[county][-1])\n\t\tlabels.append(county)\n\tdata, target, labels = np.array(data), np.array(target), np.array(labels)\n\treturn data, target, labels"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assembles a list of circuits into a qobj which can be run on the backend.
|
def assemble_circuits(circuits, run_config=None, qobj_header=None, qobj_id=None):
qobj_header = qobj_header or QobjHeader()
run_config = run_config or RunConfig()
if isinstance(circuits, QuantumCircuit):
circuits = [circuits]
userconfig = QobjConfig(**run_config.to_dict())
experiments = []
max_n_qubits = 0
max_memory_slots = 0
for circuit in circuits:
# header stuff
n_qubits = 0
memory_slots = 0
qubit_labels = []
clbit_labels = []
qreg_sizes = []
creg_sizes = []
for qreg in circuit.qregs:
qreg_sizes.append([qreg.name, qreg.size])
for j in range(qreg.size):
qubit_labels.append([qreg.name, j])
n_qubits += qreg.size
for creg in circuit.cregs:
creg_sizes.append([creg.name, creg.size])
for j in range(creg.size):
clbit_labels.append([creg.name, j])
memory_slots += creg.size
        # TODO: why do we need creg_sizes and qreg_sizes in header
# TODO: we need to rethink memory_slots as they are tied to classical bit
experimentheader = QobjExperimentHeader(qubit_labels=qubit_labels,
n_qubits=n_qubits,
qreg_sizes=qreg_sizes,
clbit_labels=clbit_labels,
memory_slots=memory_slots,
creg_sizes=creg_sizes,
name=circuit.name)
# TODO: why do we need n_qubits and memory_slots in both the header and the config
experimentconfig = QobjExperimentConfig(n_qubits=n_qubits, memory_slots=memory_slots)
instructions = []
for opt in circuit.data:
current_instruction = QobjInstruction(name=opt.name)
if opt.qargs:
qubit_indices = [qubit_labels.index([qubit[0].name, qubit[1]])
for qubit in opt.qargs]
current_instruction.qubits = qubit_indices
if opt.cargs:
clbit_indices = [clbit_labels.index([clbit[0].name, clbit[1]])
for clbit in opt.cargs]
current_instruction.memory = clbit_indices
if opt.params:
params = list(map(lambda x: x.evalf(), opt.params))
params = [sympy.matrix2numpy(x, dtype=complex)
if isinstance(x, sympy.Matrix) else x for x in params]
if len(params) == 1 and isinstance(params[0], numpy.ndarray):
# TODO: Aer expects list of rows for unitary instruction params;
# change to matrix in Aer.
params = params[0]
current_instruction.params = params
# TODO (jay): I really dont like this for snapshot. I also think we should change
# type to snap_type
if opt.name == "snapshot":
current_instruction.label = str(opt.params[0])
current_instruction.type = str(opt.params[1])
if opt.control:
mask = 0
for clbit in clbit_labels:
if clbit[0] == opt.control[0].name:
mask |= (1 << clbit_labels.index(clbit))
current_instruction.conditional = QobjConditional(mask="0x%X" % mask,
type='equals',
val="0x%X" % opt.control[1])
instructions.append(current_instruction)
experiments.append(QobjExperiment(instructions=instructions, header=experimentheader,
config=experimentconfig))
if n_qubits > max_n_qubits:
max_n_qubits = n_qubits
if memory_slots > max_memory_slots:
max_memory_slots = memory_slots
userconfig.memory_slots = max_memory_slots
userconfig.n_qubits = max_n_qubits
return Qobj(qobj_id=qobj_id or str(uuid.uuid4()), config=userconfig,
experiments=experiments, header=qobj_header,
type=QobjType.QASM.value)
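# A minimal usage sketch; assumes the legacy qiskit-terra names
# QuantumRegister, ClassicalRegister, QuantumCircuit and RunConfig are already
# in scope (they are not imported in this snippet).
qr = QuantumRegister(2, name='q')
cr = ClassicalRegister(2, name='c')
bell = QuantumCircuit(qr, cr, name='bell')
bell.h(qr[0])
bell.cx(qr[0], qr[1])
bell.measure(qr, cr)

qobj = assemble_circuits(bell, run_config=RunConfig(shots=1024))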
|
[
"def assemble_circuits(circuits, qobj_id=None, qobj_header=None, run_config=None):\n qobj_config = QasmQobjConfig()\n if run_config:\n qobj_config = QasmQobjConfig(**run_config.to_dict())\n\n # Pack everything into the Qobj\n experiments = []\n max_n_qubits = 0\n max_memory_slots = 0\n for circuit in circuits:\n # header stuff\n n_qubits = 0\n memory_slots = 0\n qubit_labels = []\n clbit_labels = []\n\n qreg_sizes = []\n creg_sizes = []\n for qreg in circuit.qregs:\n qreg_sizes.append([qreg.name, qreg.size])\n for j in range(qreg.size):\n qubit_labels.append([qreg.name, j])\n n_qubits += qreg.size\n for creg in circuit.cregs:\n creg_sizes.append([creg.name, creg.size])\n for j in range(creg.size):\n clbit_labels.append([creg.name, j])\n memory_slots += creg.size\n\n # TODO: why do we need creq_sizes and qreg_sizes in header\n # TODO: we need to rethink memory_slots as they are tied to classical bit\n experimentheader = QobjExperimentHeader(qubit_labels=qubit_labels,\n n_qubits=n_qubits,\n qreg_sizes=qreg_sizes,\n clbit_labels=clbit_labels,\n memory_slots=memory_slots,\n creg_sizes=creg_sizes,\n name=circuit.name)\n # TODO: why do we need n_qubits and memory_slots in both the header and the config\n experimentconfig = QasmQobjExperimentConfig(n_qubits=n_qubits, memory_slots=memory_slots)\n\n # Convert conditionals from QASM-style (creg ?= int) to qobj-style\n # (register_bit ?= 1), by assuming device has unlimited register slots\n # (supported only for simulators). Map all measures to a register matching\n # their clbit_index, create a new register slot for every conditional gate\n # and add a bfunc to map the creg=val mask onto the gating register bit.\n\n is_conditional_experiment = any(op.control for (op, qargs, cargs) in circuit.data)\n max_conditional_idx = 0\n\n instructions = []\n for op_context in circuit.data:\n instruction = op_context[0].assemble()\n\n # Add register attributes to the instruction\n qargs = op_context[1]\n cargs = op_context[2]\n if qargs:\n qubit_indices = [qubit_labels.index([qubit[0].name, qubit[1]])\n for qubit in qargs]\n instruction.qubits = qubit_indices\n if cargs:\n clbit_indices = [clbit_labels.index([clbit[0].name, clbit[1]])\n for clbit in cargs]\n instruction.memory = clbit_indices\n # If the experiment has conditional instructions, assume every\n # measurement result may be needed for a conditional gate.\n if instruction.name == \"measure\" and is_conditional_experiment:\n instruction.register = clbit_indices\n\n # To convert to a qobj-style conditional, insert a bfunc prior\n # to the conditional instruction to map the creg ?= val condition\n # onto a gating register bit.\n if hasattr(instruction, '_control'):\n ctrl_reg, ctrl_val = instruction._control\n mask = 0\n val = 0\n for clbit in clbit_labels:\n if clbit[0] == ctrl_reg.name:\n mask |= (1 << clbit_labels.index(clbit))\n val |= (((ctrl_val >> clbit[1]) & 1) << clbit_labels.index(clbit))\n\n conditional_reg_idx = memory_slots + max_conditional_idx\n conversion_bfunc = QasmQobjInstruction(name='bfunc',\n mask=\"0x%X\" % mask,\n relation='==',\n val=\"0x%X\" % val,\n register=conditional_reg_idx)\n instructions.append(conversion_bfunc)\n instruction.conditional = conditional_reg_idx\n max_conditional_idx += 1\n # Delete control attribute now that we have replaced it with\n # the conditional and bfuc\n del instruction._control\n\n instructions.append(instruction)\n\n experiments.append(QasmQobjExperiment(instructions=instructions, header=experimentheader,\n config=experimentconfig))\n if n_qubits 
> max_n_qubits:\n max_n_qubits = n_qubits\n if memory_slots > max_memory_slots:\n max_memory_slots = memory_slots\n\n qobj_config.memory_slots = max_memory_slots\n qobj_config.n_qubits = max_n_qubits\n\n return QasmQobj(qobj_id=qobj_id,\n config=qobj_config,\n experiments=experiments,\n header=qobj_header)",
"def test_assemble_multiple_circuits(self):\n qr0 = QuantumRegister(2, name='q0')\n qc0 = ClassicalRegister(2, name='c0')\n circ0 = QuantumCircuit(qr0, qc0, name='circ0')\n circ0.h(qr0[0])\n circ0.cx(qr0[0], qr0[1])\n circ0.measure(qr0, qc0)\n\n qr1 = QuantumRegister(3, name='q1')\n qc1 = ClassicalRegister(3, name='c1')\n circ1 = QuantumCircuit(qr1, qc1, name='circ0')\n circ1.h(qr1[0])\n circ1.cx(qr1[0], qr1[1])\n circ1.cx(qr1[0], qr1[2])\n circ1.measure(qr1, qc1)\n\n run_config = RunConfig(shots=100, memory=False, seed=6)\n qobj = assemble_circuits([circ0, circ1], run_config=run_config)\n self.assertIsInstance(qobj, QasmQobj)\n self.assertEqual(qobj.config.seed, 6)\n self.assertEqual(len(qobj.experiments), 2)\n self.assertEqual(qobj.experiments[1].config.n_qubits, 3)\n self.assertEqual(len(qobj.experiments), 2)\n self.assertEqual(len(qobj.experiments[1].instructions), 6)",
"def qobj_to_circuits(qobj):\n if qobj.experiments:\n circuits = []\n for x in qobj.experiments:\n if hasattr(x.header, 'compiled_circuit_qasm'):\n circuits.append(\n QuantumCircuit.from_qasm_str(x.header.compiled_circuit_qasm))\n return circuits\n # TODO(mtreinish): add support for converting a qobj if the qasm isn't\n # embedded in the header\n return None",
"def qobj_to_circuits(qobj):\n if qobj.experiments:\n circuits = []\n for x in qobj.experiments:\n if hasattr(x.header, 'compiled_circuit_qasm'):\n circuits.append(\n load_qasm_string(x.header.compiled_circuit_qasm))\n return circuits\n # TODO(mtreinish): add support for converting a qobj if the qasm isn't\n # embedded in the header\n return None",
"def execute(circuits, backend,\n config=None, basis_gates=None, coupling_map=None, initial_layout=None,\n shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None,\n skip_transpiler=False):\n qobj = compile(circuits, backend,\n config, basis_gates, coupling_map, initial_layout,\n shots, max_credits, seed, qobj_id, hpc,\n skip_transpiler)\n return backend.run(qobj)",
"def qv_circuits(qubit_lists=None, ntrials=1,\n qr=None, cr=None):\n\n circuits = [[] for e in range(ntrials)]\n circuits_nomeas = [[] for e in range(ntrials)]\n\n # get the largest qubit number out of all the lists (for setting the\n # register)\n\n depth_list = [len(qubit_list) for qubit_list in qubit_lists]\n\n # go through for each trial\n for trial in range(ntrials):\n\n # go through for each depth in the depth list\n for depthidx, depth in enumerate(depth_list):\n\n n_q_max = np.max(qubit_lists[depthidx])\n\n qr = qiskit.QuantumRegister(int(n_q_max+1), 'qr')\n qr2 = qiskit.QuantumRegister(int(depth), 'qr')\n cr = qiskit.ClassicalRegister(int(depth), 'cr')\n\n qc = qiskit.QuantumCircuit(qr, cr)\n qc2 = qiskit.QuantumCircuit(qr2, cr)\n\n qc.name = 'qv_depth_%d_trial_%d' % (depth, trial)\n qc2.name = qc.name\n\n # build the circuit\n for _ in range(depth):\n # Generate uniformly random permutation Pj of [0...n-1]\n perm = np.random.permutation(depth)\n # For each pair p in Pj, generate Haar random SU(4)\n for k in range(int(np.floor(depth/2))):\n unitary = random_unitary(4)\n pair = int(perm[2*k]), int(perm[2*k+1])\n qc.append(unitary, [qr[qubit_lists[depthidx][pair[0]]],\n qr[qubit_lists[depthidx][pair[1]]]])\n qc2.append(unitary, [qr2[pair[0]],\n qr2[pair[1]]])\n\n # append an id to all the qubits in the ideal circuits\n # to prevent a truncation error in the statevector\n # simulators\n qc2.u1(0, qr2)\n\n circuits_nomeas[trial].append(qc2)\n\n # add measurement\n for qind, qubit in enumerate(qubit_lists[depthidx]):\n qc.measure(qr[qubit], cr[qind])\n\n circuits[trial].append(qc)\n\n return circuits, circuits_nomeas",
"def execute(circuits, backend,\n config=None, basis_gates=None, coupling_map=None, initial_layout=None,\n shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None,\n skip_transpiler=False):\n # pylint: disable=missing-param-doc, missing-type-doc\n if isinstance(backend, str):\n backend = _DEFAULT_PROVIDER.get_backend(backend)\n qobj = compile(circuits, backend,\n config, basis_gates, coupling_map, initial_layout,\n shots, max_credits, seed, qobj_id, hpc,\n skip_transpiler)\n return backend.run(qobj)",
"def circuit_to_qir(circuit, format, module_name=\"qutip_circuit\"):\n\n # Define as an inner function to make it easier to call from conditional\n # branches.\n def append_operation(\n module: pqg.SimpleModule, builder: pqg.BasicQisBuilder, op: Gate\n ):\n if op.classical_controls:\n result = op.classical_controls[0]\n value = \"zero\" if op.classical_control_value == 0 else \"one\"\n # Pull off the first control and recurse.\n op_with_less_controls = Gate(**op.__dict__)\n op_with_less_controls.classical_controls = (\n op_with_less_controls.classical_controls[1:]\n )\n op_with_less_controls.classical_control_value = (\n op_with_less_controls.classical_control_value\n if isinstance(\n op_with_less_controls.classical_control_value, int\n )\n else (op_with_less_controls.classical_control_value[1:])\n if op_with_less_controls.classical_control_value is not None\n else None\n )\n branch_body = {\n value: (\n lambda: append_operation(\n module, builder, op_with_less_controls\n )\n )\n }\n builder.if_result(module.results[result], **branch_body)\n return\n\n if op.controls:\n if op.name not in (\"CNOT\", \"CX\", \"CZ\") or len(op.controls) != 1:\n raise NotImplementedError(\n \"Arbitrary controlled quantum operations are not yet supported.\"\n )\n\n if op.name == \"X\":\n builder.x(module.qubits[op.targets[0]])\n elif op.name == \"Y\":\n builder.y(module.qubits[op.targets[0]])\n elif op.name == \"Z\":\n builder.z(module.qubits[op.targets[0]])\n elif op.name == \"S\":\n builder.s(module.qubits[op.targets[0]])\n elif op.name == \"T\":\n builder.t(module.qubits[op.targets[0]])\n elif op.name == \"SNOT\":\n builder.h(module.qubits[op.targets[0]])\n elif op.name in (\"CNOT\", \"CX\"):\n builder.cx(\n module.qubits[op.controls[0]], module.qubits[op.targets[0]]\n )\n elif op.name == \"CZ\":\n builder.cz(\n module.qubits[op.controls[0]], module.qubits[op.targets[0]]\n )\n elif op.name == \"RX\":\n builder.rx(op.arg_value, module.qubits[op.targets[0]])\n elif op.name == \"RY\":\n builder.ry(op.arg_value, module.qubits[op.targets[0]])\n elif op.name == \"RZ\":\n builder.rz(op.arg_value, module.qubits[op.targets[0]])\n elif op.name in (\"CRZ\", \"TOFFOLI\"):\n raise NotImplementedError(\n \"Decomposition of CRZ and Toffoli gates into base \"\n + \"profile instructions is not yet implemented.\"\n )\n else:\n raise ValueError(\n f\"Gate {op.name} not supported by the basic QIR builder, \"\n + \"and may require a custom declaration.\"\n )\n\n fmt = QirFormat.ensure(format)\n\n module = pqg.SimpleModule(module_name, circuit.N, circuit.num_cbits or 0)\n builder = pqg.BasicQisBuilder(module.builder)\n\n for op in circuit.gates:\n # If we have a QuTiP gate, then we need to convert it into one of\n # the reserved operation names in the QIR base profile's quantum\n # instruction set (QIS).\n if isinstance(op, Gate):\n # TODO: Validate indices.\n append_operation(module, builder, op)\n\n elif isinstance(op, Measurement):\n builder.mz(\n module.qubits[op.targets[0]],\n module.results[op.classical_store],\n )\n\n else:\n raise NotImplementedError(\n f\"Instruction {op} is not implemented in the QIR base \"\n + \"profile and may require a custom declaration.\"\n )\n\n if fmt == QirFormat.TEXT:\n return module.ir()\n elif fmt == QirFormat.BITCODE:\n return module.bitcode()\n elif fmt == QirFormat.MODULE:\n bitcode = module.bitcode()\n f = NamedTemporaryFile(suffix=\".bc\", delete=False)\n try:\n f.write(bitcode)\n finally:\n f.close()\n module = pqp.QirModule(f.name)\n try:\n os.unlink(f.name)\n except:\n pass\n return 
module\n else:\n assert (\n False\n ), \"Internal error; should have caught invalid format enum earlier.\"",
"def from_cirq(cls, circuit:cirq.Circuit):\n qubits = quple.get_circuit_qubits(circuit)\n symbols = quple.get_circuit_symbols(circuit)\n cq = cls(qubits)\n cq.append(circuit)\n return cq",
"def circuit_list(self):\r\n return self.circuits.itervalues()",
"def _create_quantum_circuit(self):\n reg_list = []\n for entry in self.regdefs:\n is_qreg = self._match_entry_type(entry, [ASTType.QREG])\n\n if is_qreg:\n reg_list.append(QuantumRegister(entry.get('qreg_num'), entry.get('qreg_name')))\n else:\n reg_list.append(ClassicalRegister(entry.get('creg_num'), entry.get('creg_name')))\n\n self.circuit = QuantumCircuit(*reg_list)\n return self.circuit",
"def test_assemble_single_circuit(self):\n qr = QuantumRegister(2, name='q')\n cr = ClassicalRegister(2, name='c')\n circ = QuantumCircuit(qr, cr, name='circ')\n circ.h(qr[0])\n circ.cx(qr[0], qr[1])\n circ.measure(qr, cr)\n\n run_config = RunConfig(shots=2000, memory=True)\n qobj = assemble_circuits(circ, run_config=run_config)\n self.assertIsInstance(qobj, QasmQobj)\n self.assertEqual(qobj.config.shots, 2000)\n self.assertEqual(qobj.config.memory, True)\n self.assertEqual(len(qobj.experiments), 1)\n self.assertEqual(qobj.experiments[0].instructions[1].name, 'cx')",
"def circuit_to_program(circuit):\n qubits = 0\n clbits = 0\n qregs_offset = {}\n raw_qoffset = 0\n for qreg in circuit.qregs:\n qubits += qreg.size\n qregs_offset[qreg.name] = raw_qoffset\n raw_qoffset = qreg.size\n cregs_offset = {}\n raw_coffset = 0\n for creg in circuit.cregs:\n clbits += creg.size\n cregs_offset[creg.name] = raw_coffset\n raw_coffset = creg.size\n prog = pyquil.Program()\n for inst in circuit.data:\n func = get_mapped(inst.name)\n qargs = [qregs_offset[x[0].name] + x[1] for x in inst.qargs]\n cargs = [cregs_offset[x[0].name] + x[1] for x in inst.cargs]\n # NOTE(mtreinish): In qiskit-terra >=0.8.0 inst.param will be renamed\n # inst.params\n params = inst.param + qargs + cargs\n prog += func(*params)\n return prog",
"def test_circuit_qasm_with_multiple_composite_circuits_with_same_name(self):\n\n my_gate = QuantumCircuit(1, name=\"my_gate\")\n my_gate.h(0)\n my_gate_inst1 = my_gate.to_instruction()\n\n my_gate = QuantumCircuit(1, name=\"my_gate\")\n my_gate.x(0)\n my_gate_inst2 = my_gate.to_instruction()\n\n my_gate = QuantumCircuit(1, name=\"my_gate\")\n my_gate.x(0)\n my_gate_inst3 = my_gate.to_instruction()\n\n qr = QuantumRegister(1, name=\"qr\")\n circuit = QuantumCircuit(qr, name=\"circuit\")\n circuit.append(my_gate_inst1, [qr[0]])\n circuit.append(my_gate_inst2, [qr[0]])\n my_gate_inst2_id = id(circuit.data[-1].operation)\n circuit.append(my_gate_inst3, [qr[0]])\n my_gate_inst3_id = id(circuit.data[-1].operation)\n\n expected_qasm = \"\"\"OPENQASM 2.0;\ninclude \"qelib1.inc\";\ngate my_gate q0 {{ h q0; }}\ngate my_gate_{1} q0 {{ x q0; }}\ngate my_gate_{0} q0 {{ x q0; }}\nqreg qr[1];\nmy_gate qr[0];\nmy_gate_{1} qr[0];\nmy_gate_{0} qr[0];\\n\"\"\".format(\n my_gate_inst3_id, my_gate_inst2_id\n )\n self.assertEqual(circuit.qasm(), expected_qasm)",
"def test_circuit_qasm_with_composite_circuit_with_many_params_and_qubits(self):\n original_str = \"\"\"OPENQASM 2.0;\ninclude \"qelib1.inc\";\ngate nG0(param0,param1) q0,q1 { h q0; h q1; }\nqreg q[3];\nqreg r[3];\ncreg c[3];\ncreg d[3];\nnG0(pi,pi/2) q[0],r[0];\\n\"\"\"\n qc = QuantumCircuit.from_qasm_str(original_str)\n\n self.assertEqual(original_str, qc.qasm())",
"def recombination ( qasm_list, loc_fixed ):\n\n if not isinstance( qasm_list, list ):\n raise TypeError( \"qasm_list must be a list.\" )\n\n if not all( isinstance( qasm, str ) for qasm in qasm_list ):\n raise TypeError( \"qasm_list must contain QASM strings.\" )\n\n if not isinstance( loc_fixed, list ):\n raise TypeError( \"loc_fixed must be a list.\" )\n\n if ( not all( isinstance( loc, tuple ) for loc in loc_fixed )\n or not all( len( loc ) == len( set( loc ) ) for loc in loc_fixed )\n or not all( isinstance( q, int ) for loc in loc_fixed for q in loc ) ):\n raise TypeError( \"loc_fixed must contain valid locations.\" )\n\n # Calculate Output Circuit Size\n max_qubit = 0\n\n for loc in loc_fixed:\n for qub in loc:\n max_qubit = max( qub, max_qubit )\n\n # Convert to circuits\n circs = []\n for qasm in qasm_list:\n try:\n circs.append( QuantumCircuit.from_qasm_str( qasm ) )\n except:\n raise ValueError( \"Invalid QASM string: %s\" % qasm )\n\n if all( len( circ.qubits ) != len( loc )\n for circ, loc in zip( circs, loc_fixed ) ):\n raise ValueError( \"Location and QASM qubit counts don't match.\" )\n\n # Join into One Circuit\n out_circ = QuantumCircuit( max_qubit + 1 )\n\n for circ, loc in zip( circs, loc_fixed ):\n for gate in circ.data:\n if gate[0].name == 'cx':\n out_circ.cx( loc[ gate[1][0].index ], loc[ gate[1][1].index ] )\n elif gate[0].name == 'u1':\n out_circ.u1( *gate[0].params, loc[ gate[1][0].index ] )\n elif gate[0].name == 'u2':\n out_circ.u2( *gate[0].params, loc[ gate[1][0].index ] )\n elif gate[0].name == 'u3':\n out_circ.u3( *gate[0].params, loc[ gate[1][0].index ] )\n elif gate[0].name == 'rx':\n out_circ.rx( *gate[0].params, loc[ gate[1][0].index ] )\n elif gate[0].name == 'ry':\n out_circ.ry( *gate[0].params, loc[ gate[1][0].index ] )\n elif gate[0].name == 'rz':\n out_circ.rz( *gate[0].params, loc[ gate[1][0].index ] )\n else:\n raise ValueError( \"QASM must be in \\'u1, u2, u3, cx\\' basis.\" )\n\n out_circ = qiskit.compiler.transpile( out_circ, basis_gates = ['u3', 'cx'],\n optimization_level = 3 )\n return out_circ.qasm()",
"def circuits(self) -> List[Union[QuantumCircuit, Schedule]]:\n pass",
"def circuits(self, backend: Optional[Backend] = None) -> List[QuantumCircuit]:\n schedule = self.experiment_options.get(\"schedule\", None)\n\n if schedule is None:\n schedule = self._default_gate_schedule(backend=backend)\n else:\n if self.physical_qubits[0] not in set(ch.index for ch in schedule.channels):\n raise CalibrationError(\n f\"User provided schedule {schedule.name} does not contain a channel \"\n \"for the qubit on which to run Rabi.\"\n )\n\n if len(schedule.parameters) != 1:\n raise CalibrationError(\"Schedule in Rabi must have exactly one free parameter.\")\n\n param = next(iter(schedule.parameters))\n\n # Create template circuit\n circuit = self._template_circuit(param)\n circuit.add_calibration(\n self.__rabi_gate_name__, (self.physical_qubits[0],), schedule, params=[param]\n )\n\n # Create the circuits to run\n circs = []\n for amp in self.experiment_options.amplitudes:\n amp = np.round(amp, decimals=6)\n assigned_circ = circuit.assign_parameters({param: amp}, inplace=False)\n assigned_circ.metadata = {\n \"experiment_type\": self._type,\n \"qubits\": (self.physical_qubits[0],),\n \"xval\": amp,\n \"unit\": \"arb. unit\",\n \"amplitude\": amp,\n \"schedule\": str(schedule),\n }\n\n if backend:\n assigned_circ.metadata[\"dt\"] = getattr(backend.configuration(), \"dt\", \"n.a.\")\n\n circs.append(assigned_circ)\n\n return circs",
"def create_quantum_model():\n data_qubits = cirq.GridQubit.rect(4, 4) # a 4x4 grid.\n readout = cirq.GridQubit(-1, -1) # a single qubit at [-1,-1]\n circuit = cirq.Circuit()\n \n # Prepare the readout qubit.\n circuit.append(cirq.X(readout))\n circuit.append(cirq.H(readout))\n \n builder = CircuitLayerBuilder(\n data_qubits = data_qubits,\n readout=readout)\n\n # Then add layers (experiment by adding more).\n builder.add_layer(circuit, cirq.XX, \"xx1\")\n builder.add_layer(circuit, cirq.ZZ, \"zz1\")\n\n # Finally, prepare the readout qubit.\n circuit.append(cirq.H(readout))\n\n return circuit, cirq.Z(readout)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert all config sections to have unique names. Adds unique suffixes to config sections for compatibility with configparser.
|
def unique_config_sections(config_file):
section_counters = defaultdict(int)
output_stream = io.StringIO()
with open(config_file) as fin:
for line in fin:
if line.startswith('['):
section = line.strip().strip('[]')
_section = section + '_' + str(section_counters[section])
section_counters[section] += 1
line = line.replace(section, _section)
output_stream.write(line)
output_stream.seek(0)
return output_stream
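# A minimal usage sketch; assumes the snippet's `io` and
# `collections.defaultdict` imports plus `import configparser`. The file name
# below is hypothetical (any Darknet-style .cfg with repeated section names).
import configparser

stream = unique_config_sections('yolov3.cfg')
parser = configparser.ConfigParser()
parser.read_file(stream)
# parser.sections() now yields e.g. ['net_0', 'convolutional_0', 'convolutional_1', ...]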
|
[
"def find_unique_keys(base_config, comp_config, base_name):\n unique_keys = []\n unique_sections = []\n\n for section in base_config:\n if str(section) == 'DEFAULT':\n continue #.cfg has DEFAULT key, we do not use\n if not comp_config.has_section(section):\n unique_label = base_name + '.' + str(section)\n unique_sections.append(unique_label)\n continue\n\n for key in base_config[section]:\n if not comp_config.has_option(section, key):\n unique_label = str(section) + '.' + str(key)\n unique_keys.append(unique_label)\n continue\n #TODO: compare values?\n return unique_sections, unique_keys",
"def _config_tag(prefix, tag):\n tag = tag.replace('.', '_')\n tag = tag.replace('/', '_')\n tag = tag.replace(' ', '_')\n return prefix + tag.upper()",
"def add_section_names(self):\n # Looks for all the UNIQUE combos of sectname segment name that can be found in the binary\n # no offsets :( but at least we can look for a really long string to keep things accurate\n sections = {}\n for header in self.__mm.headers:\n for cmd in header.commands:\n load_cmd = cmd[0]\n cmd_data = cmd[2]\n try:\n if load_cmd.get_cmd_name() in ('LC_SEGMENT', 'LC_SEGMENT_64'):\n for section_data in cmd_data:\n sd_info = section_data.describe()\n if hasattr(section_data, 'section_data'):\n # Might need to care about byte order here\n sectname = sd_info['sectname']\n segname = sd_info['segname']\n nulls = \"00\" * (16 - len(sectname))\n data = sectname.encode(\"hex\") + nulls + segname.encode(\"hex\")\n sections[sectname+\"_\"+segname] = data\n except Exception as e:\n print str(e)\n for sec in sections:\n self.__sig.add_named_hex(sec, sections[sec])",
"def interface_to_chain_suffix(config, iface_name):\n for prefix in sorted(config.IFACE_PREFIX, reverse=True):\n if iface_name.startswith(prefix):\n iface_name = iface_name[len(prefix):]\n break\n iface_name = futils.uniquely_shorten(iface_name, 16)\n return iface_name",
"def get_suffix_configuration(lst):\n suffix_conf = ''\n for elem in lst: \n suffix_conf += '_'\n if type(elem) != str: \n elem = str(elem)\n suffix_conf += elem\n return suffix_conf",
"def _move_all_to_config_section(self):\n for section in self.OLD_SECTIONS:\n if not self.has_section(section):\n continue\n\n all_configs = self.keys(section)\n for key in all_configs:\n self.set('config',\n key,\n super().getraw(section, key))\n\n self._conf.remove_section(section)",
"def _parseConfig(config):\n options = {}\n for section in config.sections():\n options[section] = {}\n for item in config.items(section):\n options[section.lower()][item[0]] = item[1]\n return options",
"def normalize_discs(name: str, config: Config) -> str:\r\n\r\n for key, value in config.tags_disc_rename.items():\r\n if key in name:\r\n name = name.replace(key, value)\r\n\r\n return name",
"def _format_bases_config(bases_config: BasesConfiguration) -> str:\n return \"_\".join([_format_run_on_base(r) for r in bases_config.run_on])",
"def _parse_yaml_configs(args, anon_component_prefix=\"anon_app\"):\n # Configuration files are basically nested dictionaries and the command-line arguments\n # are a list with each element being a dictionary. If the dict in the args has the key\n # 'class', then it is anonymous and we should just give it a sequential unique name to\n # ensure it is run. If, however, it does not, then we should assume that it's a NAMED\n # configuration and so we can actually use that to overwrite/modify the configurations\n # pulled in from a file.\n\n new_configs = {}\n for arg in args:\n try:\n arg = yaml.load(arg)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n raise ValueError(\"error parsing manual configuration: %s\\nError:%s\" % (arg, e))\n\n # If this config is anonymous, give it a unique name and add it to configs\n # since it couldn't possibly overwrite another config entry.\n # NOTE: if user specified a 'name' entry directly, we will still take that later on...\n if 'class' in arg:\n # TODO: perhaps register these names somewhere to ensure uniqueness?\n global __scale_client_n_anon_apps_added__\n unique_key = anon_component_prefix + str(__scale_client_n_anon_apps_added__)\n __scale_client_n_anon_apps_added__ += 1\n new_configs[unique_key] = arg\n else:\n try:\n new_configs.update(arg)\n except TypeError as e:\n raise ValueError(\"error in your manual configuration: %s\\n\"\n \"couldn't be interpreted as a dict due to error: %s\" % (arg, e))\n\n return new_configs",
"def sort_sections(self):\n # longer sections should appear first because they'll be more\n # \"unique.\"\n #\n # For example: If the URL is \"lvrj.com/blogs/sherm\" and the config\n # file contains both \"lvrj.com/blogs/sherm\" and \"lvrj.com,\" we want\n # it to match with \"lvrj.com/blogs/sherm\" instead of just \"lvrj.com.\"\n for section in sorted(self.config.sections(), key=len, reverse=True):\n yield section",
"def test_normalize_configuration_dict_select_long_names_for_every_argument_in_config(base_config_parser):\n conf = load_configuration_from_json_file(file_name='basic_configuration_with_long_and_short_names.json')\n expected_conf = load_configuration_from_json_file(file_name='basic_configuration.json')\n expected_conf['argumento1'] = expected_conf.pop('arg1')\n expected_conf['arg5'] = conf['5']\n expected_conf['dded'] = expected_conf.pop('arg4')\n\n result = base_config_parser.normalize_configuration(conf=conf)\n\n assert result == expected_conf",
"def remove_copy_and_dev_suffixes_from_name(self):\n print('Removing _dev and _copy suffixes from name and display tags')\n if self.data['name']:\n self.data['name'] = self.data.get('name', '').replace('_copy', '').replace('_dev', '')\n if self.data.get('display'):\n self.data['display'] = self.data.get('display', '').replace('_copy', '').replace('_dev', '')",
"def _clean_backend_config(config):\r\n\r\n return dict([\r\n (key.lower(), val)\r\n for key, val in config.iteritems()\r\n ])",
"def section_name(name, n, prefix='py-{pid}'.format(pid=os.getpid())):\n return '.'.join(filter(bool, [prefix, name, str(n)]))",
"def suffixes(self):\n suffixes = []\n for constraint, suffix in self.conf.get(\"suffixes\", {}).items():\n if constraint in self.spec:\n suffixes.append(suffix)\n suffixes = list(dedupe(suffixes))\n if self.hash:\n suffixes.append(self.hash)\n return suffixes",
"def __check_short_section_name(self, name: str) -> None:\n if name in self.__short_section_names:\n logging.critical('Section abbreviation must be unique: %s already used: %s',\n name, self.__short_section_names)\n raise ValueError('Section abbreviation {0!s} is not unique'.format(name))\n else:\n self.__short_section_names.add(name)",
"def reverse_aliases():\n result = {}\n aliases = construct_aliases()\n for key in aliases:\n cat, idx = key.split(':')\n prp = ':'.join(aliases[key].split(':')[1:])\n # TODO TODO\n result[cat + '.' + prp] = cat + ':' + idx\n return result",
"def create_config(ta_map, aliasfile, newaliasfile):\n\n def config_split(l):\n k, v = l.split(\":\")\n return k, [x.strip() for x in v.split(\",\")]\n\n config_dict = defaultdict(set)\n\n with open(aliasfile, \"r\") as config:\n for l in config:\n k, v = config_split(l)\n config_dict[k] = v\n\n with open(newaliasfile, \"w\") as config:\n\n config.truncate()\n\n while ta_map:\n k, v = ta_map.popitem()\n\n if len(v) == 1:\n config.write(k + \":\\n\")\n else:\n k_decide_key = k[:]\n v_decide_key = set(v)\n ta_name, ta_aliases = decide_on_key(\n k_decide_key, v_decide_key, config_dict\n )\n config.write(\"{}: {}\\n\".format(ta_name, \",\".join(ta_aliases)))\n v.remove(k)\n for x in v:\n ta_map.pop(x)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The last value for the W array is correct
|
def test_W_end(self):
self.assertAlmostEqual(attempt.W[-1], 9.494852380803035)
|
[
"def new_w_vector():\n return [1] * 201",
"def null_w(self, t):\n\n B = self.B_w\n u = self.u_w(t)\n\n null = np.matmul(B, u)\n\n return np.reshape(null, (2 * self.vec_num_points, ))",
"def uw(self):\n return sm.unitvec(self.w)",
"def _build_n_w(self):\n\n F = np.zeros((2, self.vec_num_points))\n F[0, self.vec_num_points - 1] = 1\n F[1, self._num_points - 1] = 1\n\n B = self.A_22_w.copy()\n\n A = -self._gamma**2 * B\n A_inv = lin.inv(A)\n\n Nt = F * A_inv\n\n N = Nt.transpose()\n fill = np.zeros(N.shape)\n\n self.N_w = np.concatenate((N, fill), axis=0)",
"def trace_wib_index(self):\n if(self.det_WG <= 0 ):\n print(\"replacing with insignificant because |WG| <= 0\")\n return self.insignificant\n else:\n # print(\"------------T and |WG| > 0 ------------------\")\n return np.trace(np.linalg.inv(self.WG).dot(self.BG))",
"def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]",
"def _reset_w_vec( self ):\n self._reset_v_vec( 'w' )",
"def update_w(self):\n def update_single_w(i):\n \"\"\" compute single W[:,i] \"\"\"\n # optimize beta using qp solver from cvxopt\n FB = base.matrix(np.float64(np.dot(-self.data.T, W_hat[:,i])))\n be = solvers.qp(HB, FB, INQa, INQb, EQa, EQb)\n self.beta[i,:] = np.array(be['x']).reshape((1, self._num_samples))\n\n # float64 required for cvxopt\n HB = base.matrix(np.float64(np.dot(self.data[:,:].T, self.data[:,:])))\n EQb = base.matrix(1.0, (1, 1))\n W_hat = np.dot(self.data, pinv(self.H))\n INQa = base.matrix(-np.eye(self._num_samples))\n INQb = base.matrix(0.0, (self._num_samples, 1))\n EQa = base.matrix(1.0, (1, self._num_samples))\n\n for i in range(self._num_bases):\n update_single_w(i)\n\n self.W = np.dot(self.beta, self.data.T).T",
"def _build_f_plus_w(self):\n\n F = np.zeros((2, 2 * self.vec_num_points))\n\n F[0, self.vec_num_points - 1] = 1.0\n F[1, self._num_points - 1] = 1.0\n\n self.F_plus_w = F",
"def _build_init_w(self):\n\n vec0 = self.init_w0\n vec1 = self.init_w1\n\n self.init_w = np.concatenate((vec0, vec1))",
"def update_W(self):\n self.sigma_W = [\n np.linalg.inv(self.E_tau(m) * self.E_ZZ() + np.diag(self.alpha[m]))\n for m in range(self.groups)]\n self.m_W = [self.E_tau(m) * self.sigma_W[m] @ self.E_Z() @ self.X[m].T\n for m in range(self.groups)]",
"def initialize_w(self):\n\n\t\tself.W = np.random.random((self.X_dim, self._rank))",
"def get_main_array_bottom(self):\n return self.bitcell_array_inst.by()",
"def test_array_wind_components():\n speed = np.array([10,17,40,0])\n wdir = np.array([3,92,210,297])\n true_u = np.array([-0.523359, -16.98964, 20.0, 0.0])\n true_v = np.array([-9.986295, 0.593291, 34.641016, 0.0])\n u,v = get_wind_components(speed,wdir)\n assert_array_almost_equal(u,true_u,4)\n assert_array_almost_equal(v,true_v,4)",
"def _initialise_w_vec( self ):\n # check current value\n if hasattr( self, 'w_vec' ):\n if len( self.w_vec ) == self._required_w_vec_length:\n return\n else:\n del self.w_vec\n # source\n try:\n source = self.posterior_w\n except AttributeError:\n source = self.initial_conditions\n # reproject\n w_vec = self._reproject_to_w_vec( posterior=source )\n # get the last c value\n if len( self._posterior_history ) > 0:\n c = self._most_recent_c\n w_vec[ -self.M: ] = c\n # set it\n self.w_vec = w_vec",
"def _coupling_w(self, z, w):\n\n array = self.A_200_w\n\n z0, z1 = self.split_eqn(z)\n w0, w1 = self.split_eqn(w)\n\n coupling = np.zeros((self.vec_num_points, ))\n zeros = np.zeros((self.vec_num_points, ))\n\n for index, A_k in enumerate(array):\n temp = A_k * z0\n term = np.dot(w1, temp)\n coupling[index] = -term\n\n return np.concatenate((coupling, zeros))",
"def set_values(self): \n self.W_values = self.params_values[:self.n_in * self.n_out].reshape((self.n_in, self.n_out))\n self.b_values = self.params_values[self.n_in * self.n_out:].reshape((self.n_out,))",
"def energia(s,w):\n \n Energy = []\n #for over the signal in steps of len(w)\n #for n in range(0,len(s)-len(w),len(w)):\n for n in range(0,len(s)-len(w)):\n \n #print(n,':',n+len(w))\n #print(len(s))\n trama = s[n:n+len(w)] * w #actual windowed segment\n \n Energy.append(np.sum(trama**2))\n \n return np.array(Energy)",
"def _weichall():\n try:\n LAchall=LOSC('a').rchall();LBchall=LOSC('b').rchall();L2chall=LOSC('2').rchall();\n allwvfm=[*LAchall[:2],*LBchall,*L2chall];\n allenergy=[*EMeters.EG1wYFE1in(),*EMeters.EG1w2in()[0],*EMeters.EG()[0][0]]\n allweich=[]\n for ii in range(10):\n templistQ=allwvfm[ii]\n bkgrdbuffer=int(0.038*len(templistQ))\n bkgrdQ=np.mean(templistQ[:bkgrdbuffer])\n ensampQ=allenergy[ii]\n weightQ=ensampQ/np.sum(np.array(templistQ)-bkgrdQ)\n allweich.append(np.array(weightQ*(np.array(templistQ)-bkgrdQ)))\n return allweich\n except:\n print('Weighted waveform generation failed!')\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The last value for the Z array is correct
|
def test_Z_end(self):
self.assertAlmostEqual(attempt.Z[-1], 41.47999849170943)
|
[
"def get_z(self):\n return self._z",
"def z(self):\r\n return self.unif[2]",
"def relu(z):\r\n\r\n a = np.maximum(z, 0)\r\n\r\n return a",
"def z(self):\n return self.coords[2]",
"def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]",
"def de_addressing_array(Z):\n assert len(Z.shape) == 2\n \n if(Z.shape[0] == 1): return Z.copy()\n \n Zd = np.zeros(Z.shape, dtype=np.int)\n Zd[-1] = Z[-1]\n for i in range(-2,-Z.shape[0]-1,-1):\n Zd[i] = np.roll(Zd[i+1],-1)+Z[i]\n\n return Zd",
"def get_z(self) -> int:\n return self.__z",
"def z(self):\n return self.position[2]",
"def get_z_locations(self):\r\n zi = self.z0\r\n z = [zi]\r\n for i in range(self.nPlies()):\r\n t = self.Thickness(i)\r\n zi += t\r\n z.append(zi)\r\n return array(z)",
"def z_axis_reading(self):\n\n # output in 2s complement\n zH = self.single_access_read(0x2D)\n zL = self.single_access_read(0x2C)\n\n zTotal = self.twos_complement_conversion(zH, zL)\n\n #print (bin(zH),bin(zL),bin(zTotal), zTotal)\n\n return zTotal",
"def num_z(self):\n return self._xyzct['Z']",
"def ddz(self):\n partialZ = 1.j * self.bArr.reshape((1,self.bArr.size,1,1)) * self\n return partialZ",
"def maxfield(self):\n\t\tz=self.zValues()\n\t\twindow=max(z)-min(z)\n\t\ti_z= where(z>(0.8*window+min(z)))\n\t\tEy = self.field_on_axis_Ey()\n\t\tnew_z=take(z,i_z[0])\n\t\n\t\tnew_Ey = take(Ey,i_z[0])\n\t\tfor j in range(0,len(new_z)):\n\t\t\tif new_z[j]>0.:\n\t\t\t\ti=argmax(abs(new_Ey))\n\t\treturn new_z[i],z",
"def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64",
"def vec_z(self):\t\t\t\r\n if self.oz != 0:\r\n ov = self.oz\r\n lv = self.self.lz + self.oz\r\n else:\r\n ov = self.dz / 2\r\n lv = self.lz\r\n\r\n zv = \"\"\r\n for num in np.arange(ov, lv, self.dz):\r\n zv += str(num) + \" \"\r\n\r\n return zv",
"def get_z(self):\n return self.coords[2]",
"def z ( self ) :\n return self.zvar",
"def getZ(self):\n\t\treturn self.coords.z",
"def test_z_test(self):\r\n sample = array([1, 2, 3, 4, 5])\r\n self.assertFloatEqual(z_test(sample, 3, 1), (0, 1))\r\n self.assertFloatEqual(z_test(sample, 3, 2, 'high'), (0, 0.5))\r\n self.assertFloatEqual(z_test(sample, 3, 2, 'low'), (0, 0.5))\r\n # check that population mean and variance, and tails, can be set OK.\r\n self.assertFloatEqual(z_test(sample, 0, 1), (6.7082039324993694,\r\n 1.9703444711798951e-11))\r\n self.assertFloatEqual(z_test(sample, 1, 10), (0.44721359549995793,\r\n 0.65472084601857694))\r\n self.assertFloatEqual(z_test(sample, 1, 10, 'high'),\r\n (0.44721359549995793, 0.65472084601857694 / 2))\r\n self.assertFloatEqual(z_test(sample, 1, 10, 'low'),\r\n (0.44721359549995793, 1 - (0.65472084601857694 / 2)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The maxIndex variables are correct
|
def test_maxIndex(self):
self.assertEqual(attempt.maxIndexZ, 113)
self.assertEqual(attempt.maxIndexW, 134)
|
[
"def maxQualifiedIndex(self, indices):\n entry = self.getConfig()\n # the leader keep its own record updated to the newest\n indices[self.datacenter_id] = len(self.log) - 1\n # print('!!!!!', indices)\n if entry['config'] == 'single':\n return sorted([indices[x] for x in entry['data']])[(len(entry['data'])-1)/2]\n maxOld = sorted([indices[x] for x in entry['data'][0]])[(len(entry['data'][0])-1)/2]\n maxNew = sorted([indices[x] for x in entry['data'][1]])[(len(entry['data'][1])-1)/2]\n return min(maxOld, maxNew)",
"def max_index(self):\n return self.indices[-1] if len(self.indices) else -1",
"def index(self, value, max=False):\n if max:\n return len(self.nxdata)-len(self.nxdata[self.nxdata>=value])\n else:\n return len(self.nxdata[self.nxdata<value])",
"def get_max_index(a):\n return a.argmax()",
"def _findMaxIndex(data, mark):\n # assume the maximum value is at initial mark position\n maxIndex = mark\n # loop over the remaining positions greater than the mark\n for mark in range(mark+1, len(data)):\n # if a bigger value is found, record its index\n if data[mark][1][2] > data[maxIndex][1][2]:\n maxIndex = mark\n return maxIndex",
"def maxfield(self):\n\t\tz=self.zValues()\n\t\twindow=max(z)-min(z)\n\t\ti_z= where(z>(0.8*window+min(z)))\n\t\tEy = self.field_on_axis_Ey()\n\t\tnew_z=take(z,i_z[0])\n\t\n\t\tnew_Ey = take(Ey,i_z[0])\n\t\tfor j in range(0,len(new_z)):\n\t\t\tif new_z[j]>0.:\n\t\t\t\ti=argmax(abs(new_Ey))\n\t\treturn new_z[i],z",
"def storage_upper_bound(index):\n i = index[0]\n return storage_para[i].pmax",
"def get_index_max_value(tab):\n max_val = 0\n n = 0\n index = 0\n\n for i in tab:\n n+=1\n if i > max_val:\n max_val = i\n index = n\n\n return index",
"def get_max(self):",
"def maxAbsIndex(self):\n ax = abs(self.x)\n ay = abs(self.y)\n az = abs(self.z)\n\n if ax>=ay and ax>=az:\n return 0\n elif ay>=az:\n return 1\n else:\n return 2",
"def MAXINDEX(df, time_period=30):\n close = df['close']\n return talib.MAXINDEX(close, timeperiod=time_period)",
"def _get_end_index(self):\n return max(self.index + self.source_window,\n self._get_target_index() + self.target_window)",
"def find_max(self):\n treeMaxIdx = self.best_score_tree.argmax()\n # get the max in the penalized projection\n maxIdx = self.pen_projs_matrix[treeMaxIdx * self.scale /\n 2: (treeMaxIdx + 1) * self.scale / 2].argmax()\n \n# if maxIdx==0:\n# import matplotlib.pyplot as plt\n## plt.figure()\n## plt.plot(self.best_score_tree)\n### plt.show()\n# plt.figure()\n# plt.plot(self.pen_projs_matrix)\n# plt.plot(self.projs_matrix)\n# plt.show()\n \n self.maxIdx = maxIdx + treeMaxIdx * self.scale / 2\n self.max_value = self.projs_matrix[self.maxIdx]",
"def _arg_max(next_state):\n max_index_list = []\n max_value = next_state[0]\n for index, value in enumerate(next_state):\n if value > max_value:\n max_index_list.clear()\n max_value = value\n max_index_list.append(index)\n elif value == max_value:\n max_index_list.append(index)\n return random.choice(max_index_list)",
"def max(self) -> int:",
"def max_index(lst):\n mx = lst[0][0]\n mi = 0\n for i in range(len(lst)):\n if lst[i][0] > mx:\n mx = lst[i][0]\n mi = i\n return mi",
"def maxIndex(lst):\n return max(range(len(lst)), key=lst.__getitem__)",
"def max_level(self):\n return np.max(np.array(list(self.I), dtype=np.int8))",
"def find_longest_axis(self, vector):\n max_value_index = 0\n for i in range(1, 5):\n if abs(vector[i]) > abs(vector[max_value_index]):\n max_value_index = i\n return max_value_index"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks that the mount point is owned by swift
|
def is_ug_swift(d, r):
stats = os.stat(d.mount)
uid = stats.st_uid
gid = stats.st_gid
user = pwd.getpwuid(uid).pw_name
group = grp.getgrgid(gid).gr_name
if user == group == 'swift':
return True
else:
r.msgkey('user', user)
r.msgkey('group', group)
return False
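# A minimal usage sketch; assumes the snippet's `os`, `pwd` and `grp` imports.
# The device and result objects below are hypothetical stand-ins for whatever
# the surrounding checker framework actually passes in.
class _Device:
    def __init__(self, mount):
        self.mount = mount

class _Result:
    def msgkey(self, key, value):
        print('%s=%s' % (key, value))

ok = is_ug_swift(_Device('/srv/node/sdb1'), _Result())  # mount path is hypothetical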
|
[
"def is_mount_point(self):\n return os.path.ismount(self._full_name)",
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def validate_permissions(pod_obj):\n\n cmd_output = pod_obj.exec_cmd_on_pod(command=\"ls -l /etc/healing-controller.d/\")\n cmd_output = cmd_output.split()\n assert \"root\" in cmd_output[4] and cmd_output[13], \"Owner is not set to root \"\n assert \"9999\" in cmd_output[5] and cmd_output[14], \"Owner group is not set to 9999\"\n log.info(\"FSGroup is correctly set on subPath volume for CephFS CSI \")",
"def must_be_owned(self):\n return True",
"def _mount_point_exists(self, mountpoint):\n cmd = ['dir', mountpoint]\n logger.debug('running command: %s' % (' '.join(cmd)))\n stdout, stderr, retval = self._run_cli_process(cmd)\n\n if not retval:\n logger.debug(\"mountpoint %s ready\" % mountpoint)\n else:\n logger.debug(\"mountpoint %s reported not ready with error '%s'\" %\n (mountpoint, stderr.strip()))\n\n return not retval",
"def mounted(self):\n return os.path.ismount(self.get(\"~mountpoint\", \"/\"))",
"def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False",
"def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False",
"async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner",
"def is_mountpoint(path: str) -> bool:\n mtpt = subprocess.run([\"mountpoint\", path], check=False, capture_output=True)\n return mtpt.returncode == 0",
"def is_owner(self):\n return self._is_owner",
"def test_manage_existing_good_uid_not_mapped(self):\n\n # Create a volume as a way of getting a vdisk created, and find out the\n # UID of that vdisk.\n _volume, uid = self._create_volume_and_return_uid('manage_test')\n\n # Descriptor of the Cinder volume that we want to own the vdisk\n # referenced by uid.\n new_volume = self._generate_vol_info(None, None)\n\n # Submit the request to manage it.\n ref = {'source-id': uid}\n size = self.driver.manage_existing_get_size(new_volume, ref)\n self.assertEqual(10, size)\n self.driver.manage_existing(new_volume, ref)\n\n # Assert that there is a disk named after the new volume that has the\n # ID that we passed in, indicating that the disk has been renamed.\n uid_of_new_volume = self._get_vdisk_uid(new_volume['name'])\n self.assertEqual(uid, uid_of_new_volume)",
"def test_mount_status_nas_share(self):\n pass",
"def is_owner(self):\n return Scopes.SCOPE_COMMANDS in self.scopes",
"def mounted(fs):\r\n return any(fs == entry.dev or fs == entry.point for entry in mount_table())",
"def is_developer_mounted(self) -> bool:\n return len(self.lookup()) > 0",
"def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True",
"def flash_writable(mntpoint):\n mounts = dict()\n with open('/proc/mounts','r') as f:\n for partition in f.readlines():\n mount = partition.split()[1]\n mode = partition.split()[3].split(',')[0]\n opts = partition.split()[3].split(',')\n mounts[mount] = mode\n if mounts.get(mntpoint, None):\n if mounts.get(mntpoint, None) == \"rw\":\n return True\n else:\n return False\n else:\n logger.error(\"root partition missing\")\n return False",
"def assert_same_owner(path):\n try:\n assert find_owner(path) == getuser(), f\"{path} must be owned by {getuser()}\"\n except AssertionError as error:\n raise click.UsageError(str(error))\n except FileNotFoundError:\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks the relevant swift mount points and diskusage
|
def main():
results = []
results.extend(check_mounts())
results.extend(diskusage())
return results
|
[
"def _mock_disk_usage(self, blocks, avail, frsize=1024) -> None:\n mock_statvfs_patcher = patch(\"eden.cli.doctor.os.statvfs\")\n mock_statvfs = mock_statvfs_patcher.start()\n self.addCleanup(lambda: mock_statvfs.stop())\n statvfs_tuple = collections.namedtuple(\"statvfs\", \"f_blocks f_bavail f_frsize\")\n mock_statvfs.return_value = statvfs_tuple(blocks, avail, frsize)\n\n mock_getmountpt_and_deviceid_patcher = patch(\n \"eden.cli.doctor.check_filesystems.get_mountpt\"\n )\n mock_getmountpt_and_deviceid = mock_getmountpt_and_deviceid_patcher.start()\n self.addCleanup(lambda: mock_getmountpt_and_deviceid.stop())\n mock_getmountpt_and_deviceid.return_value = \"/\"",
"def check_disk_usage(disk):\n du = shutil.disk_usage(disk)\n free = du.free / du.total * 100\n return free > 20",
"def get_filesystem_used(**kwargs):\n disk = None\n try:\n disk = psutil.disk_usage(\"/var/k8s\")\n except:\n disk = psutil.disk_usage(\"/\")\n\n return disk.used",
"def getDiskUsage (self, disksSet = None):\n if disksSet == None:\n disksSet = self._disksSet\n\n if platform.system() == \"Linux\":\n \n totalBlocks = 0\n totalUsedBlocks = 0\n\n if len(disksSet)==0:\n return 0\n \n for disk in disksSet:\n\n if self._isPrediction is False:\n mediaRootDir = os.path.join(self._cfg.mediaBaseDir, \"%02u\" % disk)\n actualPath = os.path.realpath(mediaRootDir)\n # Get the statistics for the file system\n statistics = os.statvfs(actualPath)\n # Sum used and total blocks\n totalUsedBlocks += float(statistics[statvfs.F_BLOCKS]) - float(statistics[statvfs.F_BFREE])\n totalBlocks += float(statistics[statvfs.F_BLOCKS])\n else:\n\n diskData = self.getVirtualDiskData(disk)\n\n if diskData is not None:\n totalUsedBlocks += float(diskData[\"totalUsedBytes\"])\n totalBlocks += float(diskData[\"totalDiskBytes\"])\n else:\n totalBlocks+=1.0\n\n \n # Calculate the \"disk\" usage \n diskUsage = 100*(totalUsedBlocks/totalBlocks)\n\n else:\n self._logGeneral().error(\"not on linux. can't calculate disk usage\")\n raise Exception(\"platformError\",\"not linux\")\n \n self._logGeneral(\"disk-usage\").debug2(\"diskUsage=%s\", diskUsage)\n return diskUsage",
"def disk_info():\n for particion in psutil.disk_partitions():\n ocupacion = psutil.disk_usage(particion.mountpoint).percent\n print('device:{0:40} mount:{1:20} ocup:{2:6} tipo:{3:10} '\n 'ops:{4}'.format(particion.device,\n particion.mountpoint,\n ocupacion,\n particion.fstype,\n particion.opts))",
"def DiskUsage(cls):\n\t\t# >> df -iP\n\t\t# Sys. de fich. Inodes IUtil. ILib. IUti% Monte sur\n\t\t# /dev/sda1 915712 241790 673922 27% /\n\t\t# none 210977 788 210189 1% /dev\n\t\t# none 215028 19 215009 1% /dev/shm\n\t\t# none 215028 71 214957 1% /var/run\n\t\t# none 215028 2 215026 1% /var/lock\n\t\t# /dev/sda5 8364032 500833 7863199 6% /home\n\t\t# /home/sebastien/.Private 8364032 500833 7863199 6% /home/sebastien\n\t\tres = {}\n\t\tfor line in popen(\"df -kP\").split(\"\\n\")[1:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tsystem, inodes, used_inodes, free_inodes, usage, mount = line\n\t\t\ttry:\n\t\t\t\tusage = float(usage[:-1])\n\t\t\texcept ValueError:\n\t\t\t\tusage = 0\n\t\t\tres[mount] = float(usage) / 100.0\n\t\treturn res",
"def test_check_disk_space_sufficient(self):\n self.assertTrue(self.command.check_disk_space(1, self.temp_dir))\n self.assertTrue(self.command.check_disk_space(\n 1, self.temp_dir,\n label=\"Hello\", context=\"Contextual detail\", die=True))",
"def check_disk_space(self, required_disk_space, fs='/opt'):\n\n stats = admin_tasks.df_stats(fs)\n if stats:\n __, __, available = stats\n\n space_left = available - required_disk_space\n\n if space_left > 0.5:\n self.log.info(\"%.1fG of disk space is available from approximately %.1fG in %s\" %\n (required_disk_space, available, fs))\n elif space_left > 0 and space_left <= 0.5:\n self.log.warning(\"Low disk space. Only %.1fG will be free from approximately available space of %.1fG in %s.\" % (\n space_left, available, fs))\n else:\n self.log.error(\"Not enough disk space. %.1fG is not available from approximately avaiable space of %.1fG in %s.\" % (\n required_disk_space, available, fs))\n sys.exit(1)",
"def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False",
"def _get_drive_usage(path):\n if sys.version_info >= (3, 3):\n usage = shutil.disk_usage(path)\n return {\"total\": usage.total, \"used\": usage.used, \"free\": usage.free}\n if on_android():\n from jnius import autoclass\n\n StatFs = autoclass(\"android.os.StatFs\")\n AndroidString = autoclass(\"java.lang.String\")\n stats = StatFs(AndroidString(path))\n return {\n \"total\": stats.getBlockCountLong() * stats.getBlockSizeLong(),\n \"free\": stats.getAvailableBlocksLong() * stats.getBlockSizeLong(),\n }\n # with os.statvfs, we need to multiple block sizes by block counts to get bytes\n stats = os.statvfs(path)\n total = stats.f_frsize * stats.f_blocks\n free = stats.f_frsize * stats.f_bavail\n return {\"total\": total, \"free\": free, \"used\": total - free}",
"def test_903_disk_usage_action(self):\n u.log.info(\"Testing diskusage action\")\n action_id = u.run_action(self.swift_proxy_sentry, \"diskusage\")\n assert u.wait_on_action(action_id), \"diskusage action failed.\"\n\n u.log.info('OK')",
"def check_root_full():\n return check_disk_full(disk='/',min_gb=2,min_percent=10)",
"def _processing_mountpoints(\n self, location_subpath: Path, tools_configmaps: Dict[str, str]\n ):\n mount_points = [\n {\n \"name\": constants.PROCESSING_VOLUME_NAME,\n \"mountPath\": os.fspath(constants.PROCESSING_VOLUME),\n \"subPath\": os.fspath(location_subpath),\n \"readOnly\": False,\n },\n ]\n if constants.INPUTS_VOLUME_NAME in storage_settings.FLOW_VOLUMES:\n mount_points.append(\n {\n \"name\": constants.INPUTS_VOLUME_NAME,\n \"mountPath\": os.fspath(constants.INPUTS_VOLUME),\n \"readOnly\": False,\n }\n )\n mount_points += [\n {\n \"name\": connector.name,\n \"mountPath\": f\"/{storage_name}_{connector.name}\",\n \"readOnly\": storage_name != \"upload\",\n }\n for storage_name, connector in get_mountable_connectors()\n ]\n\n mount_points += [\n {\n \"name\": \"files-volume\",\n \"mountPath\": \"/etc/passwd\",\n \"subPath\": \"passwd\",\n },\n {\n \"name\": \"files-volume\",\n \"mountPath\": \"/etc/group\",\n \"subPath\": \"group\",\n },\n {\n \"name\": \"files-volume\",\n \"mountPath\": \"/socket_utils.py\",\n \"subPath\": \"socket-utils\",\n },\n {\n \"name\": \"files-volume\",\n \"mountPath\": \"/processing.py\",\n \"subPath\": \"startup-script\",\n },\n {\n \"name\": \"files-volume\",\n \"mountPath\": \"/constants.py\",\n \"subPath\": \"constants\",\n },\n {\n \"name\": \"files-volume\",\n \"mountPath\": f\"/{constants.BOOTSTRAP_PYTHON_RUNTIME}\",\n \"subPath\": \"bootstrap-python-runtime\",\n },\n {\n \"name\": \"files-volume\",\n \"mountPath\": \"/communicator.py\",\n \"subPath\": \"communicator\",\n },\n {\n \"name\": constants.SOCKETS_VOLUME_NAME,\n \"mountPath\": os.fspath(constants.SOCKETS_VOLUME),\n },\n {\n \"name\": constants.SECRETS_VOLUME_NAME,\n \"mountPath\": os.fspath(constants.SECRETS_VOLUME),\n \"readOnly\": True,\n },\n ]\n for tool_name in tools_configmaps:\n mount_points.append(\n {\n \"name\": f\"tools-{tool_name}\",\n \"mountPath\": f\"{self.tools_path_prefix / tool_name}\",\n }\n )\n return mount_points",
"def check_root_full():\r\n return check_disk_full(disk=\"/\",min_gb=2, min_percent=10)",
"def mounted(fs):\r\n return any(fs == entry.dev or fs == entry.point for entry in mount_table())",
"def check_root_full():\n return check_disk_full(disk=\"/\", min_gb=2, min_percent=10)",
"def get_mount_points():\n\n points = []\n t = subprocess.check_output(['mount'])\n t = t.decode()\n\n for line in t.splitlines():\n t = line.find('smbfs')\n if t < 0: continue\n b = line.find(' on ')\n points.append(line[b+4: t-2])\n # //share@win10.shared/storage on /Volumes/storage (smbfs, nodev, nosuid, mounted by ruan)\n return points",
"def test_009_fs(self):\n # stats_to_check = [ ]\n print('INFO: [TEST_009] Check FS stats')\n stats_grab = stats.get_plugin('fs').get_raw()\n self.assertTrue(type(stats_grab) is list, msg='FileSystem stats is not a list')\n print('INFO: FS stats: %s' % stats_grab)",
"def test_mount_status_nas_share(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sort variables based on their rank and shift. Note that this relies on all variables having a unique rank.
|
def sort_variables(variables):
return tuple(sorted(variables, key=lambda v: (v.rank, v.shift)))
|
[
"def sort(self, varnames):\n varnames = self._find_vars(varnames, unique=True, empty_ok=False)\n var_ind_list = list(map(self._varlist.index, varnames))\n new_srtlist = var_ind_list + [None]*(self._nvar - len(varnames))\n if self._srtlist == new_srtlist:\n return\n sort_key = lambda row: [row[i] for i in var_ind_list]\n self._varvals.sort(key = sort_key)\n self._srtlist = new_srtlist\n self._changed = True",
"def _sort_variables(self):\n logging.info('enter')\n self._variables_ordered_for_init = self._sort_variables_for(\n for_init=True)\n self._variables_ordered_for_step = self._sort_variables_for(\n for_init=False)\n self._is_ordered = True",
"def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()",
"def _sort(self):\n self.forecast=sorted(self.forecast,key=lambda x: -x[0])\n self.history=sorted(self.history,key=lambda x: -x[0])",
"def __crowdingDistanceSort(self):\n\n\t\t# Create a tuple which allows for sorting primarily on rank, followed by\n\t\t# crowding distance. The negative of the crowding distance is used to\n\t\t# ensure that less crowded regions are prefered.\n\t\trank_and_individual = [((ind.rank, -ind.crowdingDistance), ind) for ind in self.population]\n\t\trank_and_individual.sort()\n\n\t\t# Assign the sorted individuals to the population\n\t\tself.population = [x[1] for x in rank_and_individual]\n\n\t\treturn",
"def _sort(self):\n self.rows.sort(key=lambda x: (x['PERC1'], x['EQ'], x['PASS'], x['W2']),\n reverse=True)\n\n rank = 0\n prev_perc = 0\n prev_rank = 0\n for row in self.rows:\n if row[\"NR\"] == 0:\n # Something has already populated NR as 0 - so we set rank as\n # 0 too\n row['_RANK'] = 0\n row['_NR'] = 0\n continue\n\n # Increment our count\n rank += 1\n if row['PERC1'] == prev_perc:\n row['NR'] = \"\"\n row['_NR'] = prev_rank # I.e. joint 6th will be 6 here\n row['_RANK'] = rank # I.e. joint 6th could be 7, or 8 etc. here\n else:\n row['NR'] = rank\n row['_NR'] = rank\n row['_RANK'] = rank\n prev_perc = row['PERC1']\n prev_rank = rank",
"def init_sorted_variables(self):\n variables_by_neighbors = [] # A list of (var_name, |neighbors|)\n for variable in self.var_names:\n variables_by_neighbors.append(\n (self.variables[variable].get_name(), len(self.variables[variable].get_neighbors())))\n\n # In this part we sort the variables according to the heuristic:\n variables_by_neighbors = sorted(variables_by_neighbors, key=lambda tup: tup[1], reverse=True)\n # (J) Notice that there can be many variables with same neighbour, thus the order between them isn't determined.\n self.sorted_variables = [*map(lambda x: x[0], variables_by_neighbors)]",
"def rearrangeMovieArray():\n # using lambda to sort by values of dict and return list \n new_ranked= sorted(movieViewCounts, key=lambda v:movieViewCounts[v], reverse=True)\n moviesRanked = new_ranked",
"def _ranking_sort_func(self, model, iter1, iter2):\n #print \"_sort_func()\"\n item1 = model.get_value(iter1, COL_ITEM)\n item2 = model.get_value(iter2, COL_ITEM)\n if item1 == None or item2 == None:\n return 0\n if item1.rank < item2.rank: return 1\n elif item1.rank > item2.rank: return -1\n else: return 0",
"def reorder(self):\n\t\tif self.N<2: return\n\t\tix = np.argsort(self.extern(self.P_w[0:self.N]))[::-1]\n\t\tself.P_w \t= self.intern(self.P_w.get()[ix])\n\t\tself.nj \t= self.intern(self.nj.get()[ix])\n\t\tself.logS \t= self.intern(self.logS.get()[ix])\n\t\tself.mu \t= self.intern(self.mu.get()[ix,])\n\t\tself.sigma \t= self.intern(self.sigma.get()[ix,])\n\t\tself.sigi \t= self.intern(self.sigi.get()[ix,])",
"def sorted_rank(self):\n\n sorted_players = sorted(self.players, key=itemgetter(\"Pairing number\"))\n\n return sorted_players",
"def rank_sort(first_restriction_code, second_restriction_code):\n rank_a = RESTRICTION_RANKS.get(first_restriction_code, 0)\n rank_b = RESTRICTION_RANKS.get(second_restriction_code, 0)\n\n if rank_b > rank_a:\n return 1\n elif rank_a > rank_b:\n return -1\n else:\n return 0",
"def sort_by_reranker_scores(self):\n self.parses.sort(key=lambda parse: (parse.reranker_score,\n parse.parser_score),\n reverse=True)",
"def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()",
"def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored",
"def sort(self,*cols):\r\n for c in cols[::-1]:\r\n reverse = c[0] == \"-\"\r\n if reverse:\r\n v = c[1:]\r\n else:\r\n v = c\r\n self.data.sort(key = lambda x:x[self.col_to_location(v)],reverse=reverse)",
"def sort_data(self):\n\n # zips the game_list and game_Scores, sorts the result by scores, and then puts them back.\n self.game_list, self.game_scores = zip(*sorted(zip(self.game_list, self.game_scores), key=lambda pair: pair[1]))",
"def sort(self):\r\n\t\t\r\n\t\t# get variables, add i\r\n\t\tv = self.scan(p=False)\r\n\t\tv.append('i')\r\n\t\t\r\n\t\t# reverse so least weighted variables come first\r\n\t\tv.reverse()\r\n\t\t\r\n\t\t# assign a weight to each variable, based on position in list\r\n\t\tw = {}\r\n\t\tfor n,i in enumerate(v):\r\n\t\t\tw[i] = 1000 ** (n + 1)\r\n\t\t\t\r\n\t\t# assign score based on weights and exponents\r\n\t\ts = {}\r\n\t\tfor i in self:\r\n\t\t\t\r\n\t\t\t# sum weights\r\n\t\t\tc = 0\r\n\t\t\tfor k,j in i.items():\r\n\t\t\t\t\r\n\t\t\t\t# adjust weights based on exponent\r\n\t\t\t\tif k != 'i':\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j)\r\n\t\t\t\t\t\r\n\t\t\t\t# i is adjusted based on even or odd exponents\r\n\t\t\t\telse:\r\n\t\t\t\t\tc += w.get(k,0) * (100 + j % 2)\r\n\t\t\t\t\t\r\n\t\t\t# use score as key\r\n\t\t\ts[c] = i\r\n\t\t\t\t\r\n\t\t# sort keys largest to smallest\r\n\t\ty = s.keys()\r\n\t\ty.sort()\r\n\t\ty.reverse()\r\n\t\t\r\n\t\t# new term list\r\n\t\tn = [s[k] for k in y]\r\n\t\t\r\n\t\treturn Li(n,c=False)",
"def double_sort(data, last_var=0):\n \n # doing simply np.sort(np.sort(pairs, axis=1), axis=0)\n # would uncouple first and second elements of pairs\n # during the second sorting (axis=0)\n data = np.sort(data, axis=1)\n x_sort = np.argsort(data[:, 0])\n data = data[x_sort]\n \n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a set of criteria, find the matching variable(s).
|
def get_matching(variables, strict=True, single=True, **criteria):
matching = []
for var in variables:
for crit_name, crit_info in criteria.items():
if getattr(var, crit_name) == crit_info:
continue
else:
break
else:
matching.append(var)
if not matching and strict:
raise RuntimeError("No matching variables were found.")
if single:
if len(matching) > 1:
raise RuntimeError(
f"Expected to find 1 matching variable. Found '{matching}'."
)
if not matching:
return ()
return matching[0]
return tuple(matching)
|
[
"def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2:my_var_name.find(\",\")])\n v = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\",\")])\n t = int(my_var_name[my_var_name.rfind(\",\") + 1:-1])\n\n # print('%s = %f ' % (my_var_name, my_var_value))\n x_searchers[(s, v, t)] = my_var_value\n\n if t > t_max:\n t_max = t\n\n elif 'beta' in my_var_name and '_s' not in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember: b[0] is probability of capture\n v = int(my_var_name[5:my_var_name.find(\",\")])\n t = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\"]\")])\n b_target[(v, t)] = my_var_value\n\n # make sure x is binary\n x_searchers = enforce_binary(x_searchers, t_max)\n b_target = enforce_sum_1(b_target, t_max)\n\n # x_searchers[(s, v, t)] and b_target[(v, t)]\n return x_searchers, b_target",
"def _find_vars(self, varnames, unique=False, evars=False, all_ok=False, \n empty_ok=False, single=False):\n if isinstance(varnames, str):\n varnames = (varnames,)\n elif not isinstance(varnames, collections.Iterable):\n raise TypeError(\"variable names should be str or iterable of str\")\n \n # first split into list of single abbrevs per str\n split_names = []\n for name in varnames:\n if not isinstance(name, str):\n raise TypeError(\"must specify variables as string(s)\")\n split_names += name.split()\n nnames = len(split_names)\n \n # check for _all, check for proper usage, and return copy of varlist\n # if evars==False or ['_dta'] + varlist if evars==True\n all_specified = False\n if '_all' in split_names:\n if not all_ok:\n raise ValueError(\"\\\"_all\\\" not allowed in this context\")\n elif not nnames == 1:\n raise ValueError(\n \"\\\"_all\\\" may not be combined with other names\")\n all_specified = True\n all_names = (['_dta'] if evars else []) + list(self._varlist)\n nnames = len(all_names)\n \n # check that more than 0 names specified if empty_ok==False, and\n # ignore extras (with message) if single==True\n if not empty_ok and nnames == 0:\n raise ValueError(\"no variables specified\")\n if single and nnames > 1:\n if not self._quiet:\n smcl = \"{err}\" if IN_STATA else \"\"\n msg = smcl + \"only one {}varname allowed; ignoring the rest\"\n print(msg.format('e' if evars else ''))\n split_names = split_names[:1]\n \n # if all_specified, return aleady-constructed all_names\n if all_specified:\n return all_names\n \n # Create match list of [abbrev, match1, match2, ...].\n # The loops below identify when exact varname given, but that varname\n # happens to be abbreviation of other varnames.\n varlist = self._varlist\n matches = []\n append = matches.append\n if evars:\n for name in split_names:\n if name == \"_dta\":\n append([name, name])\n else:\n match = [var for var in varlist if var.startswith(name)]\n append([name, name] if name in match else [name] + match)\n else:\n for name in split_names:\n match = [var for var in varlist if var.startswith(name)]\n append([name, name] if name in match else [name] + match)\n \n # abbreviation was a good, unambiguous abbreviation if exactly\n # one match found, i.e. if the corresponding entry in -matches- \n # is [abbrev, match1]\n if not all(len(m) == 2 for m in matches):\n # there were unmatched or ambiguous abbreviations\n zeros = \" \".join([m[0] for m in matches if len(m) == 1])\n twos = \" \".join([m[0] for m in matches if len(m) >= 3])\n if zeros != \"\" and twos != \"\":\n msg = \"no variables found for {}; multiple found for {}\"\n raise ValueError(msg.format(zeros, twos))\n if zeros != \"\":\n raise ValueError(\n \"no variables found for {}\".format(zeros, twos))\n # if getting here, twos != \"\" and zeros == \"\"\n raise ValueError(\"multiple variables found for '{}'\".format(twos))\n \n if not unique:\n return [m[1] for m in matches]\n seen = set()\n # if name has not been encountered, add to list and set of encountered\n return [m[1] for m in matches \n if m[1] not in seen and not seen.add(m[1])]",
"def searchGroups(**criteria):",
"def basic_find_one_independent_choose(all_set_variables):\n task_list = []\n for choose_keyword in list(all_set_variables):\n # for choose_keyword, set_vars in six.iteritems(value):\n task_list.append(choose_keyword)\n task_list = basic_add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def find_one_independent_choose(all_set_variables):\n task_list = []\n for key in all_set_variables:\n value = all_set_variables[key]\n choose_keywords = list(value)\n for choose_keyword in choose_keywords:\n set_vars = value[choose_keyword]\n task_list.append((key, choose_keyword))\n task_list = add_more_important_tasks(\n choose_keyword, all_set_variables, task_list\n )\n logging.debug(task_list)\n return task_list[0]",
"def best_match(ds,base_cond,cond1,cond2,var_dict):\n \n idx = ds['cond'] == base_cond\n ds_base_cond = ds.sub(idx)\n \n idx = ds['cond'] == cond1\n ds_cond1 = ds.sub(idx)\n \n idx = ds['cond'] == cond2\n ds_cond2 = ds.sub(idx) \n \n idx = ds['cond'] == cond3\n ds_cond3 = ds.sub(idx)\n \n idx = ds['cond'] == cond4\n ds_cond4 = ds.sub(idx)\n \n pairs_w1 = []\n pairs_w1_m = []\n \n pairs_w2 = []\n pairs_w2_m = []\n \n pairs_w3 = []\n pairs_w3_m = []\n\n pairs_w4 = []\n pairs_w4_m = []\n \n full_set = []\n \n for i in range(0,len(ds_base_cond['Word'])):\n targ_surf_log = ds_base_cond['Log_Freq_HAL'][i]\n targ_surf_raw = ds_base_cond['surf_freq'][i]\n #targ_stem = ds_base_cond['free_stem_freq'][i]\n targ_RT = ds_base_cond['mean_RT'][i]\n targ_length = ds_base_cond['Length'][i]\n targ_suff = ds_base_cond['Suffix'][i]\n word = ds_base_cond['Word'][i]\n \n \n dses = (ds_cond1,ds_cond2,ds_cond3,ds_cond4)\n \n for ds_ in dses:\n booln = []\n \n for i in range(0,len(ds_['Word'])):\n surf_log1 = ds_['Log_Freq_HAL'][i]\n surf_raw1 = ds_['surf_freq'][i]\n #stem1 = ds_['free_stem_freq'][i]\n RT1 = ds_['mean_RT'][i]\n length1 = ds_['Length'][i]\n suff1 = ds_['Suffix'][i]\n word1 = ds_['Word'][i]\n \n \n if targ_suff == suff1 and abs(targ_surf_log - surf_log1) < lower_log_freq and abs(targ_surf_raw - surf_raw1) < lower_raw_freq and abs(targ_RT - RT1) < mean_RT and abs(targ_length - length1) < lower_length:\n # and abs(targ_stem - stem1 < 200) and \n \n if ds_ == ds_cond1:\n if word1 not in pairs_w1_m and word not in pairs_w1:\n pairs_w1.append(word)\n pairs_w1_m.append(word1)\n \n if ds_ == ds_cond2:\n if word1 not in pairs_w2_m and word not in pairs_w2:\n pairs_w2.append(word) \n pairs_w2_m.append(word1) \n \n if ds_ == ds_cond3:\n if word1 not in pairs_w3_m and word not in pairs_w3:\n pairs_w3.append(word) \n pairs_w3_m.append(word1)\n \n if ds_ == ds_cond4:\n if word1 not in pairs_w4_m and word not in pairs_w4:\n pairs_w4.append(word) \n pairs_w4_m.append(word1)\n \n booln.append(True)\n \n else:\n booln.append(False)\n \n ds_['booln'] = booln\n \n for i in range(0,len(ds_['Word'])):\n surf_log1 = ds_['Log_Freq_HAL'][i]\n surf_raw1 = ds_['surf_freq'][i]\n #stem1 = ds_['free_stem_freq'][i]\n RT1 = ds_['mean_RT'][i]\n length1 = ds_['Length'][i]\n suff1 = ds_['Suffix'][i]\n word1 = ds_['Word'][i]\n boo = ds_['booln'][i]\n \n if boo is False and abs(targ_surf_log - surf_log1) < upper_log_freq and abs(targ_surf_raw - surf_raw1) < upper_raw_freq and abs(targ_RT - RT1) < mean_RT and abs(targ_length - length1) < lower_length:\n \n if ds_ == ds_cond1:\n if word1 not in pairs_w1_m and word not in pairs_w1:\n pairs_w1.append(word)\n pairs_w1_m.append(word1)\n\n if ds_ == ds_cond2:\n if word1 not in pairs_w2_m and word not in pairs_w2:\n pairs_w2.append(word) \n pairs_w2_m.append(word1) \n \n if ds_ == ds_cond3:\n if word1 not in pairs_w3_m and word not in pairs_w3:\n pairs_w3.append(word) \n pairs_w3_m.append(word1)\n \n if ds_ == ds_cond4:\n if word1 not in pairs_w4_m and word not in pairs_w4:\n pairs_w4.append(word) \n pairs_w4_m.append(word1) \n \n if word in pairs_w1 and pairs_w2 and pairs_w3 and pairs_w4:\n full_set.append(word)\n \n cond1_match = dict(zip(pairs_w1,pairs_w1_m))\n cond2_match = dict(zip(pairs_w2,pairs_w2_m))\n cond3_match = dict(zip(pairs_w3,pairs_w3_m))\n cond4_match = dict(zip(pairs_w4,pairs_w4_m))\n\n print len(cond1_match), '/', len(ds_base_cond['Word'])\n print len(cond2_match), '/', len(ds_base_cond['Word'])\n print len(cond3_match), '/', len(ds_base_cond['Word'])\n print 
len(cond4_match), '/', len(ds_base_cond['Word'])\n \n cond1_match_nomatch = []\n cond2_match_nomatch = []\n cond3_match_nomatch = []\n cond4_match_nomatch = []\n \n for item in full_set:\n cond1_match_nomatch.append(cond1_match.get(item))\n cond2_match_nomatch.append(cond2_match.get(item))\n cond3_match_nomatch.append(cond3_match.get(item))\n cond4_match_nomatch.append(cond4_match.get(item))\n \n ds_match = Dataset()\n ds_match[base_cond] = Factor(full_set)\n ds_match[cond1] = Factor(cond1_match_nomatch) \n ds_match[cond2] = Factor(cond2_match_nomatch) \n ds_match[cond3] = Factor(cond3_match_nomatch) \n ds_match[cond4] = Factor(cond4_match_nomatch) \n \n return ds_match",
"def variables_match(regex, scope):\n # XXX We should prune the scope tree directly globbing the relevent part\n # of *regex* as necessary instead of flatting the whole scope tree\n # and iterating through the list to prune the variable list.\n results = []\n for path in paths_from_scope(scope):\n look = re.match(regex, path)\n if look:\n results += [Variable(path=path, is_leaf=path.endswith(NODE_SEP))]\n return results",
"def findatoms(self, **keywords):\n #atomlist = []\n #for i in range(self.natoms):\n # if self.data.field(field)[i] == name:\n # atomlist.append(i)\n\n # This code is to advance to a new, set-based method of\n # selecting atoms. I haven't switched to it yet.\n # This should take an intersection of all given conditions. In\n # order to do this, I will find a list of atoms which match each\n # condition, and then find the intersection of all of them.\n\n # Process natoms: if not given, natoms=self.natoms(), if given,\n # only iterate over those many atoms\n\n if keywords.has_key(\"natoms\"):\n natoms = keywords[\"natoms\"]\n del keywords[\"natoms\"]\n else:\n natoms = self._natoms\n\n lists_to_intersect = []\n for key,value in keywords.iteritems():\n # We want to be able to pick if we are finding an exact\n # match, or a range. If it is an exact match, then\n # \"value\" will be the element to match. If it is a range,\n # \"value\" should be a tuple containing (low, high),\n # inclusively.\n\n fiel = self.data[key]\n if type(value) == tuple and len(value) == 2: # we have a range match\n low = value[0]\n high = value[1]\n #new_list = [ i for i in range(natoms)\n # if ( self.data.field(key)[i] >= low and self.data.field(key)[i] <= high ) ]\n new_list = [ i for i in range(natoms)\n if ( fiel[i] >= low and fiel[i] <= high ) ]\n lists_to_intersect.append(new_list)\n elif type(value) == list: # We have a \"match any\" match\n # Here is the way it works:\n # - value contains a list of things to match\n # - \n # This could be optimized some...\n vals = {}\n for x in value:\n vals[x] = 1\n new_list = [ i for i in range(natoms)\n if fiel[i] in vals ]\n lists_to_intersect.append(new_list)\n\n \n else: # we have an exact match\n new_list = [ i for i in range(natoms)\n if (fiel[i] == value) ]\n lists_to_intersect.append(new_list)\n # End branch between exact and range matches.\n # this should be moved the built in `set`, implemented in C.\n # But it's only in python2.4.\n atomset = set(lists_to_intersect[0])\n for otherlist in lists_to_intersect[1:]:\n atomset &= set(otherlist)\n return atomset\n #return atomlist",
"def search_mel_variables(searchString=None, *args, **kwargs):\n\tfor var in sorted(mel.eval(\"env\")):\n\t\tif not searchString or var.lower().count(searchString):\n\t\t\tyield var",
"def _universal_query(self, universal_criteria):\n result_counts = defaultdict(lambda: 0)\n desired_names = [x[0] for x in universal_criteria]\n LOGGER.info('Finding Records where data in %s exist', desired_names)\n expected_result_count = len(universal_criteria)\n for query_table in DATA_TABLES:\n # We know that a single Record can never have more than one datum with\n # a given name, so all we need to get is count.\n query = (self.session.query(query_table.id, sqlalchemy.func.count(query_table.name))\n .filter(query_table.name.in_(desired_names))\n .group_by(query_table.id))\n for result in query:\n result_counts[result[0]] += result[1] # Add the number of found names\n for entry, val in six.iteritems(result_counts):\n if val == expected_result_count:\n yield entry",
"def parameter_finder(target_list, search_list, msgflag=False, exact=False):\n target_list = [x.lower() for x in target_list]\n\n indexes = []\n\n if isinstance(search_list, str):\n cont = 0\n search_list = search_list.lower()\n for t in target_list:\n if exact == False and search_list in t:\n indexes.append(cont)\n elif exact == True and search_list == t:\n indexes.append(cont)\n cont += 1\n if isinstance(search_list, list):\n search_list = [x.lower() for x in search_list]\n\n for s in search_list:\n s = str(s)\n for cont, t in enumerate(target_list):\n if exact == False and s in t:\n print((s, t))\n indexes.append(cont)\n elif exact == True and s == t:\n print((s, t))\n indexes.append(cont)\n\n if msgflag == True:\n length = len(indexes)\n if length > 1: print(\"There were several ocurrences\")\n if length == 0: print(\"No ocurrences found\")\n\n return indexes",
"def solve(self, num_variables, clauses, repeats = None):\n if repeats is None:\n repeats = math.ceil(math.log2(num_variables))\n for _ in range(repeats):\n variables = [random.random() > 0.5 for _ in range(num_variables)]\n for _ in range(2 * num_variables ** 2):\n unsatisfied = []\n for lit_1, lit_2 in clauses:\n idx_1 = abs(lit_1) - 1\n idx_2 = abs(lit_2) - 1\n if lit_1 < 0:\n var_1 = not variables[idx_1]\n else:\n var_1 = variables[idx_1]\n if lit_2 < 0:\n var_2 = not variables[idx_2]\n else:\n var_2 = variables[idx_2]\n if not (var_1 or var_2):\n unsatisfied.append((idx_1, idx_2))\n if not unsatisfied:\n return variables\n to_flip = random.choice(random.choice(unsatisfied))\n variables[to_flip] = not variables[to_flip]\n return None",
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def used_variables(*terms):\n\n t = terms[0] if len(terms) == 1 else terms\n\n if type(t) is Var:\n return frozenset((t,))\n\n elif type(t) in (tuple, Const, Apply, Eq, Ite, Not, And, Or,\n Implies, Iff):\n return union(*(used_variables(x) for x in t))\n\n elif type(t) in (ForAll, Exists, Lambda, NamedBinder):\n return union(used_variables(t.body), t.variables)\n\n elif hasattr(t,'args'):\n return union(*(used_variables(x) for x in t.args))\n\n else:\n assert False, type(t)",
"def find_direct_containing(rules, param):\n\n return_list = []\n for rule in rules:\n if param in rules[rule]:\n return_list.append(rule)\n\n return return_list",
"def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates",
"def find_actor_variants_in_argu(actors, argu):\n founds = {}\n for actortype, defined_actors in actors.items():\n for dblabel, variants in defined_actors.items():\n for variant in variants:\n mymatch = re.search(re.compile(\n ut.ACTARG.format(ut.rgescape(variant)),\n re.I|re.U), argu)\n if mymatch:\n # need variant as key cos use it to search against\n # word-forms in the dep-based workflow below\n founds[variant] = (mymatch.start(), mymatch.end(),\n dblabel)\n #founds[dblabel] = (mymatch.start(), mymatch.end())\n return founds",
"def find_matching_varieties(self, a_variety, conditions=[], limit=None):\n\n var_conditions = [\n (\"name =\", a_variety.get_name()),\n ]\n var_conditions.extend(conditions)\n\n conditions = []\n values = []\n for cond, val in var_conditions:\n conditions.append(cond)\n values.append(val)\n\n row_list = []\n for table in self._db.variety_tables:\n rows = self._db.select(\n [\"id\", \"name\", \"hash\", \"content\"], [table], conditions, values, [\"id\"]\n )\n row_list.extend(rows)\n if not (limit is None) and len(row_list) >= limit:\n _logger.info(\"Limit for find_matching_resources reached\")\n break\n\n variety_list = []\n for row in row_list:\n translated_hash = list(row[\"hash\"][1:-2].split(\" \"))\n\n variety = Resource.Variety(\n content=row[\"content\"],\n hash=translated_hash,\n )\n variety.set_name(row[\"name\"])\n variety.set_id(row[\"id\"])\n variety_list.append(variety)\n return variety_list",
"def identify_variables(line):\n\n # list of match tuple\n matches = []\n # macthes $(variable) or $variable $( variable )\n reg = re.compile('\\$\\( *(\\w+) *\\)')\n iterator = reg.finditer(line);\n for match in iterator:\n # start and end position\n start, end = match.span()\n # raw name. Name without deleimiters\n raw_name = match.group()\n # name of the varialbe\n name = match.group(1)\n # start, name , variable tuple\n matches.append((start,end,raw_name,name))\n return matches"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Match variable to VariableFactory using rank, name, and units.
|
def match_factory(variable, factories):
if not isinstance(factories, tuple):
factories = (factories,)
for factory in factories:
if (
variable.rank == factory.rank
and variable.name == factory.name
and variable.units == factory.units
):
return True
return False
|
[
"def _variable_factory(self, var):\n try:\n variable = MOM6Variable(var, self.fh, **self.initializer)\n except TypeError:\n variable = self.fh.variables[var][:]\n return variable",
"def createVariable(self, name: unicode, offset: int, dataType: ghidra.program.model.data.DataType, source: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.listing.Variable:\n ...",
"def _find_or_create_variable(self, cname, vname, source):\n try:\n var = self.model.get_variable_by_name(cname, source.name)\n raise KeyError()\n except KeyError:\n # Have we created it already?\n try:\n var = self.model.get_variable_by_name(cname, vname)\n except KeyError:\n # Create it and add to model\n units = source.component.get_units_by_name(source.units)\n var = self.add_variable(cname, vname, units)\n return var",
"def Variable(name):\n placeholder_node = placeholder_op()\n placeholder_node.name = name\n return placeholder_node",
"def variable_instance(self, variable_name, treatment_name):\n # A more pythonic approach than checking for this known string?\n if variable_name == '__model__':\n return self._pseudo_variable\n else:\n return self.variable(variable_name).by_treatment(treatment_name)",
"def variable(variable_name, *args):\n logging.info('Creating variable %s', variable_name)\n return _parse_and_create(variable_name, VariableInstance, 'Variable', args)",
"def CreateFLV(self, variableName):\n self._variables[variableName] = FuzzyVariable()\n\n return self._variables[variableName]",
"def Variable(initial_value, name, trainable=None):\n raise NotImplementedError",
"def _variable(el):\n el = str(el).split('_')\n name = el[0]\n try:\n indices = el[1]\n except IndexError:\n # No indices\n indices = \"\"\n return Variable(name=name, indices=tuple(indices))",
"def variable(name):\n placeholder_node = placeholder_op() # 调用 call 函数\n placeholder_node.name = name\n return placeholder_node",
"def find_variable(self, standard_name=None, source_var=None,\n any_scope=True, clone=None,\n search_call_list=False, loop_subst=False):\n # First, see if the variable is already in our path\n srch_clist = search_call_list\n var = super().find_variable(standard_name=standard_name,\n source_var=source_var,\n any_scope=any_scope,\n clone=None,\n search_call_list=srch_clist,\n loop_subst=loop_subst)\n if var is None:\n # No dice? Check for a group variable which can be promoted\n if standard_name in self.__gvar_stdnames:\n group = self.__gvar_stdnames[standard_name]\n var = group.find_variable(standard_name=standard_name,\n source_var=source_var,\n any_scope=False,\n search_call_list=srch_clist,\n loop_subst=loop_subst)\n if var is not None:\n # Promote variable to suite level\n # Remove this entry to avoid looping back here\n del self.__gvar_stdnames[standard_name]\n # Let everyone know this is now a Suite variable\n var.source = ParseSource(_API_SOURCE_NAME,\n _API_SUITE_VAR_NAME,\n var.context)\n self.add_variable(var, self.__run_env)\n # Remove the variable from the group\n group.remove_variable(standard_name)\n else:\n emsg = (\"Group, {}, claimed it had created {} \"\n \"but variable was not found\")\n raise CCPPError(emsg.format(group.name, standard_name))\n # end if\n # end if\n # end if\n if (var is None) and (clone is not None):\n # Guess it is time to clone a different variable\n var = super().find_variable(standard_name=standard_name,\n source_var=source_var,\n any_scope=any_scope, clone=clone)\n # end if\n return var",
"def var(*args, **kwargs):\n return Variable(*args, **kwargs)",
"def get_var(my_vars: dict, name: str):\n desired_var = my_vars.get(name)\n if desired_var is not None:\n return desired_var\n else:\n var_names = 'x, y, alpha, beta, zeta, psi'\n print('No variable with this name, current model accepts only:' + var_names)\n return None",
"def create_variable() -> tf.Variable:",
"def _name_to_variable(self, name: str) -> Parameter:\n return cast(Parameter, super()._name_to_variable(name))",
"def get_variable(self, node, name, ty=None):\n if name in self.local_map:\n var = self.local_map[name]\n else:\n # Create a variable with the given name\n # TODO: for now i64 is assumed to be the only type!\n if ty is None:\n self.error(node, \"Undefined variable\")\n else:\n mem = self.emit(ir.Alloc(\"alloc_{}\".format(name), 8, 8))\n addr = self.emit(ir.AddressOf(mem, \"addr_{}\".format(name)))\n var = Var(addr, True, ty)\n self.local_map[name] = var\n return var",
"def var():\n\n return variable.Variable(\"generic_var\",template_units.kg_s,\"A generic var\")",
"def _variable(self, name, vars_set):\n if not re.match(r\"[_a-zA-Z][_a-zA-Z0-9]*$\", name):\n self._syntax_error(\"Not a valid name\", name)\n vars_set.add(name)",
"def __get_variable_from_dictionary(dictionary, variable_name):\n if variable_name not in dictionary.keys():\n dictionary[variable_name] = Variable(variable_name, None)\n return dictionary.get(variable_name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the lags for a given VariableFactory.
|
def get_variable_lags(var_factory):
if var_factory in shifted_variables:
return lags
return (0,)
|
[
"def lags(fs):\n\n return type(fs)(f.lag for f in fs)",
"def get_shifted_variables(var_factory):\n shifted = []\n for lag in get_variable_lags(var_factory):\n shifted.append(var_factory[lag])\n return tuple(shifted)",
"def get_valid_lags(binned_spiketrain_i, binned_spiketrain_j):\n\n bin_size = binned_spiketrain_i._bin_size\n\n # see cross_correlation_histogram for the examples\n if binned_spiketrain_i.n_bins < binned_spiketrain_j.n_bins:\n # ex. 1) lags range: [-2, 5] ms\n # ex. 2) lags range: [1, 2] ms\n left_edge = (binned_spiketrain_j._t_start -\n binned_spiketrain_i._t_start) / bin_size\n right_edge = (binned_spiketrain_j._t_stop -\n binned_spiketrain_i._t_stop) / bin_size\n else:\n # ex. 3) lags range: [-1, 3] ms\n left_edge = (binned_spiketrain_j._t_stop -\n binned_spiketrain_i._t_stop) / bin_size\n right_edge = (binned_spiketrain_j._t_start -\n binned_spiketrain_i._t_start) / bin_size\n right_edge = int(right_edge)\n left_edge = int(left_edge)\n lags = np.arange(left_edge, right_edge + 1, dtype=np.int32)\n\n return lags",
"def lags(self):\n lags = collections.defaultdict(list)\n for port in self.lacp_ports():\n lags[port.lacp].append(port)\n return lags",
"def _lags_num(self):\r\n \r\n r_new = np.zeros(10)\r\n # Step 1\r\n L = 1\r\n while True:\r\n\r\n # Step 2\r\n augmented = self.augmentation(L, self.data1.copy())\r\n\r\n # Step 3\r\n _, s_fault, _ = np.linalg.svd(augmented)\r\n\r\n # Step 4\r\n j = self.data1.shape[1]*L-1\r\n r = 0\r\n\r\n # Step 5 and 6, choose threshold 0.01\r\n while s_fault[j] < 0.01:\r\n j = j - 1\r\n r = r + 1\r\n\r\n # Step 7\r\n summa = 0\r\n for i in range(L):\r\n summa += (L-i+1)*r_new[i] \r\n r_new[L] = r - summa\r\n\r\n # Step 8\r\n if r_new[L] <= 0:\r\n break\r\n\r\n # Step 9\r\n L = L + 1\r\n return L, augmented",
"def lagged_features(df, lags):\n df_list = []\n for lag in lags:\n df_shifted = df.shift(lag)\n df_shifted.columns = [x + \"_lag\" + str(lag) for x in df_shifted.columns]\n df_list.append(df_shifted)\n fea = pd.concat(df_list, axis=1)\n return fea",
"def LDFlags(self):\n return self._g_linkflags",
"def user_iflags_prev(*args):\n return _ida_hexrays.user_iflags_prev(*args)",
"def get_feature_times_past(\n past_covariates: TimeSeries,\n past_covariates_lags: Sequence[int],\n ) -> pd.Index:\n times = past_covariates.time_index\n min_lag = -max(past_covariates_lags)\n # Add times after end of series for which we can create features:\n times = times.union(\n [times[-1] + i * past_covariates.freq for i in range(1, min_lag + 1)]\n )\n max_lag = -min(past_covariates_lags)\n times = times[max_lag:]\n return times",
"def get_user_iflags(self, *args):\n return _ida_hexrays.cfunc_t_get_user_iflags(self, *args)",
"def _add_lagged_features(self, X: pd.DataFrame, lags: list) -> pd.DataFrame:\n for l in lags:\n X[f'sales_lag_{l + self.shift_days}'] = (X[['id', 'sales', 'd']]\n .groupby('id')['sales']\n .transform(lambda x: x.shift(l + self.shift_days))\n .fillna(0))\n return X",
"def _get_lags_dict(self):\n lags_dict = {}\n for fcst_date in self.dates:\n day_of_year = self.calculate_day_of_year(fcst_date)\n for init_date in self.init_dates:\n lag = day_of_year - self.calculate_day_of_year(init_date)\n days_of_year = lags_dict.get(lag)\n if days_of_year:\n days_of_year.append(day_of_year)\n else:\n lags_dict[lag] = [day_of_year]\n \n return lags_dict",
"def test_lagf_gradient_2(self):\n np.random.seed(4897)\n n, dt = 8, 1.0\n t = np.arange(n, dtype=np.double)\n x = np.random.randn(n) + 4\n y = np.random.randn(n) + 4.5\n xe = x*0+1.0\n fqL = np.array([0.25,0.5])\n inpars = np.array([0.0, 1.0, 0.0, 0.1])\n x0 = np.array([0.1, 0.0, 1.0])\n ifunc = np.array([1,1,1,1], np.int32)\n p = plag._plag.lagf([t,t], [x,x], [xe,xe], dt, fqL, 0, 1, x0, x0, ifunc, 10)\n logLike1, g1, h = p.dLogLikelihood(inpars)\n \n from scipy.misc import derivative\n def fun(x, i, inp):\n pp = np.array(inp)\n pp[i] = x\n return p.logLikelihood(pp, 1, 0)\n g2 = [derivative(fun, inpars[i], 1e-4, 1, (i,inpars)) \n for i in range(4)]\n np.testing.assert_almost_equal(g1,g2, 6)",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def lag(f: feature, val: int = 1) -> feature:\n \n return feature(f.tag, f.val, f.lag + val)",
"def get_user_iflags(self, *args):\n return _ida_hexrays.cfuncptr_t_get_user_iflags(self, *args)",
"def test_feature_times_lags_range_idx(self):\n target = linear_timeseries(start=0, length=20, freq=2)\n # Expect same behaviour when training and predicting:\n for is_training in (False, True):\n for max_lags in (-1, -2, -3, -4, -5):\n feature_times = _get_feature_times(\n target_series=target,\n lags=[-1, max_lags],\n is_training=is_training,\n )\n assert feature_times[0][0] == target.start_time() + target.freq * abs(\n max_lags\n )",
"def create_lag_features(df, window):\n\n feature_cols = [\"air_temperature\", \"cloud_coverage\", \"dew_temperature\", \"precip_depth_1_hr\"]\n df_site = df.groupby(\"site_id\")\n\n df_rolled = df_site[feature_cols].rolling(window=window, min_periods=0)\n\n df_mean = df_rolled.mean().reset_index().astype(np.float16)\n df_median = df_rolled.median().reset_index().astype(np.float16)\n df_min = df_rolled.min().reset_index().astype(np.float16)\n df_max = df_rolled.max().reset_index().astype(np.float16)\n df_std = df_rolled.std().reset_index().astype(np.float16)\n df_skew = df_rolled.skew().reset_index().astype(np.float16)\n\n for feature in feature_cols:\n df[f\"{feature}_mean_lag{window}\"] = df_mean[feature]\n df[f\"{feature}_median_lag{window}\"] = df_median[feature]\n df[f\"{feature}_min_lag{window}\"] = df_min[feature]\n df[f\"{feature}_max_lag{window}\"] = df_max[feature]\n df[f\"{feature}_std_lag{window}\"] = df_std[feature]\n df[f\"{feature}_skew_lag{window}\"] = df_std[feature]\n\n return df",
"def test_feature_times_invalid_lag_values_error(self):\n series = linear_timeseries(start=1, length=3, freq=1)\n # `lags` not <= -1:\n with pytest.raises(ValueError) as err:\n _get_feature_times(target_series=series, lags=[0], is_training=False)\n assert (\n \"`lags` must be a `Sequence` containing only `int` values less than 0.\"\n ) == str(err.value)\n # `lags_past_covariates` not <= -1:\n with pytest.raises(ValueError) as err:\n _get_feature_times(\n past_covariates=series, lags_past_covariates=[0], is_training=False\n )\n assert (\n \"`lags_past_covariates` must be a `Sequence` containing only `int` values less than 0.\"\n ) == str(err.value)\n # `lags_future_covariates` can be positive, negative, and/or zero - no error should be thrown:\n _get_feature_times(\n future_covariates=series,\n lags_future_covariates=[-1, 0, 1],\n is_training=False,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all possible shifted variables given a VariableFactory.
|
def get_shifted_variables(var_factory):
shifted = []
for lag in get_variable_lags(var_factory):
shifted.append(var_factory[lag])
return tuple(shifted)
|
[
"def get_variable_lags(var_factory):\n if var_factory in shifted_variables:\n return lags\n return (0,)",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def basis(self, index, vars = None) :\n if index == self.__index :\n if vars is None :\n vars = range(self.__ngens)\n \n return [ tuple(i*[0] + [1] + (self.__ngens - i - 1)*[0])\n for i in vars ]\n else :\n return []",
"def reset_variables(n):\n return [None]*n",
"def collect_variables(self):\n variables = []\n for eq in self:\n variables.extend(eq.collect_variables())\n\n # Make the list items unique.\n variables = list(set(variables))\n\n return variables",
"def variables(self):\n for state in self.states:\n yield self.assert_state(state)\n yield self.deassert_state(state)",
"def _variable_iterator(self):\n return self._variables.values()",
"def get_all_variables(self):\n return []",
"def get_transform_vars(self):\n return [v for v in (self.rotation_vars + self.translation_vars)\n if isinstance(v, tf.Variable)]",
"def _get_dependent_variables(input_ops, output_ops):\n\n # avoids the edge-case when input_ops == output_ops.\n output_ops = nest.map_structure(gen_array_ops.identity, output_ops)\n inbetween_ops = op_selector.get_backward_walk_ops(\n seed_ops=output_ops,\n stop_at_ts=input_ops,\n inclusive=False,\n only_differentiable=True)\n var_ops = (op for op in inbetween_ops if op.type in VAR_OP_TYPES)\n var_names = (op.name for op in var_ops)\n tf_vars = (get_variable_by_name(var_name) for var_name in var_names)\n tf_vars = [v for v in tf_vars if v is not None]\n return tf_vars",
"def good_sets_forced_variables(self):\n def fanout_cmp(a,b):\n return cmp(fanout[b], fanout[a])\n interventions = []\n g = EssentialGraph.from_graph(self._adg.essential_graph())\n while g.vertices():\n # propagate any implied orientations\n g.resolve()\n\n # set of vertices adjacent to force variables\n blanket = set()\n\n # map from vertex to fanout\n fanout = {}\n\n # add another set of forced variables to the list\n interventions.append(set())\n\n # determine the fan-out of each node\n for v in g.vertices():\n f = len(g.neighbours(v))\n if f < 1:\n g.remove_vertex(v)\n continue\n fanout[v] = f\n\n # order nodes by their fan-out\n fanout_order = sorted(fanout.keys(), cmp=fanout_cmp)\n\n # intervene at the highest fan-out nodes first\n for v in fanout_order:\n # ... provided they are not adjacent to previous force\n # variables\n if v in blanket:\n continue\n interventions[-1].add(v)\n # update the adjacent nodes\n blanket |= g.neighbours(v)\n # remove this node so that it is not included in the next\n # intervention fan-out calculations\n g.remove_vertex(v)\n return interventions",
"def _get_variable_nodes(self):\n if self.variable_nodes == None:\n new_variables_nodes = []\n parents = list(self.roots)\n traversed_parents = []\n\n while len(parents) != 0:\n new_parents = []\n for parent in parents:\n if parent not in traversed_parents:\n new_variables_nodes.append(parent)\n\n for child in parent.children:\n if child not in new_parents:\n new_parents.append(child)\n\n traversed_parents.append(parent)\n parents = new_parents\n self.variable_nodes = new_variables_nodes\n\n\n return self.variable_nodes",
"def variables(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], RandomVariable):\n yield name",
"def iterate_seeds(self):\n\n\t\tfor var in self._variables:\n\t\t\t# Reset derivatives\n\t\t\tfor v in self._variables:\n\t\t\t\tself._variables[v].set_derivative(0)\n\n\t\t\tif isinstance(self._variables[var].value(), np.ndarray):\n\t\t\t\tfor idx in self._variables[var].iterate_idxs():\n\t\t\t\t\tyield idx\n\t\t\telse:\n\t\t\t\tself._variables[var].set_derivative(1)\n\t\t\tyield var",
"def _shift_variables(self):\n new_expr = LpExpression(expression=copy.copy(self.lhs.expr))\n\n for var, coeff in self.rhs.expr.items():\n new_expr.expr[var] -= coeff\n\n if self.slack:\n new_expr.expr[self.slack_variable] = -1 if self.sense == 'leq' else 1\n\n const = self.rhs.const - self.lhs.const\n return new_expr, const",
"def getMaskVariables(self, product):\r\n mask_variable_names = self.getMaskVariableNames(product)\r\n mask_variables = [self.createMaskVariable(product, n) for n in mask_variable_names]\r\n mask_variables = [self.editMaskVariable(product, v) for v in mask_variables]\r\n\r\n return mask_variables",
"def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, Variable))",
"def get_multiple_variables(odb, inst_name, position, variables, step_numbers,\n increments=['0:-1'], tol=1.e-2):\n # Get node labels based on the positions\n node_label = get_node_labels(odb, inst_name, [position], tol)[0]\n node_spec = ((inst_name, (node_label,)),)\n \n # Translate the var list to the variable list format \n # requried by xyDataListFromField\n variable_list = get_variable_list(variables)\n \n # Set the active frames\n step_data, incr_data = set_active_frames(odb, step_numbers, increments)\n \n # Get xy_data_list\n # Need to set odb active, otherwise the xyDataListFromField will fail!\n viewport = session.viewports[session.viewports.keys()[0]]\n viewport.setValues(displayedObject=odb)\n xy_data_list = session.xyDataListFromField(odb=odb, \n outputPosition=NODAL,\n variable=variable_list, \n nodeLabels=node_spec)\n time_set = False\n node_data = {'step': step_data,\n 'incr': incr_data}\n\n for xy_data, variable in zip(xy_data_list, variables):\n data = np.array(xy_data.data)\n if not time_set:\n node_data['time'] = data[:,0]\n time_set = True\n node_data[variable] = data[:,1]\n \n return node_data",
"def get_variable_values(self, vars):\n raise NotImplementedError()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the path to our major ldso symlink (which allows us to change which ldso we are actively using without patching a bunch of binaries).
|
def ld_linux_path(root):
return os.path.join(root, 'lib', 'ld-linux-xpkg.so')
|
[
"def _find_ld_version():\n if sys.platform == 'darwin':\n return _find_exe_version('ld -v', _MAC_OS_X_LD_VERSION)\n else:\n return _find_exe_version('ld -v')",
"def get_linked_libpython():\n if is_windows():\n return\n libdl = ctypes.CDLL(ctypes.util.find_library(\"dl\"))\n libdl.dladdr.argtypes = [ctypes.c_void_p, ctypes.POINTER(_Dl_info)]\n libdl.dladdr.restype = ctypes.c_int\n\n dlinfo = _Dl_info()\n retcode = libdl.dladdr(\n ctypes.cast(ctypes.pythonapi.Py_GetVersion, ctypes.c_void_p),\n ctypes.pointer(dlinfo))\n if retcode == 0: # means error\n return\n path = os.path.realpath(dlinfo.dli_fname.decode())\n if path == os.path.realpath(sys.executable):\n return\n return path",
"def single_version_symlink_path(self):\r\n return os.path.join(self.doc_path, 'single_version')",
"def _GetLLVMSymbolizerPath():\n\n if GetHostArchFromPlatform() == 'x64':\n return X64_LLVM_SYMBOLIZER_PATH\n\n # Get distro codename from /etc/os-release.\n with open(os.path.join('/', 'etc', 'os-release')) as os_release_file:\n os_release_text = os_release_file.read()\n version_codename_re = r'^VERSION_CODENAME=(?P<codename>[\\w.-]+)$'\n match = re.search(version_codename_re, os_release_text, re.MULTILINE)\n codename = match.group('codename') if match else None\n\n if codename == 'xenial':\n return ARM64_XENIAL_LLVM_SYMBOLIZER_PATH\n elif codename == 'bionic':\n return ARM64_BIONIC_LLVM_SYMBOLIZER_PATH\n else:\n raise Exception('Unknown Ubuntu release \"%s\"' % codename)",
"def fixLDPath( root, ldpath, directory ):\n\n if os.path.exists( directory ):\n shutil.rmtree( directory )\n\n start = os.getcwd()\n os.mkdir( directory )\n os.chdir( directory )\n uniqueLD = uniquePath( ldpath )\n\n if DEBUG:\n print 'Unique LD LIBRARY PATH is:'\n print uniqueLD\n sys.stdout.flush()\n\n ldlist = string.split( uniqueLD, ':' )\n if DEBUG:\n print ''\n print 'LD List is:'\n print ldlist\n print ''\n sys.stdout.flush()\n\n for path in ldlist:\n if os.path.exists( path ):\n\n if DEBUG:\n print 'Searching for shared libraries in:'\n print path\n print '-----------------------------------------------'\n res = shellCall( 0, 'ls ' + path + '/*.so*' )\n if res['OK']:\n print res['Value']\n else:\n print res\n print '-----------------------------------------------'\n\n output = shellCall( 0, 'ls ' + path + '/*.so*' )\n #must be tidied for Windows (same below)\n\n if DEBUG:\n if not output['OK']:\n print '**************************'\n print 'Warning, problem with ls:'\n print output\n print '**************************'\n\n if not output['Value'][0]:\n ldlibs = output['Value'][1].split( '\\n' )\n for lib in ldlibs:\n if os.path.exists( lib ):\n filename = os.path.basename( lib )\n output = shellCall( 0, 'ln -s ' + str( lib ) + ' ' + str( filename ) )\n #N.B. for Windows this should be a copy...\n if DEBUG:\n if not output['OK']:\n print '********************************'\n print 'Warning, problem creating link:'\n print 'File: ', filename\n print 'Path: ', lib\n print output\n print '********************************'\n\n if DEBUG:\n print 'Searching for rootmap file in:'\n print path\n print '-----------------------------------------------'\n res = shellCall( 0, 'ls ' + path + '/*rootmap*' )\n if res['OK']:\n print res['Value']\n else:\n print res\n print '-----------------------------------------------'\n\n output = shellCall( 0, 'ls ' + path + '/*rootmap*' )\n\n if DEBUG:\n if not output['OK']:\n print '**************************'\n print 'Warning, problem with rootmap:'\n print output\n print '**************************'\n\n if not output['Value'][0]:\n ldlibs = output['Value'][1].split( '\\n' )\n for lib in ldlibs:\n if os.path.exists( lib ):\n if re.search( 'RELAX', lib ) is not None:\n filename = os.path.basename( lib )\n output = shellCall( 0, 'ln -s ' + str( lib ) + ' ' + str( filename ) )\n if DEBUG:\n if not output['OK']:\n print '********************************'\n print 'Warning, problem creating link:'\n print 'File: ', filename\n print 'Path: ', lib\n print output\n print '********************************'\n\n os.chdir( start )\n sys.stdout.flush()",
"def get_dlsym_offset():\n import ctypes\n libdl = ctypes.PyDLL('libdl.so')\n dlopen = ctypes.cast(libdl.dlopen, ctypes.c_void_p).value\n dlsym = ctypes.cast(libdl.dlsym, ctypes.c_void_p).value\n return dlsym - dlopen",
"def realPath(self):\n \n return (self.useLink and [self.linkPath] or [self.installPath])[0]",
"def _build_so_path(libpath):\n # Assertions\n assert libpath.is_absolute(), \"Expected absolute library directory path, got \" + repr(str(libpath))\n # Path construction\n return libpath.parent / (libpath.name + \".so\") # Do not use 'with_suffix' since we want unconditional append",
"def _get_lsp_primary_path(self):\n return self.__lsp_primary_path",
"def libLineCalculator(siteSpecifics, mode, gcc_version):\n\n llp = os.environ.get('LD_LIBRARY_PATH','')\n if siteSpecifics.addToLibs():\n libLine = 'LD_LIBRARY_PATH='\n libLine += siteSpecifics.addToLibs() + siteSpecifics.pathToOracleInstantClient(gcc_version) + ':$LD_LIBRARY_PATH'\n else:\n libLine = ''\n\n return libLine",
"def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"",
"def ld_api_path(self):\n return self._ld_api_path",
"def find_lib_path(func):\n\n dl_info = DL_info() \n libdl.dladdr(func, ctypes.byref(dl_info))\n return dl_info.dli_fname",
"def symlink_single_version(version):\r\n default_version = version.project.default_version\r\n log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=default_version, msg=\"Symlinking single_version\"))\r\n\r\n # The single_version directory\r\n symlink = version.project.single_version_symlink_path()\r\n run_on_app_servers('mkdir -p %s' % '/'.join(symlink.split('/')[:-1]))\r\n\r\n # Where the actual docs live\r\n docs_dir = os.path.join(settings.DOCROOT, version.project.slug, 'rtd-builds', default_version)\r\n run_on_app_servers('ln -nsf %s %s' % (docs_dir, symlink))",
"def load_linux_so():\n shared_name = get_project_root() / \"build/libastyle.so\"\n\n shared = str(pl.Path(shared_name).absolute())\n # file_ = {f for f in pl.Path().iterdir() if f.name == shared_name}\n\n try:\n libc = cdll.LoadLibrary(shared)\n except OSError as err:\n # \"cannot open shared object file: No such file or directory\"\n print(err)\n raise FileNotFoundError(\"Cannot find \" + shared)\n return libc",
"def readLDLP():\t\n\tldlp = os.environ.get(\"LD_LIBRARY_PATH\")\n\tif ldlp==None:\n\t\tprint '$LD_LIBRARY_PATH is not defined - needed to pick the right lib version.'\n\t\tsys.exit(0)\n\treturn ldlp",
"def _so_symlinks(path):\n if not os.path.isdir(path):\n assert AssertionError(\"Failed to make so symlinks: path '%s' is not a directory.\", path)\n for dirent in os.listdir(path):\n fname = os.path.join(path, dirent)\n if os.path.isdir(fname) or os.path.islink(fname):\n continue\n m = re.match(r'(.+\\.so)\\.(\\d+)\\.(\\d+)\\.(\\d+)$', fname)\n if m:\n so,x,y,z = m.groups()\n symlink(fname, \"%s.%s.%s\" % (so, x, y))\n symlink(fname, \"%s.%s\" % (so, x))\n symlink(fname, so)",
"def get_lib_extension():\r\n if sys.platform == 'win32':\r\n return 'pyd'\r\n else:\r\n return 'so'",
"def _get_boot_path():\n boot_path = _get_default_kernel()\n return boot_path[: boot_path.rindex(\"/\")] or \"/\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Diagonal distance: h_diagonal(n) = min(abs(n.x - goal.x), abs(n.y - goal.y)); h_straight(n) = abs(n.x - goal.x) + abs(n.y - goal.y); h(n) = D_diagnoal * h_diagonal(n) + D_straight * (h_straight(n) - 2 * h_diagonal(n))
|
def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight):
start_x = start.x
start_y = start.y
goal_x = goal.x
goal_y = goal.y
h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))
h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)
h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)
return h
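A minimal usage sketch: the Node type below is a stand-in for whatever object carries .x and .y in the original code base, and the costs are illustrative (unit straight moves, sqrt(2) diagonal moves).

import numpy as np
from collections import namedtuple

Node = namedtuple('Node', ['x', 'y'])

start, goal = Node(0, 0), Node(3, 5)
# h_diagonal = min(3, 5) = 3, h_straight = 3 + 5 = 8,
# so h = sqrt(2) * 3 + 1 * (8 - 2 * 3) = 2 + 3 * sqrt(2) ~= 6.243
h = heuristic_cost_estimate(start, goal, d_diagnoal=np.sqrt(2), d_straight=1.0)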
|
[
"def dist_between(current, neighbor,d_diagnoal,d_straight):\n start_x = current.x\n start_y = current.y\n goal_x = neighbor.x\n goal_y = neighbor.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)\n return h",
"def diagonal_distance(current_x, current_y, goal_x, goal_y):\n return max(abs(current_x - goal_x), abs(current_y - goal_y))",
"def hyperdiagonal(coords):\n \n mini = coords.min(axis=0)\n maxi = coords.max(axis=0)\n dist = (maxi - mini)**2\n dist = np.sqrt(dist.sum())\n return dist",
"def diagonal_distance(a, b, D=1, D2=1):\r\n dx = abs(a[0] - b[0])\r\n dy = abs(a[1] - b[1])\r\n return D * (dx + dy) + (D2 - 2 * D) * min(dx, dy)",
"def _calculate_diag(self) -> np.ndarray:\n diags = np.zeros(self.n)\n for i in range(self.n):\n diags[i] = 1 / np.linalg.norm(self.X[:, i] - self.W @ self.H[:, i])\n return np.diag(diags)",
"def task6_diagonal(matrix):\n return np.diagonal(matrix)",
"def diagonal_distance(pa : Tuple[int, int], pb : Tuple[int, int]) -> int:\n (ax, ay) = pa\n (bx, by) = pb\n xdist = abs(ax - bx)\n ydist = abs(ay - by)\n dist = min(xdist, ydist) + abs(xdist - ydist)\n return dist",
"def dijkstra(self, start: int, goal: int):\n import heapq\n\n assert start < self.n_vertex\n assert goal < self.n_vertex\n\n visited = [False] * self.n_vertex\n\n q = [(0, start)]\n while q:\n w, v = heapq.heappop(q)\n if visited[v]:\n continue\n visited[v] = True\n\n if v == goal:\n return w\n\n for wn, vn in self.route[v]:\n heapq.heappush(q, (w + wn, vn))\n\n return float('inf')",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # Calc the absolute y distance\n dx = abs(x1 - x0) # CXalc the absolute x distance\n return dx == dy # They clash if dx == dy",
"def test_diagonalizing_gates(self):\n base = qml.Hadamard(0)\n diag_gate = Adjoint(base).diagonalizing_gates()[0]\n\n assert isinstance(diag_gate, qml.RY)\n assert qml.math.allclose(diag_gate.data[0], -np.pi / 4)",
"def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r",
"def mrr_diagonal(geom: base.BaseGeometry) -> float:\n if len(geom) <= 1:\n return 0\n if len(geom) == 2:\n return geo.distance( # type: ignore\n lat1=geom[0].y, lon1=geom[0].x, lat2=geom[1].y, lon2=geom[1].x\n )\n mrr = LineString(geom).minimum_rotated_rectangle\n if isinstance(mrr, Point):\n return 0\n try: # in most cases, mrr is a Polygon\n x, y = mrr.exterior.coords.xy\n except AttributeError: # then it should be a LineString\n p0, p1 = mrr.coords[0], mrr.coords[-1]\n return geo.distance(p0[1], p0[0], p1[1], p1[0]) # type: ignore\n return geo.distance(y[0], x[0], y[2], x[2]) # type: ignore",
"def share_diagonal(x0,y0,x1,y1):\r\n return abs(x0 - x1) == abs(y0 - y1)",
"def diagonal(nd):\n assert nd.ndim == 2, \"diagonal requires 2 dimensional ndarray\"\n shape_min = hl.min(nd.shape[0], nd.shape[1])\n return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))",
"def diag(B,s,H,ia,ib,ic,chia,chic):\n # Get a guess for the ground state based on the old MPS\n d = B[0].shape[0]\n theta0 = np.tensordot(np.diag(s[ia]),np.tensordot(B[ib],B[ic],axes=(2,1)),axes=(1,1))\n theta0 = np.reshape(np.transpose(theta0,(1,0,2,3)),((chia*chic)*(d**2)))\n\n # Diagonalize Hamiltonian\n e0,v0 = arp.eigsh(H,k=1,which='SA',return_eigenvectors=True,v0=theta0,ncv=20)\n \n return np.reshape(v0.squeeze(),(d*chia,d*chic)),e0",
"def dijkstra(self, start: int, goal=None):\n import heapq\n\n assert start < self.n_vertex\n assert goal is None or goal < self.n_vertex\n\n visited = [False] * self.n_vertex\n if goal is None:\n distance = [float('inf')] * self.n_vertex\n distance[start] = 0\n\n q = [(0, 0, start)]\n while q:\n w, t, v = heapq.heappop(q)\n if visited[v]:\n continue\n visited[v] = True\n\n if goal is not None and v == goal:\n return (self.priority * w, t)\n\n for wn, tn, vn in self.route[v]:\n if goal is None:\n distance[vn] = min(distance[vn], self.priority * (w + wn))\n heapq.heappush(q, (w + wn, t + tn, vn))\n\n return (float('inf'), 0)",
"def diagonal(dims, dtype=np.float32):\n\n W = np.zeros(dims, dtype=dtype)\n m = (float(W.shape[1]-1) / float(W.shape[0]-1)) if float(W.shape[0]-1) > 0 else 1.\n \n for x in xrange(W.shape[0]):\n y = m*x\n W[x, np.floor(y)] = 1#(int(y)+1)-y\n W[x, min(np.ceil(y), W.shape[1]-1)] = 1#y-int(y)\n\n return W",
"def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full",
"def diagonal(self) -> Float[Array, \" N\"]:\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Diagonal distance: h_diagonal(n) = min(abs(n.x - goal.x), abs(n.y - goal.y)); h_straight(n) = abs(n.x - goal.x) + abs(n.y - goal.y); h(n) = D_diagnoal * h_diagonal(n) + D_straight * (h_straight(n) - 2 * h_diagonal(n))
|
def dist_between(current, neighbor,d_diagnoal,d_straight):
start_x = current.x
start_y = current.y
goal_x = neighbor.x
goal_y = neighbor.y
h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))
h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)
h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)
return h
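A quick check with illustrative values that the formula above agrees with the common D * (dx + dy) + (D2 - 2 * D) * min(dx, dy) form of the diagonal distance, taking D = d_straight and D2 = d_diagnoal:

import numpy as np

dx, dy = 4, 7
d_straight, d_diagnoal = 1.0, np.sqrt(2)
diag_form = d_diagnoal * min(dx, dy) + d_straight * ((dx + dy) - 2 * min(dx, dy))
alt_form = d_straight * (dx + dy) + (d_diagnoal - 2 * d_straight) * min(dx, dy)
assert np.isclose(diag_form, alt_form)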
|
[
"def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight):\n start_x = start.x\n start_y = start.y\n goal_x = goal.x\n goal_y = goal.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)\n return h",
"def diagonal_distance(current_x, current_y, goal_x, goal_y):\n return max(abs(current_x - goal_x), abs(current_y - goal_y))",
"def hyperdiagonal(coords):\n \n mini = coords.min(axis=0)\n maxi = coords.max(axis=0)\n dist = (maxi - mini)**2\n dist = np.sqrt(dist.sum())\n return dist",
"def diagonal_distance(a, b, D=1, D2=1):\r\n dx = abs(a[0] - b[0])\r\n dy = abs(a[1] - b[1])\r\n return D * (dx + dy) + (D2 - 2 * D) * min(dx, dy)",
"def _calculate_diag(self) -> np.ndarray:\n diags = np.zeros(self.n)\n for i in range(self.n):\n diags[i] = 1 / np.linalg.norm(self.X[:, i] - self.W @ self.H[:, i])\n return np.diag(diags)",
"def task6_diagonal(matrix):\n return np.diagonal(matrix)",
"def diagonal_distance(pa : Tuple[int, int], pb : Tuple[int, int]) -> int:\n (ax, ay) = pa\n (bx, by) = pb\n xdist = abs(ax - bx)\n ydist = abs(ay - by)\n dist = min(xdist, ydist) + abs(xdist - ydist)\n return dist",
"def dijkstra(self, start: int, goal: int):\n import heapq\n\n assert start < self.n_vertex\n assert goal < self.n_vertex\n\n visited = [False] * self.n_vertex\n\n q = [(0, start)]\n while q:\n w, v = heapq.heappop(q)\n if visited[v]:\n continue\n visited[v] = True\n\n if v == goal:\n return w\n\n for wn, vn in self.route[v]:\n heapq.heappush(q, (w + wn, vn))\n\n return float('inf')",
"def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # Calc the absolute y distance\n dx = abs(x1 - x0) # CXalc the absolute x distance\n return dx == dy # They clash if dx == dy",
"def test_diagonalizing_gates(self):\n base = qml.Hadamard(0)\n diag_gate = Adjoint(base).diagonalizing_gates()[0]\n\n assert isinstance(diag_gate, qml.RY)\n assert qml.math.allclose(diag_gate.data[0], -np.pi / 4)",
"def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r",
"def mrr_diagonal(geom: base.BaseGeometry) -> float:\n if len(geom) <= 1:\n return 0\n if len(geom) == 2:\n return geo.distance( # type: ignore\n lat1=geom[0].y, lon1=geom[0].x, lat2=geom[1].y, lon2=geom[1].x\n )\n mrr = LineString(geom).minimum_rotated_rectangle\n if isinstance(mrr, Point):\n return 0\n try: # in most cases, mrr is a Polygon\n x, y = mrr.exterior.coords.xy\n except AttributeError: # then it should be a LineString\n p0, p1 = mrr.coords[0], mrr.coords[-1]\n return geo.distance(p0[1], p0[0], p1[1], p1[0]) # type: ignore\n return geo.distance(y[0], x[0], y[2], x[2]) # type: ignore",
"def share_diagonal(x0,y0,x1,y1):\r\n return abs(x0 - x1) == abs(y0 - y1)",
"def diagonal(nd):\n assert nd.ndim == 2, \"diagonal requires 2 dimensional ndarray\"\n shape_min = hl.min(nd.shape[0], nd.shape[1])\n return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))",
"def diag(B,s,H,ia,ib,ic,chia,chic):\n # Get a guess for the ground state based on the old MPS\n d = B[0].shape[0]\n theta0 = np.tensordot(np.diag(s[ia]),np.tensordot(B[ib],B[ic],axes=(2,1)),axes=(1,1))\n theta0 = np.reshape(np.transpose(theta0,(1,0,2,3)),((chia*chic)*(d**2)))\n\n # Diagonalize Hamiltonian\n e0,v0 = arp.eigsh(H,k=1,which='SA',return_eigenvectors=True,v0=theta0,ncv=20)\n \n return np.reshape(v0.squeeze(),(d*chia,d*chic)),e0",
"def dijkstra(self, start: int, goal=None):\n import heapq\n\n assert start < self.n_vertex\n assert goal is None or goal < self.n_vertex\n\n visited = [False] * self.n_vertex\n if goal is None:\n distance = [float('inf')] * self.n_vertex\n distance[start] = 0\n\n q = [(0, 0, start)]\n while q:\n w, t, v = heapq.heappop(q)\n if visited[v]:\n continue\n visited[v] = True\n\n if goal is not None and v == goal:\n return (self.priority * w, t)\n\n for wn, tn, vn in self.route[v]:\n if goal is None:\n distance[vn] = min(distance[vn], self.priority * (w + wn))\n heapq.heappush(q, (w + wn, t + tn, vn))\n\n return (float('inf'), 0)",
"def diagonal(dims, dtype=np.float32):\n\n W = np.zeros(dims, dtype=dtype)\n m = (float(W.shape[1]-1) / float(W.shape[0]-1)) if float(W.shape[0]-1) > 0 else 1.\n \n for x in xrange(W.shape[0]):\n y = m*x\n W[x, np.floor(y)] = 1#(int(y)+1)-y\n W[x, min(np.ceil(y), W.shape[1]-1)] = 1#y-int(y)\n\n return W",
"def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full",
"def diagonal(self) -> Float[Array, \" N\"]:\n raise NotImplementedError"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a path from grid indices to real coordinates, e.g. grid index 21 -> 2.15 (with reso = 0.1): sx = ix * reso + reso / 2
|
def convertGridPathToReal(pathInGrid, sx, sy, gx, gy, grid_reso = 0.1):
    pathInReal = (pathInGrid * grid_reso + grid_reso / 2)
    stepNum = pathInReal.shape[1]
    # Replace head and tail with the exact start and goal coordinates
    pathInReal[:, 0] = [sx, sy]
    pathInReal[:, stepNum - 1] = [gx, gy]
    return pathInReal
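A minimal usage sketch, assuming numpy and the 2 x N path layout (row 0 = x, row 1 = y) implied by the column assignments above:

import numpy as np

path_in_grid = np.array([[0, 1, 2, 21],
                         [0, 0, 1, 21]], dtype=float)
path_in_real = convertGridPathToReal(path_in_grid, sx=0.02, sy=0.03,
                                     gx=2.1, gy=2.2, grid_reso=0.1)
# Interior cells map to cell centres (e.g. grid index 2 -> 0.25);
# the first and last columns are overwritten with the exact start/goal.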
|
[
"def solve_path(grid: List, path: List[Step]):\n current_xy = (int(len(grid) / 2), int(len(grid) / 2))\n for step in path:\n\n x, y = current_xy\n direction = step[0]\n distance = int(step[1:4])\n print(f'Traveling {distance} steps {readable_direction(direction)} from (x = {x}, y = {y})', end='\\r')\n\n if direction == 'U':\n grid[x, y + 1:y + distance] = [cell + 1 for cell in grid[x, y + 1:y + distance]]\n\n current_xy = (x, y + distance)\n\n elif direction == 'D':\n grid[x, y - distance:y - 1] = [cell + 1 for cell in grid[x, y - distance:y - 1]]\n\n current_xy = (x, y - distance)\n\n elif direction == 'L':\n grid[x - distance:x - 1, y] = [cell + 1 for cell in grid[x - distance:x - 1, y]]\n\n current_xy = (x - distance, y)\n\n elif direction == 'R':\n grid[x + 1:x + distance, y] = [cell + 1 for cell in grid[x + 1:x + distance, y]]\n current_xy = (x + distance, y)\n return",
"def transform_grid(grid):\n transformed_grid = np.zeros(grid.shape, dtype='c')\n transformed_grid[grid == FREE] = \".\"\n transformed_grid[grid == OBSTACLE] = \"#\"\n transformed_grid[grid == ROBOT] = \"R\"\n transformed_grid[grid == OBJECT] = \"G\"\n\n return transformed_grid",
"def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n self.dk = 2 * np.pi/self.grid_width\n self.grid_x_shifted = -self.grid_width/2 + self.dx * np.arange(0, self.grid_resol)\n self.grid_x = self.grid_x_shifted + self.grid_center\n self.grid_k = - (np.pi * self.grid_resol)/self.grid_width + self.dk * np.arange(0, self.grid_resol)\n self.grid_k = np.roll(self.grid_k, int((self.grid_resol)/2))\n self.grid_kin = np.square(self.h)/ (2*self.m) * np.square(self.grid_k)",
"def cell_path(self,i):\n cell_nodes = self.cell_to_nodes(i)\n cell_codes = np.ones(len(cell_nodes),np.int32)*Path.LINETO\n cell_codes[0] = Path.MOVETO \n cell_codes[-1] = Path.CLOSEPOLY\n return Path(self.nodes['x'][cell_nodes])",
"def adjust_grid(route, grid):\n\n for location in route:\n # If the position in the route is the end destination, dont make it a 1. \n if location == route[-1]:\n grid[location[0]][location[1]][location[2]] = 0\n\n # Else if the location on the grid is not a 1, make it one.\n elif grid[location[0]][location[1]][location[2]] == 0 or grid[location[0]][location[1]][location[2]] == 'x' or grid[location[0]][location[1]][location[2]] == 'y':\n grid[location[0]][location[1]][location[2]] = 1\n else:\n continue\n\n return grid",
"def translate_grid(lines, lcnt):\n\n while True:\n if not \"<grid>\" in lines[lcnt]:\n lcnt += 1\n continue\n lcnt += 1\n break\n\n txt = \"\"\n empty_line = None\n for l, line in enumerate(lines[lcnt:]):\n if \"</grid>\" in line:\n break\n line = line.replace(line.split()[1], \"x=\\\"%d\\\"\"\\\n % (int(line.split()[1].split('\"')[1]) + x_offset))\n line = line.replace(line.split()[2], \"y=\\\"%d\\\"\"\\\n % (int(line.split()[2].split('\"')[1]) + y_offset))\n txt += line\n if empty_line is None:\n empty_line = line\n\n if FPGA_SIZES is not None:\n dimensions = [tuple(get_fpga_dimensions(\"%s_W%d_H%d.xml\" % (args.arc.rsplit(\".xml\", 1)[0], s, s)))\n for s in FPGA_SIZES]\n total_w = sum([s[0] for s in dimensions])\n total_h = max([s[1] for s in dimensions])\n grid_w, grid_h = dimensions[FPGA_SIZES.index(int(filename.split(\"_H\")[1].split(\"_rr.xml\")[0]))]\n for y in range(grid_h, total_h):\n for x in range(0, grid_w):\n line = empty_line.replace(empty_line.split()[1], \"x=\\\"%d\\\"\"\\\n % (x + x_offset))\n line = line.replace(empty_line.split()[2], \"y=\\\"%d\\\"\" % y)\n txt += line\n\n return txt, lcnt + l + 1",
"def solutionPath2Labyrinth(self) -> None:\n for cell in self.stack.liste:\n cell.solutionMarker = self.calculateRect(cell)\n cell.solutionMarkerColor = SOLUTIONPATHCOLOR",
"def get_grid_size(self, ui, res_dir):\r\n print_it('determining grid size', PrintOpts.lvl1.value)\r\n self.sun.simple_clone()\r\n self.sun.clone.make_profile(PreSol.res_x.value, PreSol.res_y.value,\r\n self.init_force)\r\n self.planet.simple_clone()\r\n self.planet.clone.make_slave_to(self.sun.clone)\r\n\r\n init_displ = hertz_displ(self.sun.clone.e, self.planet.e,\r\n self.sun.clone.ny, self.planet.ny,\r\n self.sun.clone.r_hertz_x,\r\n self.sun.clone.r_hertz_y,\r\n self.planet.clone.r_hertz_x,\r\n self.planet.clone.r_hertz_y,\r\n self.sun.norm_forces[0])\r\n too_many_els_in_y = 1\r\n too_many_els_in_x = 1\r\n contact_width_y = 0.05\r\n contact_width_x = 0.05\r\n while too_many_els_in_y != 0 or \\\r\n too_many_els_in_x != 0:\r\n self.sun.clone.make_profile(self.sun.clone.res_x,\r\n self.sun.clone.res_y, self.init_force,\r\n contact_width=contact_width_y,\r\n contact_length=contact_width_x)\r\n self.planet.clone.make_slave_to(self.sun.clone)\r\n\r\n pressure, init_displ = \\\r\n pre_solve_half_space(self.sun.clone.profile,\r\n self.planet.clone.profile,\r\n self.sun.clone.x_axis,\r\n self.sun.clone.y_axis,\r\n self.sun.clone.res_x, self.sun.clone.res_y,\r\n self.sun.clone.delta_x,\r\n self.sun.clone.delta_y, self.sun.clone.e,\r\n self.planet.clone.e, self.sun.clone.ny,\r\n self.planet.clone.ny,\r\n self.sun.norm_forces[0],\r\n init_displ=init_displ, print_prog=False)\r\n\r\n pressure_els_y = sum(\r\n pressure[math.floor(self.sun.clone.res_y / 2), :] > 0)\r\n too_many_els_in_y = self.sun.clone.res_y - pressure_els_y - 2\r\n if too_many_els_in_y:\r\n contact_width_y += -np.sign(\r\n too_many_els_in_y) * contact_width_y / 25\r\n\r\n pressure_els_x = sum(\r\n pressure[:, math.floor(self.sun.clone.res_x / 2)] > 0)\r\n too_many_els_in_x = self.sun.clone.res_x - pressure_els_x - 2\r\n if too_many_els_in_x:\r\n contact_width_x += -np.sign(\r\n too_many_els_in_x) * contact_width_x / 25\r\n\r\n self.sun.make_profile(self.sun.res_x, self.sun.res_y, self.init_force,\r\n contact_width=contact_width_y,\r\n contact_length=contact_width_x)\r\n self.planet.make_slave_to(self.sun)\r\n return init_displ",
"def generate_all_locations(grid, shape):",
"def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")",
"def grid_numbering(n, x_0, y_0, x_1, y_1):\n \n if n == 0:\n return \"\"\n\n arg = complex_number(x_0 + 0.5 - x_1, y_0 + 0.5 - y_1).argument()\n\n if arg >= 0 and arg < np.pi / 2: \n x = \"1\"\n x_1 += 2 ** (n - 2)\n y_1 += 2 ** (n - 2)\n elif arg >= np.pi / 2 and arg <= np.pi:\n x = \"2\"\n x_1 -= 2 ** (n - 2)\n y_1 += 2 ** (n - 2)\n elif arg < 0 and arg >= -np.pi / 2:\n x = \"4\"\n x_1 += 2 ** (n - 2)\n y_1 -= 2 ** (n - 2)\n else:\n x = \"3\"\n x_1 -= 2 ** (n - 2)\n y_1 -= 2 ** (n - 2)\n\n return str(x) + grid_numbering(n - 1, x_0, y_0, x_1, y_1)",
"def _buildGridPoints(self):\n self.spacings = []\n for level in xrange(self.depth):\n levelSpacings = []\n refLevel = level + 1\n level = 2**level\n axisData = []\n for axis in self.size:\n spacing = axis / (level+1)\n levelSpacings.append(spacing)\n axisData.append([gridValue*spacing for gridValue in xrange(1, level+1)])\n pointList = [((i, j, k), np.array([axisData[0][i], axisData[1][j], axisData[2][k]]))\n for i in xrange(level)\n for j in xrange(level)\n for k in xrange(level)]\n self.grid[refLevel] = {point[0]: point[1] for point in pointList}\n self.spacings.append(levelSpacings)",
"def regex_grid(n):\n cx = 2 ** (n - 1)\n cy = 2 ** (n - 1)\n grid = [[grid_numbering(n, i , j, cx, cy) for i in range(2 ** n)] for j in range(2 ** n)]\n \n return grid",
"def reconstruct(ri, li, rs, v, x, y, phix, phiy):\n # TODO Add in option for masking the path-int B field.\n \n # Input variables.\n magnify = (rs + ri + .5*li)/(ri+.5*li)\n map_pot_x = np.copy(phix)\n map_pot_y = np.copy(phiy)\n plasma_x = np.copy(x)\n plasma_y = np.copy(y)\n \n # We multiply the whole expression by magnify to put the perp-deflection\n # fields into screen coordinates.\n wBx = magnify*(v/rs)*(map_pot_x - plasma_x)\n wBy = magnify*(v/rs)*(map_pot_y - plasma_y)\n \n return(wBx, wBy)",
"def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = np.meshgrid(x_arr, y_arr)",
"def easegrid(iopt, alat, alon, ascale):\n # ported from easegrid.m by JPB 21 Sept 2011\n pi2 = np.pi / 2.0\n dtr = pi2 / 90.0\n\n if iopt == 11: # ease grid north\n thelon = ascale * sin(alon * dtr) * sin(dtr * (45.0 - 0.5 * alat))\n thelat = ascale * cos(alon * dtr) * sin(dtr * (45.0 - 0.5 * alat))\n elif iopt == 12: # ease grid south\n thelon = ascale * sin(alon * dtr) * cos(dtr * (45.0 - 0.5 * alat))\n thelat = ascale * cos(alon * dtr) * cos(dtr * (45.0 - 0.5 * alat))\n elif iopt == 13: # ease cylindrical\n thelon = ascale * pi2 * alon * cos(30.0 * dtr) / 90.0\n thelat = ascale * sin(alat * dtr) / cos(30.0 * dtr)\n\n return thelon, thelat",
"def get_bandpath(unit_cell, path_string, special_points, n_points = 1000):\n\n # Get the reciprocal lattice\n bg = get_reciprocal_vectors(unit_cell) \n\n\n new_special_points = {x : np.array(special_points[x], dtype = np.double).dot(bg) for x in special_points}\n print(new_special_points)\n\n if len(path_string) < 2:\n raise ValueError(\"Error, at least 2 q points needs to be processed\")\n\n path_points = np.zeros((len(path_string), 3), dtype = np.double)\n for i, c in enumerate(path_string):\n path_points[i, :] = new_special_points[c]\n\n #print('BG:', bg * 2 * np.pi)\n #print('UC:', unit_cell)\n #print('SPECIAL POINTS:', {x : new_special_points[x] * 2 * np.pi for x in new_special_points})\n\n single_lenghts = np.linalg.norm(np.diff(path_points, axis = 0), axis = 1)\n total_lenght = np.sum(single_lenghts)\n\n xaxis = np.linspace(0, total_lenght, n_points)\n xticks = np.zeros(len(path_string))\n for i, ll in enumerate(single_lenghts):\n xticks[i+1] = xticks[i] + ll\n\n xlabels = [x.replace('G', r'$\\Gamma$') for x in path_string]\n\n\n q_path = np.zeros((n_points, 3), dtype = np.double)\n q_path[-1, :] = path_points[-1,:] # Set the starting point in the path\n dq = total_lenght / n_points\n counter = 0\n visited = []\n for i in range(1, n_points):\n \n # Identify in which line it is\n xval = xaxis[i]\n index = 0\n while xval >= single_lenghts[index] + __EPSILON__:\n\n print(xval, index, single_lenghts)\n xval -= single_lenghts[index]\n index += 1\n\n\n # If the line is changed, add a counter\n if not index in visited:\n visited.append(index)\n counter = 0\n else:\n counter += 1\n\n q_versor = (path_points[index+1,:] - path_points[index,:]) / single_lenghts[index]\n\n q_path[i-1, :] = path_points[index, :] + counter * dq * q_versor\n\n return q_path, (xaxis, xticks, xlabels)",
"def format_path(self, path):\n\t\thuman_readable_path = []\n\t\tfor pair in path:\n\t\t\tx1, y1 = self.list_to_grid_index(pair[0])\n\t\t\tx2, y2 = self.list_to_grid_index(pair[1])\n\n\t\t\tindex1 = \"Row {}, Col {}\".format(x1 + 1, y1 + 1)\n\t\t\tindex2 = \"Row {}, Col {}\".format(x2 + 1, y2 + 1)\n\n\t\t\thuman_readable_path.append((index1, index2))\n\n\t\treturn human_readable_path",
"def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale\n y_base = point[1] * scale + self.border * scale\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
One dimensional exponential cutoff power law derivative with respect to parameters
|
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
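The model form itself is not shown here; assuming f(x) = amplitude * (x / x_0) ** (-alpha) * exp(-x / x_cutoff), which is what these derivatives imply, a quick finite-difference check:

import numpy as np

def _model(x, amplitude, x_0, alpha, x_cutoff):
    # Assumed model form, inferred from the derivatives above.
    return amplitude * (x / x_0) ** (-alpha) * np.exp(-x / x_cutoff)

x = np.linspace(0.5, 5.0, 10)
params = dict(amplitude=2.0, x_0=1.5, alpha=1.2, x_cutoff=4.0)
analytic = fit_deriv(x, **params)

eps = 1e-6
for i, name in enumerate(['amplitude', 'x_0', 'alpha', 'x_cutoff']):
    bumped = dict(params, **{name: params[name] + eps})
    numeric = (_model(x, **bumped) - _model(x, **params)) / eps
    assert np.allclose(numeric, analytic[i], rtol=1e-4)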
|
[
"def func_full_exp(x, c1, c2, c3, c4, c5, c6, c7):\n x = np.power(10, x)\n thermalCore = c1 * np.sqrt(x) * np.exp(-c2 * x)\n a = map(lambda y: 0 if y < c5 else 1, x)\n b = map(lambda y: 0 if y < c6 else 1, x)\n #b1 = map(lambda y: 1 - y, b)\n a = np.array(a)\n b = np.array(b)\n b1 = 1.0 - b\n powerLaw = c3 * a * np.power(x, -c4) * (b1 + b * np.exp(-c7 * (x - c6)))\n #print thermalCore + powerLaw\n return np.log10(thermalCore + powerLaw)",
"def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)",
"def _de_exp_const_w(z,w):\n return np.log((z+1.)**(3.*(1.+w)))/3.",
"def dd_xpowalpha(cls,grid,alpha,cutoff=False):\n grid.l.info('bc.hom: Setting initial data to (-x)^alpha.')\n grid.l.debug('bc.hom: Parameters to dd_xpowalpha: alpha={},cutoff={}'.format(alpha,cutoff))\n if alpha is 0:\n def tmp(x): return float(x[1]<=0)\n return cls._tpl(grid, tmp) \n\n if cutoff:\n def tmp(x):\n return sum(pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n else:\n def tmp(x):\n return sum(pow(float(x[i]>=0)*x[i],alpha)-pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n return cls._tpl(grid, tmp)",
"def expdiff(x, a=a, n=5):\n return a**n * np.exp(a*x)",
"def fit_double_exp(x, y):\n time_ax = x\n spectrum = y\n def cost_func_double_exp(params):\n a = params[0]; k = params[1]; a2 = params[2]; k2 = params[3]; c = params[4]\n double_exp_model = double_exp(time_ax, a, k, a2, k2, c)\n return np.sum((spectrum - double_exp_model)**2)\n popt = differential_evolution(cost_func_double_exp,\n bounds=([-100,100],[-100, 100],[-200,200],[-100,100],[-200,200])).x\n return popt",
"def fit_exp_decay(x, y):\n def _func(z, z0):\n return np.exp(-z/z0)\n popt, pcov = curve_fit(_func, x, y)\n return popt[0]",
"def power_density(\n ebeam_energy=6, peak_field=1, undulator_n_period=100, k=1.5, sr_current=0.2\n):\n G = 2 * np.arctan(k * np.pi) / np.pi\n return 10.84 * ebeam_energy**4 * peak_field * undulator_n_period * sr_current * G",
"def func_ludwigson(eps,k1,n1,k2,n2,):\n return k1*eps**n1+np.exp(k2+n2*eps)",
"def elliptic_func(x):\n f = 0.0\n d = len(x)\n for i in range(d):\n f += np.power(10.0, 6.0 * i / (d - 1)) * x[i] ** 2\n return f",
"def epow(data,dt,epow=0,etpow=1,tmin=0):\n nt = data.shape[0]\n t_array = tmin+np.squeeze((dt*np.array([list(range(nt))])))\n etpowfac = t_array**etpow\n data = np.apply_along_axis(lambda m: np.multiply(m, np.exp(epow*etpowfac)), axis=0, arr=data)\n return data",
"def exponential_decay(value, max_val, half_life):\n\n return np.minimum(1.0, np.power(0.5, (max_val - value) / half_life))",
"def final_exponentiation(element, ec):\n return Fq12(ec.q, fq12_final_exp(element.ZT))",
"def __pow__(self, a: float) -> np.ndarray:\n return np.e**(a*self.logarithm)",
"def step_vdfdx_exponential(f, dt):\n\n return np.real(\n fft.ifft(np.exp(-1j * kx[:, None] * dt * v) * fft.fft(f, axis=0), axis=0)\n )",
"def specific_energy_consumption_deriv(self, increment_filter, k):\n self.jacobian[k, 4, 0] = -self.e.val\n # derivatives for variable P\n if self.P.is_var:\n self.jacobian[k, 5 + self.P.var_pos, 0] = 1\n # derivatives for variable e\n if self.e.is_var:\n self.jacobian[k, 5 + self.e.var_pos, 0] = -self.outl[2].m.val_SI",
"def get_est_exp_discount_function(self,params):\n params = params[0:5]\n df = pd.DataFrame(self.maturity.apply(lambda x: x ** i) for i in range(1, 6)).T\n df.columns = ['M1', 'M2', 'M3', 'M4', 'M5']\n return np.exp(df.dot(params))",
"def fixedPowerLaw(x, order, **kwargs):\n\n\n exponents = kwargs.pop('exponents', None)\n if exponents is None:\n raise ValueError(\"fixedPowerLaw requires list of expoenents passed as kwarg\")\n\n if not hasattr(exponents, \"__len__\"):\n exponents = [exponents]\n\n assert(order <= len(exponents))\n return x**exponents[order]",
"def exponential(self):\r\n if self.__E:\r\n return self.__E\r\n if self.zero != 0:\r\n raise ValueError(\"First term of exponentiated PowerSeries must be 0.\")\r\n def _e():\r\n for term in (E * self.derivative()).integral(Fraction(1, 1)):\r\n yield term\r\n E = self.__E = PowerSeries(_e)\r\n return E"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a strong password, copy it to the clipboard, and return it to be added to the csv file.
|
def generate_pw():
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
password = ''.join(random.choice(chars) for i in range(16))
pyperclip.copy(password)
print('Password copied to clipboard.')
return password
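random.choice is not a cryptographic source; if that matters, the standard-library secrets module is a drop-in alternative with the same character set and length (a sketch, with the clipboard handling left out):

import string
import secrets

def generate_pw_secure(length=16):
    chars = string.ascii_letters + string.digits + '!@#$%^&*()'
    # secrets.choice draws from the OS CSPRNG instead of the Mersenne Twister.
    return ''.join(secrets.choice(chars) for _ in range(length))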
|
[
"def generate_password(self):\n raise NotImplementedError()\n # self.passvoid = 'cde'",
"def generate_readback_password():\n # Generate 16 character random password.\n pw = ''.join(chr(random.randint(0, 255)) for i in range(8)).encode('hex')\n\n # Write password to secret file.\n with open('secret_build_output.txt', 'wb+') as secret_build_output:\n secret_build_output.write(pw)\n\n return pw",
"def passwordGen() :\n\treturn __randomString(12)",
"def generate_password():\n gen_pass = Credential.generate_password()\n return gen_pass",
"async def password_generate_complex(self, ctx):\n await ctx.send(\n \"\".join(\n random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))\n )\n )",
"def shuffle_pass(cls, p):\n password = ''.join(random.sample(p, len(p)))\n print(f\"Generated password is:{password}\")\n pyperclip.copy(password)\n print(f\"Your {len(password)} Digit Password is copied to clipboard!\")",
"def _random_password(self):\n return ''.join([\n random.choice(string.ascii_letters + string.digits)\n for _ in range(12)\n ])",
"def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)",
"def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))",
"def add_pass(input_master_pass, input_service, input_email, input_pass):\n f = load_key(input_master_pass)\n encrypted_service = str(f.encrypt(input_service), 'utf-8')\n encrypted_email = str(f.encrypt(input_email), 'utf-8')\n encrypted_pass = str(f.encrypt(input_pass), 'utf-8')\n with open('data.csv', mode='a', newline='') as data_file:\n csv_writer = csv.writer(data_file)\n csv_writer.writerow([encrypted_service, encrypted_email, encrypted_pass])\n print(\"Details Saved\\n\")\n data_file.close()",
"def generate_password_file(source_fn, dest_fn, rules=lambda word:true):\n filteredWords = filter_words_from_file(source_fn, rules);\n combos = combinations(filteredWords);\n writer = open(dest_fn, \"w\");\n for i in range(len(combos)):\n writer.write(combos[i][0] + \":\" + combos[i][1] + \"\\n\");\n writer.close();",
"def password_generate(self):\n password = ''\n password_base = self._password_base()\n for _ in range(self.size):\n password += choice(password_base)\n return password",
"def gen_secure_passwd(passwd_attributes):\n password = \"\"\n characters = string.ascii_letters + string.digits + string.punctuation\n completed = False\n\n # Loop through characters until password parameters have been satisfeid\n while not completed:\n password = \"\".join(secrets.choice(characters) for i in range(passwd_attributes[\"length\"]))\n # Check parameters are satisfied\n if ((len(password) == passwd_attributes[\"length\"])\n and (sum(x.isupper() for x in password)\n >= passwd_attributes[\"uppercase\"])\n and (sum(x.islower() for x in password)\n >= passwd_attributes[\"lowercase\"])\n and (sum(x.isdigit() for x in password)\n >= passwd_attributes[\"numbers\"])\n and (sum(x in string.punctuation for x in password)\n >= passwd_attributes[\"special\"])):\n completed = True\n else:\n password = password[1:]\n break\n # Output the generated password\n print(\"\\nSecure Password Generated: %s\" % password)\n input(\"\\nPress ENTER to proceed.\")",
"def random_password():\n pass_len = secrets.choice(range(32, 49))\n return ''.join(secrets.choice(string.printable)\n for _ in range(pass_len))",
"def password(words = word_list):\n p = _word_pair(words)\n s = _salt()\n return f\"{p[0]}{p[1]}{s}\"",
"def gen_password(length: int):\n letters: str = \"abcdefghijklmnopqrstuvwxyz\"\n numbers: str = \"1234567890\"\n symbols: str = \"!&^#*%$@\"\n\n _all = letters + letters.upper() + numbers + symbols\n passwd_len = validate_password_length(length)\n\n password = \"\".join(random.sample(_all, passwd_len))\n print(f\"you password is: {password}\")\n\n return",
"def generate_password(c, user=\"root\"):\n passw = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#xkcdpass\",\n \"--\",\n \"-d-\",\n \"-n3\",\n \"-C\",\n \"capitalize\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n ).stdout.strip()\n hash = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#mkpasswd\",\n \"--\",\n \"-m\",\n \"sha-512\",\n \"-s\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n input=passw,\n ).stdout.strip()\n print(\"# Add the following secrets\")\n print(f\"{user}-password: {passw}\")\n print(f\"{user}-password-hash: {hash}\")",
"def generate_pw(self):\n\n chunks = []\n for chunk_no in range(self.CHUNKS):\n if chunk_no < self.chunk:\n chunks.append(self.verified_chunks[chunk_no])\n elif chunk_no == self.chunk:\n chunks.append(str(self.counter).zfill(self.PASSWORD_LENGTH /\n self.CHUNKS))\n else:\n chunks.append(\"000\")\n\n return \"\".join(chunks)",
"def create_password():\n generate_password = Credential.create_password()\n return generate_password"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add a new account to pw.csv and generate a strong password for it.
|
def main(script):
try:
# ensure user entered account name and user name
account_name = sys.argv[1]
user_name = sys.argv[2]
except IndexError:
print('python add_pw.py [account name] [user name]')
else:
# read in csv file
pw_file = open('pw.csv')
pw_object = csv.reader(pw_file)
# ensure account does not already exist in pw.csv
for row in pw_object:
if row[0] == account_name:
print('Account already exists.')
break
# append account name, user name, and password generated by function
else:
with open('pw.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile)
password = generate_pw()
writer.writerow([account_name, user_name, password])
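An invocation sketch plus a small variant of the existence check that closes the read handle with a context manager; both are illustrations rather than the author's code, and the script name add_pw.py comes from the usage message above:

import csv

# Example invocation (account and user names are hypothetical):
#   python add_pw.py github alice

def account_exists(account_name, path='pw.csv'):
    # Same duplicate check as in main(), but the file is closed on exit.
    with open(path, newline='') as csvfile:
        return any(row and row[0] == account_name for row in csv.reader(csvfile))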
|
[
"def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)",
"def add_pass(input_master_pass, input_service, input_email, input_pass):\n f = load_key(input_master_pass)\n encrypted_service = str(f.encrypt(input_service), 'utf-8')\n encrypted_email = str(f.encrypt(input_email), 'utf-8')\n encrypted_pass = str(f.encrypt(input_pass), 'utf-8')\n with open('data.csv', mode='a', newline='') as data_file:\n csv_writer = csv.writer(data_file)\n csv_writer.writerow([encrypted_service, encrypted_email, encrypted_pass])\n print(\"Details Saved\\n\")\n data_file.close()",
"def generate_new_pass(self):\n # create password\n user_pass = uuid.uuid4().hex[:8]\n # hash password\n hashed_pass = sha256.hash(user_pass)\n self.password = hashed_pass\n db.session.merge(self)\n db.session.commit()\n current_app.logger.debug(\"Password for user {} has been changed into {}\"\n .format(self.id, user_pass))\n return user_pass",
"def save(passwords_loaded, account, username, password):\n if account is None:\n account = input('Account: ').lower()\n else:\n account = account[0].lower()\n\n if username is None:\n username = 'main'\n else:\n username = username[0].lower()\n\n if password is None:\n password = input('Password: ')\n else:\n password = password[0]\n\n if len(username) == 0:\n username = 'main'\n\n passwords_loaded[f'{account}-{username}'] = password \n if encrypt_and_save(passwords_loaded):\n print('Saved')\n else:\n print('An Error occured')",
"def new_account(firstname, lastname, pin):\n pass",
"def add_password(self, username, password):\n if not self._track:\n return\n key = self._create_key(username)\n llen = self._redis.lpush(key, self._create_bcrypt_hash(password))\n if llen > self.max_history:\n self._redis.ltrim(key, 0, self.max_history - 1)",
"def generate_new_account():\n private_key, public_address = account.generate_account()\n passphrase = mnemonic.from_private_key(private_key)\n print(\"Address: {}\\nPassphrase: \\\"{}\\\"\".format(public_address, passphrase))",
"def passwd(self, plaintext):\n self._password = bcrypt.generate_password_hash(plaintext.encode('utf8')).decode('utf8')",
"def addServiceAndPass(username, key):\n service = input(\"Enter the service name: \")\n while True:\n print(\"\\n1. Manually enter password\")\n print(\"2. Generate random password\")\n passInfo = input(\"Enter your choice (1/2): \")\n if passInfo == \"1\" or passInfo == \"2\":\n break\n columns = [i[1] for i in cur.execute(\"PRAGMA table_info(info)\")]\n\n if service not in columns:\n cur.execute(\"ALTER TABLE info ADD COLUMN '{}' TEXT\".format(service))\n conn.commit()\n\n cur.execute(\"SELECT COUNT({}) FROM info WHERE username = ?\".format(service), (username,))\n record = cur.fetchone()[0]\n\n if record == 0 and passInfo == \"2\":\n cur.execute(\"UPDATE info SET {} = '{}' WHERE username = ?\".format(service, generatePass(key)), (username,))\n conn.commit()\n\n if record == 0 and passInfo == \"1\":\n password = input(\"Enter your password: \")\n f = Fernet(key)\n token = f.encrypt(password.encode(\"utf-8\")).decode(\"utf-8\")\n cur.execute(\"UPDATE info SET {} = '{}' WHERE username = ?\".format(service, token), (username,))\n conn.commit()",
"def change_password(self):\n self.password = utils.generate_password()",
"def copy_password(account):\n return Attributes.duplicate_password(account)",
"def save_random(self, username):\n self.keys[username].key = pwgen(10, symbols=False)",
"def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset",
"def create_account(self):\n pass",
"def do_new_account(args):\n password = get_password(args)\n retval, _ = geth_exec(args,\n 'personal.newAccount(\"%s\")' % (password,))\n args.nice = False\n exec_report(args, retval)",
"def test_set_password_account(self):\n pass",
"def generate_new_pw():\n sysrandom = random.SystemRandom() # Uses /dev/urandom\n return ''.join([sysrandom.choice(PASSWORD_CHARS) for _ in range(PASSWORD_LENGTH)])",
"def add_entry(hostname, port, database, username):\n prefix = \":\".join([hostname, port, database, username])\n with open(pass_file_path(), 'a+') as fs:\n for line in fs:\n if line.startswith(prefix):\n entries = line.split(\":\")\n return entries[4]\n\n # If here, the entry doesn't exist, append to the file:\n passw = create_password()\n passw = passw.strip()\n fs.write(\"{}:{}{}\".format(prefix, passw, os.linesep))\n return passw",
"def save_password(self):\n Credential.passwords.append(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Add Site Static Resource Directory
|
def addMobileStaticResourceDir(self, dir: str) -> None:
self.__rootMobileResource.addFileSystemRoot(dir)
|
[
"def add_dirs_to_static(static_webapp_name):\n static_dir = '$HOME/webapps/%s' % static_webapp_name\n with settings(warn_only=True):\n with cd(static_dir):\n run(\"mkdir static && mkdir media\")\n run(\"rm index.html\")\n run(\"touch index.html\")\n with cd(code_dir):\n run(\"mkdir %s/static\" % project_name)",
"def glr_path_static():\n return os.path.join(base_path, \"static\")",
"def add_static_dir (self, www_path, local_path=None, relative=False):\n if not www_path.startswith('/'): www_path = '/' + www_path\n\n if local_path is None:\n local_path = www_path[1:]\n if relative:\n local_path = os.path.basename(local_path)\n if relative:\n import inspect\n path = inspect.stack()[1][1]\n path = os.path.dirname(path)\n local_path = os.path.join(path, local_path)\n\n local_path = os.path.abspath(local_path)\n\n log.debug(\"Serving %s at %s\", local_path, www_path)\n\n self.set_handler(www_path, StaticContentHandler,\n {'root':local_path}, True);",
"def copy_site_assets(self):\n if os.path.isdir(\"static\"):\n self.merge_dirs(\"static\", os.path.join(self.out_dir, \"static\"))",
"def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'",
"def __get_server_static__(app_path,static_dir):\n import os\n # from . import config_loader\n\n # root_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n _path = (static_dir).replace(\"/\", os.path.sep)\n return os.sep.join([app_path, _path])",
"def _copy_static_dir(self):\n dst = os.path.join(os.getcwd(), '.pydy_viz')\n os.mkdir(dst)\n src = os.path.join(os.path.dirname(__file__), 'static')\n distutils.dir_util.copy_tree(src, dst)",
"def get_static_folder(self) -> str:",
"def copy_static_resources(self):\n if not hasattr(settings, 'STATIC_ROOT'):\n raise MissingStaticRoot()\n destination = os.path.join(STORAGE_PATH, 'static')\n if os.path.exists(destination):\n shutil.rmtree(destination)\n shutil.copytree(settings.STATIC_ROOT, destination)",
"def collectstatic():\n public_dir = os.path.join(os.getcwd(), 'public')\n\n if os.path.isdir(public_dir):\n print('directory exists')\n else:\n os.mkdir(public_dir)\n\n local_static = os.path.join(os.getcwd(), 'folio', 'static')\n os.system(\"rsync -ruv --chmod=ug+w %s %s\" % (local_static, public_dir))",
"def static_dir(self):\n return os.path.join(self.app_dir, 'static')",
"def copy_static(self):\n try:\n shutil.copytree('template/static', 'public/static')\n except:\n print(\"Error copying static files \")",
"def ensure_static_exists():\n for entry in html_static_path:\n static_path = os.path.join(__repo_docs__, entry)\n if not os.path.isdir(static_path):\n os.makedirs(static_path)",
"def copy_static(self, outdir):\n pass",
"def configure_static(self):\n self.static_asset_policy.add_static_view('websauna-static', 'websauna.system:static')",
"def get_swagger_static_root():\n return os.path.join(CURDIR, \"static\")",
"def update_static_publish_folder():\n if os.path.isdir(\"/var/new-www\"):\n shutil.rmtree(\"/var/new-www\")\n os.mkdir(\"/var/new-www\")\n shutil.copytree(os.path.join(CODE_DIR, \"static\"), \"/var/new-www/static\")\n shutil.copy2(\"/var/new-www/static/slides.html\", \"/var/new-www\" + SLIDES_PATH)\n if os.path.exists(\"/var/www\"):\n os.rename(\"/var/www\", \"/var/old-www\")\n os.rename(\"/var/new-www\", \"/var/www\")\n if os.path.exists(\"/var/old-www\"):\n shutil.rmtree(\"/var/old-www\")",
"def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))",
"def next_static(path):\n return send_from_directory('.next/static', path)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
View a plot of the cost history across iterations for a given alpha.
|
def plot_cost_history(alpha, cost_history):
cost_df = pandas.DataFrame({
'Cost_History': cost_history,
'Iteration': range(len(cost_history))
})
return ggplot(cost_df, aes('Iteration', 'Cost_History')) + geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha )
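If the ggplot package is unavailable, a minimal matplotlib sketch of the same scatter plot (illustrative only):

import matplotlib.pyplot as plt

def plot_cost_history_mpl(alpha, cost_history):
    # Scatter the cost at each iteration, mirroring the ggplot version above.
    plt.scatter(range(len(cost_history)), cost_history, s=10)
    plt.xlabel('Iteration')
    plt.ylabel('Cost_History')
    plt.title('Cost History for alpha = %.3f' % alpha)
    plt.show()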
|
[
"def plot_history(self, cost_history):\n x_val = [i for i in range(len(cost_history))]\n fig, ax = plt.subplots()\n ax.plot(x_val, cost_history)\n ax.set_xlabel(\"Number of iterations\")\n ax.set_ylabel(\"Cost\")\n ax.set_title(\"Cost history of logistic regression\")\n plt.show()",
"def plot_costs(j_history):\n plt.figure(figsize=(14, 8))\n plt.plot(range(len(j_history)), j_history)\n plt.grid(True)\n plt.title('J (Cost)')\n plt.xlabel('Iteration')\n plt.ylabel('Cost function')\n plt.xlim([0, 1.05 * ITERATIONS])\n plt.ylim([4, 7])\n plt.show()\n plt.close()",
"def plot_cost_history(alpha, cost_history):\n cost_df = pandas.DataFrame({\n 'Cost_History': cost_history,\n 'Iteration': range(len(cost_history))\n })\n return ggplot(cost_df, aes('Iteration', 'Cost_History')) + \\\n geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha)",
"def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()",
"def _show_reward_history(self, hold_plot=False, \n filename='log/reward_history.png'):\n if self.graphing:\n fig = plt.figure(1)\n plt.plot(self.reward_steps, self.reward_history)\n plt.xlabel(\"time step\")\n plt.ylabel(\"average reward\")\n fig.show()\n fig.canvas.draw()\n plt.savefig(filename, format='png')\n if hold_plot:\n plt.show()\n return",
"def plot_training(self):\n fig = plt.figure(\"Training convergence\")\n plt.plot(self.cost_history)\n plt.title(\"Cost history\")\n plt.xlabel(\"nb of iterations\")\n plt.ylabel(\"Cost\")\n plt.show()",
"def plot_cost(doc: Dict):\n plt.plot(doc[\"cost\"])\n plt.xlabel(\"iterations (dozens)\")\n plt.ylabel(\"cost function\")\n plt.xlim((0, 1500))\n plt.title(\"cost function over iterations\")\n plt.show()",
"def plot_learning_curve(history):\r\n pd.DataFrame(history.history).plot(figsize=(8, 5))\r\n plt.grid(True)\r\n plt.gca().set_ylim(0, 1)\r\n plt.show()",
"def show_trajectory(self):\n X = [p[0] for p in self.history]\n Y = [p[1] for p in self.history]\n\n X.append(self.x)\n Y.append(self.y)\n\n plt.scatter(X,Y)\n plt.plot(X,Y)\n plt.show()",
"def show_trajectory(self):\n X = [p[0] for p in self.history]\n Y = [p[1] for p in self.history]\n \n X.append(self.x)\n Y.append(self.y)\n \n plt.scatter(X,Y)\n plt.plot(X,Y)\n plt.show()",
"def plot_history(H, epochs):\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, epochs), H.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.show()",
"def plot_history(data):\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=data.index, y=data[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_all_time.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_all_time.html'))\n fig.show()\n\n recent = data[:data.first_valid_index() - pd.Timedelta(weeks=52)]\n fig = go.Figure()\n for col in data.columns:\n fig.add_trace(go.Scatter(x=recent.index, y=recent[col], mode='lines', name=col))\n fig.update_xaxes(title_text=\"Time\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Share Price ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.2, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,)\n\n fig.write_image(join('..', 'docs', 'share_prices_past_year.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'share_prices_past_year.html'))\n fig.show()",
"def plot_history(his, metrics):\n for metric in metrics:\n plt.plot(his.history[metric], label=metric)\n plt.legend()",
"def show_model_effect(history):\n\n # summarize history for accuracy\n plt.plot(history.history[\"acc\"])\n plt.plot(history.history[\"val_acc\"])\n plt.title(\"Model accuracy\")\n plt.ylabel(\"accuracy\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper left\")\n plt.savefig(self.model_path+\"/Performance_accuracy.jpg\")\n\n # summarize history for loss\n plt.plot(history.history[\"loss\"])\n plt.plot(history.history[\"val_loss\"])\n plt.title(\"Model loss\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper left\")\n plt.savefig(self.model_path+\"/Performance_loss.jpg\")",
"def plot_opt_history(self, figsize=(15,5)):\n import matplotlib.pyplot as plt\n import seaborn as sns\n sns.set_style(style=\"darkgrid\")\n best_score_ls = []\n opt_df = pd.DataFrame(self.history_trials)\n for i, score in enumerate(opt_df.score_opt):\n if i == 0:\n best_score = score\n best_score_ls.append(score)\n else:\n if self.direction == 'maximize':\n if best_score < score:\n best_score = score\n best_score_ls.append(best_score)\n else:\n best_score_ls.append(best_score)\n else:\n if best_score > score:\n best_score = score\n best_score_ls.append(best_score)\n else:\n best_score_ls.append(best_score)\n\n opt_df['best_score'] = best_score_ls\n opt_df['Id'] = list(opt_df.index)\n\n plt.figure(figsize=figsize) \n points = plt.scatter(x=opt_df.Id, y=opt_df.score_opt, label='Iter Score',\n c=opt_df.score_opt, s=25, cmap=\"coolwarm\")\n plt.colorbar(points)\n plt.plot(opt_df.best_score, color='red', label='Best Score',)\n plt.xlabel(\"Iter\")\n plt.ylabel(\"Score\")\n plt.title('Plot optimization history')\n plt.legend()\n return(plt.show())",
"def statistics():\n dates = []\n costs = []\n pylab.xkcd()\n pylab.figure()\n for invoice in Invoice.get_all():\n dates.append(invoice.id)\n costs.append(invoice.amount)\n pylab.legend(('Invoice Amounts',))\n pylab.plot(dates, costs, marker='o', label='Charge')\n pylab.xlabel('Invoice Number')\n pylab.ylabel('Money')\n pylab.legend(loc=2)\n ax = pylab.axes()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.tick_params(axis=u'both', which=u'both', length=0)\n return pylab",
"def plotHistory(history):\n \n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(len(acc))\n \n # Make and save the plot for our accuracy\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n plt.savefig(\"trainValAccSecond.png\")\n\n # Make and save the plots for our loss \n plt.figure()\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n plt.show()\n plt.savefig(\"trainValLossSecond.png\")",
"def plot_loss(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Loss of the Model')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()",
"def history_visualization(history, save_path=\"./\", config=\"\", show_results=False):\n # Plot training & validation accuracy values\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n plt.savefig(save_path + config + \"_accuracy.svg\")\n if show_results:\n plt.show()\n\n # Plot training & validation loss values\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n plt.savefig(save_path + config + \"_loss.svg\")\n if show_results:\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Searches inside the index for umbra3d
|
def search_umbra(text):
result = _search_blog('umbra3d', text)
_print_results(result)
return result
|
[
"def clustering_dbscan_o3d():\n pass",
"def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)",
"def search_index(\n client: Redis,\n exact: bool, \n embeddings_path: pathlib.Path, \n queries: int, \n batch_size: int,\n embedding_field_name : str,\n K: np.array,\n ):\n\n search_time = []\n\n nearest_neighbours = {}\n\n data_gen = get_embedding(embeddings_path, batch_size, queries)\n\n if exact : idx_name = \"Flat\"\n else : idx_name = \"HNSW\"\n\n for batch in tqdm.tqdm(data_gen):\n (embeddings_batch, external_id_batch) = batch\n\n for i in range(len(embeddings_batch)):\n query_vector = embeddings_batch[i].astype('float32')\n query_id = external_id_batch[i]\n\n assert len(redis_conn.execute_command(\"FT._LIST\")) == 1\n\n q = Query(f'*=>[KNN $k @{embedding_field_name} $vec_param AS dist]').paging(0,101).sort_by(f'dist')\n \n start_time = time.monotonic()\n res = client.ft(idx_name).search(q, query_params = {'k': int(K.max()+1),'vec_param': query_vector.tobytes()})\n search_time.append(time.monotonic() - start_time)\n\n query_id = str(query_id)\n nearest_neighbours[query_id] = {}\n\n nearest_neighbours[query_id][\"ids\"] = np.array([int(doc.external_id) for doc in res.docs])\n nearest_neighbours[query_id][\"distances\"] = np.array([float(doc.dist) for doc in res.docs]) \n\n return nearest_neighbours, np.mean(search_time)",
"def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)",
"def __getitem__(self, i):\n return _RMF_HDF5.DataSetIndex3D___getitem__(self, i)",
"def test_front_page_search_for_wiki(self):\n doc = document(\n title=u'How to fix your audio',\n locale=u'en-US',\n category=10)\n doc.save()\n\n doc.tags.add(u'desktop')\n\n rev = revision(\n document=doc,\n summary=u'Volume.',\n content=u'Turn up the volume.',\n is_approved=True)\n rev.save()\n\n self.refresh()\n\n # This is the search that you get when you start on the sumo\n # homepage and do a search from the box with two differences:\n # first, we do it in json since it's easier to deal with\n # testing-wise and second, we search for 'audio' since we have\n # data for that.\n response = self.localizing_client.get(reverse('search'), {\n 'q_tags': 'desktop', 'product': 'desktop', 'q': 'audio',\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['total'], 1)",
"def test_word_found_in_file(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'word_not_found')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"unit\")\n self.assertTrue(result != [])\n\n result = indexer.search(\"index\")\n self.assertTrue(result != [])\n print(result)",
"def album_search(query, term):\n table = Album.__table__\n search_statement = or_(\n table.c.name.ilike('%' + term + '%'),\n cast(table.c.release_date, Text).ilike('%' + term + '%'),\n table.c.image.ilike('%' + term + '%'),\n table.c.label.ilike('%' + term + '%'),\n table.c.tracks.ilike('%' + term + '%'),\n table.c.spotify_uri.ilike('%' + term + '%'),\n cast(table.c.id, Text).ilike('%' + term + '%'))\n return query.filter(search_statement)",
"def iou_3d(a: BBox3D, b: BBox3D):\n return jaccard_index_3d(a, b)",
"def find_art(self, artist, album, file=''):\n\n try:\n search_results = self.discogsclient.search(album+\" \"+artist, type='release', format='cd')\n # print(\"Discogs: find_art: for %30s/%s, pages > %s, releases %s\" % (artist, album, search_results.pages, len(search_results)))\n\n albums = []\n for release in search_results:\n a={}\n # m = self.discogsclient.master(release.id)\n # print(m.main_release.title)\n\n try:\n a['album'] = release.title\n a['match'] = ALBUM_MATCH_WEIGHT*fuzz.token_sort_ratio(a['album'], album)\n\n # print(release)\n\n for _artist in release.artists:\n # print(_artist.name)\n a['artist'] = _artist.name\n a['match'] += fuzz.token_sort_ratio(a['artist'], artist)\n\n\n for i in release.images:\n if 'type' in i and 'uri' and i['type'] == 'primary':\n a['uri'] = i['uri']\n except Exception as e:\n continue\n\n # print(a)\n if 'uri' in a: albums.append(a) #Dont bother if one could\n if len(albums) > ALBUM_LIMIT: break\n\n Discogs.match = 0\n for a in albums:\n if a['match'] > Discogs.match:\n best = a\n # print (\"Best so far:\", a)\n Discogs.match = best['match']\n\n if Discogs.match > MATCH_THRESHOLD:\n # print(\"Discogs: find_art: Image successfully found, match score was %d of %d, uri: %s\" % (best['match'], len(albums), best['uri']) )\n return best['uri']\n else:\n return False\n except:\n return False",
"def __le__(self, o):\n return _RMF_HDF5.DataSetIndex3D___le__(self, o)",
"def _load_bboxes_3d(self, results):\n results[\"gt_bboxes_3d\"] = results[\"ann_info\"][\"gt_bboxes_3d\"]\n results[\"bbox3d_fields\"].append(\"gt_bboxes_3d\")\n return results",
"def test_search_qlp():\n search = search_lightcurve(\"TIC 277554109\", author=\"QLP\", sector=11)\n assert len(search) == 1\n assert search.table[\"author\"][0] == \"QLP\"\n lc = search.download()\n assert type(lc).__name__ == \"TessLightCurve\"\n assert lc.sector == 11\n assert lc.author == \"QLP\"",
"def _search_py3_issues(self, full_name):\n open_issues = self.api.legacy.issues.search(full_name).open('python+3').GET().json()\n closed_issues = self.api.legacy.issues.search(full_name).closed('python+3').GET().json()\n return [{'state': issue['state'], 'title': issue['title'], 'html_url': issue['html_url']}\n for issue in chain(open_issues['issues'], closed_issues['issues'])]",
"def assets_search(ctx, text, pretty):\n ocean = ctx.obj['ocean']\n response = ocean.search(text, pretty)\n echo(response)",
"def getindexu(self,name,searchby='name'):\n name = name.replace(':','_').lower()\n result = []\n\n for (i,elem) in enumerate(self.lat):\n if fnmatch.fnmatch(elem[searchby],name):\n result.append(i)\n return result",
"def read_k3ds(content, name = None):\n ret_val = {}\n version = struct.unpack(\"f\", content[16:20])\n scale_x = struct.unpack(\"f\", content[20:24])\n scale_y = struct.unpack(\"f\", content[24:28])\n # notused1 = struct.unpack(\"f\", content[28:32])\n # notused2 = struct.unpack(\"f\", content[32:36])\n # notused3 = struct.unpack(\"h\", content[36:38])\n # notused4 = struct.unpack(\"h\", content[38:40])\n inspection_date = struct.unpack(\"ssssssssss\", content[40:50])\n inspection_time = struct.unpack(\"ssssssss\", content[50:58])\n board_name = struct.unpack(\"ssssssssssssssssssss\", content[58:78])\n # notused5 = struct.unpack(\"h\", content[78:80])\n # notused6 = struct.unpack(\"h\", content[80:82])\n # notused7 = struct.unpack(\"h\", content[82:84])\n # notused8 = struct.unpack(\"h\", content[84:86])\n image_width = struct.unpack(\"i\", content[86:90])\n image_height = struct.unpack(\"i\", content[90:94])\n kohyoung_id = struct.unpack(\"i\", content[94:98])\n pad_id = struct.unpack(\"f\", content[98:102])\n pad_type = struct.unpack(\"h\", content[102:104])\n # notused9 = struct.unpack(\"f\", content[104:108])\n result = struct.unpack(\"h\", content[108:110])\n volume_result = struct.unpack(\"f\", content[110:114])\n zmap_height = struct.unpack(\"f\", content[114:118])\n offset_x_result = struct.unpack(\"f\", content[118:122])\n offset_y_result = struct.unpack(\"f\", content[122:126])\n center_x_from_origin = struct.unpack(\"f\", content[126:130])\n center_y_from_origin = struct.unpack(\"f\", content[130:134])\n pad_size_width = struct.unpack(\"f\", content[134:138])\n pad_size_height = struct.unpack(\"f\", content[138:142])\n area_size = struct.unpack(\"f\", content[142:146])\n component_id = struct.unpack(\"ssssssssssssssssssss\", content[146:166])\n pin_number = struct.unpack(\"sssss\", content[166:171])\n panel_id = struct.unpack(\"h\", content[171:173])\n real_volume_result = struct.unpack(\"f\", content[173:177])\n real_area_result = struct.unpack(\"f\", content[177:181])\n pad_spec = struct.unpack(\"f\", content[181:185])\n # notused10 = struct.unpack(\"h\", content[185:187])\n # notused11 = struct.unpack(\"f\", content[187:191])\n # notused12 = struct.unpack(\"f\", content[191:195])\n # notused13 = struct.unpack(\"f\", content[195:199])\n # notused14 = struct.unpack(\"f\", content[199:203])\n # notused15 = struct.unpack(\"h\", content[203:205])\n stencil_height = struct.unpack(\"f\", content[205:209])\n # notused16 = struct.unpack(\"f\", content[209:213])\n # notused17 = struct.unpack(\"f\", content[213:217])\n extend_2d_width = struct.unpack(\"i\", content[217:221])\n extend_2d_height = struct.unpack(\"i\", content[221:225])\n roi_left = struct.unpack(\"i\", content[225:229])\n roi_top = struct.unpack(\"i\", content[229:233])\n roi_width = struct.unpack(\"i\", content[233:237])\n roi_height = struct.unpack(\"i\", content[237:241])\n if name is None:\n ret_val['name'] = (\"\".join(component_id)).rstrip(' \\t\\r\\n\\0')\n else:\n ret_val['name'] = name\n ret_val['version'] = version[0]\n ret_val['scale_x'] = scale_x[0]\n ret_val['scale_y'] = scale_y[0]\n ret_val['inspection_date'] = \"\".join(inspection_date).rstrip(' \\t\\r\\n\\0')\n ret_val['inspection_time'] = \"\".join(inspection_time).rstrip(' \\t\\r\\n\\0')\n ret_val['board_name'] = \"\".join(board_name)\n ret_val['image_width'] = image_width[0]\n ret_val['image_height'] = image_height[0]\n ret_val['kohyoung_id'] = kohyoung_id[0]\n ret_val['pad_id'] = pad_id[0]\n ret_val['pad_type'] = pad_type[0]\n ret_val['result'] = 
result[0]\n ret_val['volume_result'] = volume_result[0]\n ret_val['zmap_height'] = zmap_height[0]\n ret_val['offset_x_result'] = offset_x_result[0]\n ret_val['offset_y_result'] = offset_y_result[0]\n ret_val['center_x_from_origin'] = center_x_from_origin[0]\n ret_val['center_y_from_origin'] = center_y_from_origin[0]\n ret_val['pad_size_width'] = pad_size_width[0]\n ret_val['pad_size_height'] = pad_size_height[0]\n ret_val['area_size'] = area_size[0]\n ret_val['component_id'] = \"\".join(component_id).rstrip(' \\t\\r\\n\\0')\n ret_val['pin_number'] = \"\".join(pin_number).rstrip(' \\t\\r\\n\\0')\n ret_val['panel_id'] = panel_id[0]\n ret_val['real_volume_result'] = real_volume_result[0]\n ret_val['real_area_result'] = real_area_result[0]\n ret_val['pad_spec'] = pad_spec[0]\n ret_val['stencil_height'] = stencil_height[0]\n ret_val['extend_2d_width'] = extend_2d_width[0]\n ret_val['extend_2d_height'] = extend_2d_height[0]\n ret_val['roi_left'] = roi_left[0]\n ret_val['roi_top'] = roi_top[0]\n ret_val['roi_width'] = roi_width[0]\n ret_val['roi_height'] = roi_height[0]\n image_2d_size = (image_width[0] * image_height[0])\n if not 241 + image_2d_size < len(content):\n raise K3DException(\"K3d file invalid, not enough data to parse image\")\n image = (np.frombuffer(content[241:241 + image_2d_size], dtype=np.uint8))\n if image_height[0] * image_width[0] < 0:\n raise K3DException(\"K3d file invalid, image dimensions are negative\")\n try:\n image.resize((image_height[0], image_width[0]))\n except:\n raise K3DException(\"K3d file invalid, cannot resize the image\")\n ret_val['img_gray'] = image\n if not 241 + 1 + image_2d_size + 2 * image_2d_size < len(content):\n raise K3DException(\"K3d file invalid, not enough data to parse image\")\n\n image3d = np.frombuffer(content[241 + 1 + image_2d_size:241 + 1 + image_2d_size + 2 * image_2d_size], dtype=np.int16)\n image3d = image3d.reshape((image_height[0], image_width[0]))\n image3d = image3d.copy().astype(np.float16)\n ret_val['img_3d'] = image3d\n if version[0] > 3.0:\n offset = 241 + 1 + image_2d_size + 2 * image_2d_size + 82\n if not offset + image_2d_size < len(content):\n raise K3DException(\"K3d file invalid, not enough data to parse image\")\n image2dr = np.frombuffer(content[offset:offset + image_2d_size], dtype=np.uint8)\n offset += image_2d_size + 1\n if not offset + image_2d_size < len(content):\n raise K3DException(\"K3d file invalid, not enough data to parse image\")\n image2dg = np.frombuffer(content[offset:offset + image_2d_size], dtype=np.uint8)\n offset += image_2d_size + 1\n if not offset + image_2d_size < len(content):\n raise K3DException(\"K3d file invalid, not enough data to parse image\")\n image2db = np.frombuffer(content[offset:offset + image_2d_size], dtype=np.uint8)\n image2dr.resize((image_height[0], image_width[0]))\n image2dg.resize((image_height[0], image_width[0]))\n image2db.resize((image_height[0], image_width[0]))\n image_BGR = np.dstack((image2db, image2dg, image2dr))\n ret_val['img_bgr'] = image_BGR\n return ret_val",
"def test_search_gl_posting(self):\n pass",
"def test_get_records_3d(self):\n url = os.path.join(self.rest_url,\n 'compound/cid/2244/SDF?record_type=3d')\n ref = urllib2.urlopen(url).read()\n data = self.engine.get_records([2244], use_3d=True)\n assert self.identical_sdf(data, ref)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create an embed with the lyrics
|
def _lyrics_embed(colour, page: Dict[str, Any], data: Dict[str, Any]) -> discord.Embed:
title = [
x.get("value")
for x in data.get("names")
if x.get("language") == LANGUAGE_MAP.get(page["cultureCode"])
]
em = discord.Embed(
title=title[0] if title else data.get("defaultName"),
colour=colour,
)
em.set_thumbnail(url=data.get("thumbUrl") or "")
if data.get("id"):
em.url = f"https://vocadb.net/S/{data['id']}"
em.description = page["value"][:4090] if page.get("value") else "No lyrics found."
if page.get("url"):
em.add_field(
name="Source",
value=f"[{page.get('source') or 'Source'}]({page['url']})",
)
return em
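
A hypothetical invocation sketch, assuming discord.py is installed and that this runs in the same module as _lyrics_embed (so the LANGUAGE_MAP stub below is the mapping the function sees); the page/data dicts only imitate the fields the function reads, not any confirmed VocaDB schema:

import discord

LANGUAGE_MAP = {"en": "English"}  # assumed stub for the module-level mapping

page = {
    "cultureCode": "en",
    "value": "First line of lyrics\nSecond line",
    "url": "https://example.com/lyrics/1",
    "source": "Example source",
}
data = {
    "id": 1234,
    "defaultName": "Example Song",
    "thumbUrl": "https://example.com/thumb.png",
    "names": [{"language": "English", "value": "Example Song (EN)"}],
}

embed = _lyrics_embed(discord.Colour.blurple(), page, data)
print(embed.title, embed.url)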
|
[
"async def lyrics(self, ctx: commands.Context, *, song_name: str):\n try:\n client = await self.obtain_client()\n except AttributeError:\n await ctx.send(\"Not key for KSoft.Si has been set, ask owner to add a key.\")\n return\n try:\n music_lyrics = await client.music.lyrics(song_name)\n except ksoftapi.NoResults:\n await ctx.send(\"No lyrics were found for your music.\")\n return\n message, available_musics = await self._title_choose(music_lyrics)\n await ctx.maybe_send_embed(message)\n predicator = MessagePredicate.less(10, ctx)\n try:\n user_message = await self.bot.wait_for(\"message\", check=predicator, timeout=60)\n except Te:\n await ctx.send(\"It's so silent on the outside...\")\n return\n\n choosen_music = user_message.content\n if choosen_music not in available_musics:\n await ctx.send(\n \"I was unable to find the corresponding music in the available music list.\"\n )\n return\n music = available_musics[choosen_music]\n embeds = []\n embed = discord.Embed(color=await ctx.embed_color(), title=music.name, description=None)\n embed.set_thumbnail(url=music.album_art)\n embed.set_footer(text=\"Powered by KSoft.Si.\", icon_url=ctx.author.avatar_url)\n for text in pagify(music.lyrics):\n embed.description = text\n embeds.append(embed)\n create_task(menu(ctx, embeds, DEFAULT_CONTROLS)) # No await since max_concurrency is here",
"async def sample_embed(self, ctx: ct.ctxType):\n embed = discord.Embed(\n title=\"Sample Embed\",\n url=\"https://youtu.be/dQw4w9WgXcQ\",\n description=\"This is a sample embed.\",\n colour=discord.Colour.dark_blue(),\n )\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar.url)\n\n embed.set_thumbnail(url=ctx.author.avatar.url)\n\n embed.add_field(\n name=\"Field1\", value=\"Value under Field1, inline=False\", inline=False\n )\n embed.add_field(\n name=\"Field2\", value=\"Value under Field2, inline=True\", inline=True\n )\n embed.add_field(\n name=\"Field3\", value=\"Value under Field3, inline=True\", inline=True\n )\n\n embed.set_footer(\n text=f\"Requested by {ctx.author.name}\", icon_url=ctx.author.avatar.url\n )\n\n await ctx.reply(embed=embed)",
"async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e",
"async def lyrics(self, ctx, *args):\n state = self.get_state(ctx.guild)\n extract = Song_Lyrics(self.config[\"search_key\"], self.config[\"search_id\"])\n messages = []\n title = None\n lyrics = None\n if len(args) == 0: # now playing lyrics\n if ctx.voice_client is not None and ctx.voice_client.is_playing():\n playing = state.now_playing\n title, lyrics = extract.get_lyrics(playing.title)\n print(len(lyrics))\n print(lyrics)\n\n else:\n await ctx.send(\"Nothing is playing currently, add a song title to the command to search\")\n return\n else: # search lyrics\n song = utils.argument_concat(args)\n if utils.url_validation(song):\n await ctx.send(\"This doesn't take urls fam, just enter the title of the song\")\n return\n title, lyrics = extract.get_lyrics(song)\n message = title + \"\\n\" + lyrics\n if len(message) > 2000:\n while len(message) > 2000:\n index = 2000\n while message[index] != \"\\n\":\n index -= 1\n mes = message[:index]\n sage = message[index:]\n messages.append(mes)\n message = sage\n else:\n messages.append(message)\n for string in messages:\n await ctx.send(string)",
"def reply_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.blurple())\n embed.title = \"\"\n embed.description = message\n return embed",
"def build_embed(clip_object, autoplay=False):\r\n url = build_url(clip_object, autoplay=autoplay)\r\n return \"<iframe frameborder=\\\"0\\\" scrolling=\\\"no\\\" marginheight=\\\"0\\\" marginwidth=\\\"0\\\"width=\\\"788.54\\\" height=\\\"443\\\" type=\\\"text/html\\\" src=\" + url + \"></iframe>\"",
"async def build_links_embed():\n embed = helper.embed_builder(\n bot.user.name,\n \"Hello, my friend! I am Valron. My wife has compiled a \"\n + \"list of helpful links for you.\",\n )\n embed.add_field(\n name=\"Invite me to your server with this link\",\n value=f\"[Click me!]({constants.DISCORD_INVITE_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"Find out what's new with me from the support discord server\",\n value=f\"[Click me!]({constants.SUPPORT_SERVER_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"See how I was made\",\n value=f\"[Click me!]({constants.GITHUB_LINK})\",\n inline=False,\n )\n embed.add_field(\n name=\"Want to support me and my wife?\",\n value=f\"Click any of these: [PayPal]({constants.PAYPAL_LINK}) \"\n + f\"| [Ko-Fi]({constants.KOFI_LINK}) | [GCash]({constants.GCASH_QR_CODE})\",\n inline=False,\n )\n embed.set_footer(\n text=\"/help - main help command\\n\"\n + \"/options - to see the list of supported classes and HP modifiers\"\n )\n\n return embed",
"def get_embed(self, show: Show) -> Embed:",
"def lyrics_plaintext(song):\n output = \"\"\n\n song = validate_song(song)\n\n output += song.default_arrangement\n output += \"\\n\\n\\n\\n\"\n output += song.composer\n output += \"\\n\"\n output += song.copyright\n output += \"\\n\\n\"\n\n for section, lyrics in song.lyrics.items():\n output += section\n output += \"\\n\"\n output += lyrics\n output += \"\\n\\n\"\n return output",
"def embed(title, image_url) -> discord.Embed:\n result = discord.Embed()\n result.title = title\n result.set_image(url=image_url)\n return result",
"async def new(self, ctx):\n author_id = ctx.message.author.id\n record = await Mongo.get_record('embed', 'embed_owner', author_id)\n if record is None:\n upg = {\n \"embed_owner\": author_id,\n \"author\": \"\",\n \"description\": \"\",\n \"field_1\": \"\",\n \"name_1\": \"\",\n \"field_2\": \"\",\n \"name_2\": \"\",\n \"field_3\": \"\",\n \"name_3\": \"\",\n \"field_4\": \"\",\n \"name_4\": \"\",\n \"set_image\": \"\",\n \"footer\": \"\"\n }\n await Mongo.record_insert('embed', upg)\n await ctx.send(\"Создание вашего личного embed успешно.:white_check_mark:\")\n else:\n await ctx.send(\"У вас уже есть свои личный embed\\nВы можете использовать 'em clear', для его очистки.\")",
"async def create_embed(self, author, author_message):\n embed = Embed(colour=author.color)\n\n if author_message.clean_content:\n embed.add_field(name=author.display_name, value=f\"{author_message.clean_content}\\n[[jump]]({author_message.jump_url})\")\n\n if author_message.attachments:\n for att in author_message.attachments:\n for ext in self.IMG_EXT:\n if ext in att.filename:\n break\n else:\n for ext in self.VIDEO_EXT:\n if ext in att.filename:\n embed.add_field(name=\"\\u200b\", value=f\"🎞️ {att.filename}\", inline=False)\n break\n else:\n embed.add_field(name=\"\\u200b\", value=f\"📁 {att.filename}\", inline=False)\n break\n break\n embed.set_image(url=f\"{att.url}\")\n\n if author_message.embeds and not author_message.attachments:\n for embed in author_message.embeds:\n embed.clear_fields()\n embed.set_image(url=\"\")\n embed.add_field(name=author.display_name, value=author_message.clean_content)\n\n embed.set_thumbnail(url=author.avatar_url_as(size=32, format='png'))\n\n if not author_message.clean_content:\n embed.add_field(name=\"\\u200b\", value=f\"[[jump]]({author_message.jump_url})\", inline=False)\n\n return embed",
"def quote_to_embed(self,result):\n thedate = datetime.date.fromtimestamp(result[3])\n thechannel = self.bot.get_channel(result[2])\n themember = thechannel.server.get_member(result[1])\n theauthor = themember.name\n if hasattr(themember, \"nick\"):\n if themember.nick is not None:\n theauthor = themember.nick\n embed = discord.Embed(title=\"Quote #{}\".format(result[4]), description=result[0])\n embed.set_author(name=theauthor, icon_url=themember.avatar_url)\n embed.set_footer(text=\"Saved on: {}\".format(thedate.strftime(\"%d %B %y\")))\n return embed",
"def generate_embed(self):\n doc_id = self.documentcloud_id\n doc_sidebar = str(self.sidebar).lower()\n style_embed = '<link rel=\"stylesheet\" type=\"text/css\" href=\"{css}\"></link>'.format(css=EMBED_CSS)\n iframe_embed = '<div><iframe class=\"docpubEmbed\" src=\"https://www.documentcloud.org/documents/{id}.html?sidebar={sidebar}\"></iframe></div>'.format(\n id=doc_id,\n sidebar=doc_sidebar\n ) # style=\"border:none;width:100%;height:500px\" # desktop height 930px, mobile height 500px\n self.embed_code = style_embed + iframe_embed",
"def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed",
"def help_lyrics(self):\n print_say(\"finds lyrics\\n\", self)\n print_say(\"the format is song,artist\\n\", self)\n print_say(\"song and artist are separated by a - \\n\", self)\n print_say(\"-- Example:\", self)\n print_say(\"\\tlyrics wonderful tonight-eric clapton\", self)",
"def embed(url, **options):\n wrapper = match_wrapper(url)\n if wrapper:\n return wrapper.render(wrapper.clean_url(url), options)\n return ''",
"def generate_music_info(tag_editor_context: dict) -> str:\n ctx = tag_editor_context\n\n return (\n f\"*🗣 Artist:* {ctx['artist'] if ctx['artist'] else '-'}\\n\"\n f\"*🎵 Title:* {ctx['title'] if ctx['title'] else '-'}\\n\"\n f\"*🎼 Album:* {ctx['album'] if ctx['album'] else '-'}\\n\"\n f\"*🎹 Genre:* {ctx['genre'] if ctx['genre'] else '-'}\\n\"\n f\"*📅 Year:* {ctx['year'] if ctx['year'] else '-'}\\n\"\n f\"*💿 Disk Number:* {ctx['disknumber'] if ctx['disknumber'] else '-'}\\n\"\n f\"*▶️ Track Number:* {ctx['tracknumber'] if ctx['tracknumber'] else '-'}\\n\"\n \"{}\\n\"\n )",
"def add_lyrics(self):\n\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n c.execute(\"SELECT songs.id, artist, title, url FROM songs LEFT JOIN lyrics ON songs.id = lyrics.song_id WHERE lyrics.song_id IS NULL\")\n all_songs_to_scrape = c.fetchall()\n for song in all_songs_to_scrape:\n song_id = song[0]\n song_artist = song[1]\n song_title = song[2]\n song_url = song[3]\n print(\"Looking for lyrics for \" + song_title + \" by \" + song_artist)\n try:\n lyrics = pygenius_songs.searchURL(song_url, 'lyrics')\n for lyric in lyrics:\n for line in lyric.split('\\n'):\n c.execute('INSERT INTO lyrics(song_id, line) VALUES (?,?)', (song_id, line))\n conn.commit()\n except Exception as e:\n print(e)\n print song_url\n print(\"Exception caught! ... continuing.\")\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fuse conv and bn into one module.
|
def _fuse_conv_bn(conv, bn):
conv_w = conv.weight
conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
bn.running_mean)
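    # In eval mode BN computes y = (x - mean) * factor + beta with
    # factor = gamma / sqrt(running_var + eps); fold that affine map into
    # the conv weight and bias below.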
factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
conv.weight = nn.Parameter(conv_w *
factor.reshape([conv.out_channels, 1, 1, 1]))
conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
return conv
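
The fused layer above should reproduce a conv followed by bn exactly in eval mode. A minimal check sketch, assuming torch is installed and the _fuse_conv_bn above is in scope; the layer sizes and random statistics are arbitrary:

import copy
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
bn = nn.BatchNorm2d(8)
with torch.no_grad():
    # Give BN non-trivial statistics so the comparison is meaningful.
    bn.running_mean.uniform_(-1.0, 1.0)
    bn.running_var.uniform_(0.5, 2.0)
    bn.weight.uniform_(0.5, 1.5)
    bn.bias.uniform_(-0.5, 0.5)
bn.eval()  # fusion assumes frozen running statistics

x = torch.randn(1, 3, 16, 16)
with torch.no_grad():
    reference = bn(conv(x))
    fused = _fuse_conv_bn(copy.deepcopy(conv), bn)
    assert torch.allclose(reference, fused(x), atol=1e-5)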
|
[
"def fuse_conv_bn(module):\n last_conv = None\n last_conv_name = None\n\n for name, child in module.named_children():\n if isinstance(child,\n (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):\n if last_conv is None: # only fuse BN that is after Conv\n continue\n fused_conv = _fuse_conv_bn(last_conv, child)\n module._modules[last_conv_name] = fused_conv\n # To reduce changes, set BN as Identity instead of deleting it.\n module._modules[name] = nn.Identity()\n last_conv = None\n elif isinstance(child, nn.Conv2d):\n last_conv = child\n last_conv_name = name\n else:\n fuse_conv_bn(child)\n return module",
"def efficient_conv_bn_eval_graph_transform(fx_model):\n modules = dict(fx_model.named_modules())\n\n patterns = [(torch.nn.modules.conv._ConvNd,\n torch.nn.modules.batchnorm._BatchNorm)]\n\n pairs = []\n # Iterate through nodes in the graph to find ConvBN blocks\n for node in fx_model.graph.nodes:\n # If our current node isn't calling a Module then we can ignore it.\n if node.op != 'call_module':\n continue\n target_module = modules[node.target]\n found_pair = False\n for conv_class, bn_class in patterns:\n if isinstance(target_module, bn_class):\n source_module = modules[node.args[0].target]\n if isinstance(source_module, conv_class):\n found_pair = True\n # Not a conv-BN pattern or output of conv is used by other nodes\n if not found_pair or len(node.args[0].users) > 1:\n continue\n\n # Find a pair of conv and bn computation nodes to optimize\n conv_node = node.args[0]\n bn_node = node\n pairs.append([conv_node, bn_node])\n\n for conv_node, bn_node in pairs:\n # set insertion point\n fx_model.graph.inserting_before(conv_node)\n # create `get_attr` node to access modules\n # note that we directly call `create_node` to fill the `name`\n # argument. `fx_model.graph.get_attr` and\n # `fx_model.graph.call_function` does not allow the `name` argument.\n conv_get_node = fx_model.graph.create_node(\n op='get_attr', target=conv_node.target, name='get_conv')\n bn_get_node = fx_model.graph.create_node(\n op='get_attr', target=bn_node.target, name='get_bn')\n # prepare args for the fused function\n args = (bn_get_node, conv_get_node, conv_node.args[0])\n # create a new node\n new_node = fx_model.graph.create_node(\n op='call_function',\n target=efficient_conv_bn_eval_control,\n args=args,\n name='efficient_conv_bn_eval')\n # this node replaces the original conv + bn, and therefore\n # should replace the uses of bn_node\n bn_node.replace_all_uses_with(new_node)\n # take care of the deletion order:\n # delete bn_node first, and then conv_node\n fx_model.graph.erase_node(bn_node)\n fx_model.graph.erase_node(conv_node)\n\n # regenerate the code\n fx_model.graph.lint()\n fx_model.recompile()",
"def create_from_conv_bn(conv: torch.nn.modules.conv._ConvNd,\n bn: torch.nn.modules.batchnorm._BatchNorm,\n efficient_conv_bn_eval=True) -> 'ConvModule':\n self = ConvModule.__new__(ConvModule)\n super(ConvModule, self).__init__()\n\n self.conv_cfg = None\n self.norm_cfg = None\n self.act_cfg = None\n self.inplace = False\n self.with_spectral_norm = False\n self.with_explicit_padding = False\n self.order = ('conv', 'norm', 'act')\n\n self.with_norm = True\n self.with_activation = False\n self.with_bias = conv.bias is not None\n\n # build convolution layer\n self.conv = conv\n # export the attributes of self.conv to a higher level for convenience\n self.in_channels = self.conv.in_channels\n self.out_channels = self.conv.out_channels\n self.kernel_size = self.conv.kernel_size\n self.stride = self.conv.stride\n self.padding = self.conv.padding\n self.dilation = self.conv.dilation\n self.transposed = self.conv.transposed\n self.output_padding = self.conv.output_padding\n self.groups = self.conv.groups\n\n # build normalization layers\n self.norm_name, norm = 'bn', bn\n self.add_module(self.norm_name, norm)\n\n self.turn_on_efficient_conv_bn_eval(efficient_conv_bn_eval)\n\n return self",
"def all_conv_ops(self):\n pass",
"def __init__(self, ch_in, ch_out, k, pool_size, norm_type, freeze_norm=False, name='', act='leaky', data_format='NCHW'):\n super(SPP, self).__init__()\n self.pool = nn.ModuleList()\n self.data_format = data_format\n for i, size in enumerate(pool_size):\n self.pool.add_module('{}_pool_{}'.format(name, i), nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2, ceil_mode=False))\n self.conv = ConvBNLayer(ch_in, ch_out, k, padding=k // 2, norm_type=norm_type, freeze_norm=freeze_norm, name=name, act=act, data_format=data_format)",
"def split_conv_module(cls, model: tf.keras.Model, layer: tf.keras.layers, rank, svd_lib_ref) \\\n -> (tf.keras.layers.Conv2D, tf.keras.layers.Conv2D):\n\n name = layer.name\n logger.debug('Splitting conv op: %s with rank %d', name, rank)\n split_weights, weight_sizes = [], []\n split_biases, bias_sizes = [], []\n bias_present = False\n\n conv_parameters = layer.get_weights()\n if len(conv_parameters) > 1:\n bias_present = True\n\n _, _, in_channels, out_channels = conv_parameters[0].shape\n data_format_channels = layer.data_format\n padding = layer.padding\n\n # TF weights are in [H,W,I,O] order. We must reshape the split weights to SVD format [O,I,H,W]\n # and then transpose back\n\n conv_a_weight_shape = (rank, in_channels, 1, 1)\n conv_a_weight = np.zeros(conv_a_weight_shape)\n\n split_weights.append(conv_a_weight.flatten().tolist())\n weight_sizes.append(conv_a_weight.size)\n\n conv_b_weight_shape = (out_channels, rank, *layer.kernel_size)\n conv_b_weight = np.zeros(conv_b_weight_shape)\n\n split_weights.append(conv_b_weight.flatten().tolist())\n weight_sizes.append(conv_b_weight.size)\n\n split_weights = svd_lib_ref.SplitLayerWeights(str(name), split_weights, weight_sizes,\n [rank])\n\n if bias_present:\n conv_a_bias = np.zeros(rank)\n split_biases.append(conv_a_bias.flatten().tolist())\n bias_sizes.append(conv_a_bias.size)\n\n conv_b_bias = np.zeros(out_channels)\n split_biases.append(conv_b_bias.flatten().tolist())\n bias_sizes.append(conv_b_bias.size)\n\n split_biases = svd_lib_ref.SplitLayerBiases(str(name), split_biases, bias_sizes,\n [rank])\n\n logger.debug(\"Splitting conv module weight of shape %r into %r and %r\",\n conv_parameters[0].shape, conv_a_weight.shape, conv_b_weight.shape)\n\n conv_a = tf.keras.layers.Conv2D(filters=rank, kernel_size=(1, 1),\n strides=(1, 1), data_format=data_format_channels,\n activation=None, padding=padding,\n name=layer.name + '_a', use_bias=bias_present)\n\n conv_b = tf.keras.layers.Conv2D(filters=out_channels, kernel_size=layer.kernel_size,\n strides=layer.strides,\n name=layer.name + '_b',\n data_format=data_format_channels, padding=padding, use_bias=bias_present)\n\n # Replace the layer in the model\n replace_layer_in_functional_model(model, layer, [conv_a, conv_b])\n\n if bias_present:\n conv_a.set_weights([np.array(split_weights[0], dtype=np.float32).reshape(conv_a_weight_shape).transpose(2, 3, 1, 0),\n np.array(split_biases[0], dtype=np.float32)])\n conv_b.set_weights([np.array(split_weights[1], dtype=np.float32).reshape(conv_b_weight_shape).transpose(2, 3, 1, 0),\n np.array(split_biases[1], dtype=np.float32)])\n else:\n conv_a.set_weights([np.array(split_weights[0], dtype=np.float32).reshape(conv_a_weight_shape).transpose(2, 3, 1, 0)])\n conv_b.set_weights([np.array(split_weights[1], dtype=np.float32).reshape(conv_b_weight_shape).transpose(2, 3, 1, 0)])\n\n return conv_a, conv_b",
"def fuse_model(self):\n\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()",
"def __init__(self,\n in_channels=[512, 1024, 2048],\n norm_type='bn',\n data_format='NCHW',\n act='mish',\n conv_block_num=3,\n drop_block=False,\n block_size=3,\n keep_prob=0.9,\n spp=False,\n init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')\n ):\n super(PPYOLOPAN, self).__init__(init_cfg)\n assert len(in_channels) > 0, \"in_channels length should > 0\"\n self.in_channels = in_channels\n self.num_blocks = len(in_channels)\n # parse kwargs\n self.drop_block = drop_block\n self.block_size = block_size\n self.keep_prob = keep_prob\n self.spp = spp\n self.conv_block_num = conv_block_num\n self.data_format = data_format\n if self.drop_block:\n dropblock_cfg = [['dropblock', DropBlock, [self.block_size, self.keep_prob], dict()]]\n else:\n dropblock_cfg = []\n\n # fpn\n self.fpn_block_names = []\n self.fpn_routes_names = []\n fpn_channels = []\n for i, ch_in in enumerate(self.in_channels[::-1]):\n if i > 0:\n ch_in += 512 // (2 ** (i - 1))\n channel = 512 // (2 ** i)\n base_cfg = []\n for j in range(self.conv_block_num):\n base_cfg += [\n # name, layer, args\n ['{}_0'.format(j), ConvBNLayer, [channel, channel, 1],\n dict(padding=0, act=act, norm_type=norm_type)],\n ['{}_1'.format(j), ConvBNLayer, [channel, channel, 3],\n dict(padding=1, act=act, norm_type=norm_type)]\n ]\n\n if i == 0 and self.spp:\n base_cfg[3] = [\n 'spp', SPP, [channel * 4, channel, 1], dict(pool_size=[5, 9, 13], act=act, norm_type=norm_type)\n ]\n\n cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]\n name = 'fpn_{}'.format(i)\n fpn_block = PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name, data_format)\n self.add_module(name, fpn_block)\n self.fpn_block_names.append(name)\n\n fpn_channels.append(channel * 2)\n if i < self.num_blocks - 1:\n route = ConvBNLayer(\n ch_in=channel * 2,\n ch_out=channel,\n filter_size=1,\n stride=1,\n padding=0,\n act=act,\n norm_type=norm_type)\n name = \"route_{}\".format(i)\n self.add_module(name, route)\n self.fpn_routes_names.append(name)\n # pan\n self.pan_blocks = []\n self.pan_routes = []\n self._out_channels = [512 // (2 ** (self.num_blocks - 2)), ]\n for i in reversed(range(self.num_blocks - 1)):\n route = ConvBNLayer(\n ch_in=fpn_channels[i + 1],\n ch_out=fpn_channels[i + 1],\n filter_size=3,\n stride=2,\n padding=1,\n act=act,\n norm_type=norm_type)\n self.pan_routes = [route, ] + self.pan_routes\n base_cfg = []\n ch_in = fpn_channels[i] + fpn_channels[i + 1]\n channel = 512 // (2 ** i)\n for j in range(self.conv_block_num):\n base_cfg += [\n # name, layer, args\n [\n '{}_0'.format(j), ConvBNLayer, [channel, channel, 1],\n dict(\n padding=0, act=act, norm_type=norm_type)\n ],\n [\n '{}_1'.format(j), ConvBNLayer, [channel, channel, 3],\n dict(\n padding=1, act=act, norm_type=norm_type)\n ]\n ]\n\n cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]\n name = 'pan_{}'.format(i)\n pan_block = PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name, data_format)\n self.pan_blocks = [pan_block, ] + self.pan_blocks\n self._out_channels.append(channel * 2)\n\n self._out_channels = self._out_channels[::-1]\n self.pan_blocks = nn.Sequential(*self.pan_blocks)\n self.pan_routes = nn.Sequential(*self.pan_routes)",
"def _wrap_modules(self, layer: LayerInfo, config: Dict):\n _logger.debug(\"Module detected to compress : %s.\", layer.name)\n wrapper = PrunerScoredModuleWrapper(layer.module, layer.name, config, self)\n assert hasattr(layer.module, 'weight'), \"module %s does not have 'weight' attribute\" % layer.name\n # move newly registered buffers to the same device of weight\n wrapper.to(layer.module.weight.device)\n return wrapper",
"def convert_network(network, dtype, convert_bn):\n for module in network.modules():\n if not convert_bn and isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:\n continue\n apex.fp16_utils.convert_module(module, dtype)\n\n return network",
"def __init__(self, in_channels=[512, 1024, 2048], norm_type='bn', data_format='NCHW', act='mish', conv_block_num=3, drop_block=False, block_size=3, keep_prob=1.0, spp=False):\n super(PPYOLOPAN, self).__init__()\n assert len(in_channels) > 0, 'in_channels length should > 0'\n self.in_channels = in_channels\n self.num_blocks = len(in_channels)\n self.drop_block = drop_block\n self.block_size = block_size\n self.keep_prob = keep_prob\n self.spp = spp\n self.conv_block_num = conv_block_num\n self.data_format = data_format\n if self.drop_block:\n dropblock_cfg = [['dropblock', DropBlock, [self.block_size, self.keep_prob], dict()]]\n else:\n dropblock_cfg = []\n self.fpn_blocks = nn.ModuleList()\n self.fpn_routes = nn.ModuleDict()\n self.fpn_routes_names = []\n fpn_channels = []\n for i, ch_in in enumerate(self.in_channels[::-1]):\n if i > 0:\n ch_in += 512 // 2 ** (i - 1)\n channel = 512 // 2 ** i\n base_cfg = []\n for j in range(self.conv_block_num):\n base_cfg += [['{}_0'.format(j), ConvBNLayer, [channel, channel, 1], dict(padding=0, act=act, norm_type=norm_type)], ['{}_1'.format(j), ConvBNLayer, [channel, channel, 3], dict(padding=1, act=act, norm_type=norm_type)]]\n if i == 0 and self.spp:\n base_cfg[3] = ['spp', SPP, [channel * 4, channel, 1], dict(pool_size=[5, 9, 13], act=act, norm_type=norm_type)]\n cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]\n name = 'fpn_{}'.format(i)\n self.fpn_blocks.add_module(name, PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name, data_format))\n fpn_channels.append(channel * 2)\n if i < self.num_blocks - 1:\n name = 'fpn_transition_{}'.format(i)\n self.fpn_routes.add_module(name, ConvBNLayer(ch_in=channel * 2, ch_out=channel, filter_size=1, stride=1, padding=0, act=act, norm_type=norm_type, data_format=data_format, name=name))\n self.fpn_routes_names.append(name)\n self.pan_blocks = nn.ModuleDict()\n self.pan_blocks_names = []\n self.pan_routes = nn.ModuleDict()\n self.pan_routes_names = []\n self._out_channels = [512 // 2 ** (self.num_blocks - 2)]\n for i in reversed(range(self.num_blocks - 1)):\n name = 'pan_transition_{}'.format(i)\n self.pan_routes.add_module(name, ConvBNLayer(ch_in=fpn_channels[i + 1], ch_out=fpn_channels[i + 1], filter_size=3, stride=2, padding=1, act=act, norm_type=norm_type, data_format=data_format, name=name))\n route_name = [name] + self.pan_routes_names\n self.pan_routes_names = route_name\n base_cfg = []\n ch_in = fpn_channels[i] + fpn_channels[i + 1]\n channel = 512 // 2 ** i\n for j in range(self.conv_block_num):\n base_cfg += [['{}_0'.format(j), ConvBNLayer, [channel, channel, 1], dict(padding=0, act=act, norm_type=norm_type)], ['{}_1'.format(j), ConvBNLayer, [channel, channel, 3], dict(padding=1, act=act, norm_type=norm_type)]]\n cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]\n name = 'pan_{}'.format(i)\n self.pan_blocks.add_module(name, PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name, data_format))\n pan_block_name = [name] + self.pan_blocks_names\n self.pan_blocks_names = pan_block_name\n self._out_channels.append(channel * 2)\n self._out_channels = self._out_channels[::-1]",
"def _wrap_modules(self, layer: LayerInfo, config: Dict):\n _logger.debug(\"Module detected to compress : %s.\", layer.name)\n assert self.bound_model is not None\n # TODO: merge with _create_scalers after nni v3.0\n if self.sparse_granularity and self.sparse_granularity == 'auto' and self._model_parser:\n if self._model_parser.is_attention(layer.name):\n num_heads = self._model_parser.get_num_heads(layer.name, self.bound_model)\n if num_heads <= 0:\n score_size = None\n else:\n if layer.module.weight.shape[0] % num_heads != 0 or layer.module.weight.shape[1] % num_heads != 0: # type: ignore\n score_size = None\n else:\n score_size = [num_heads, num_heads]\n elif self._model_parser.is_ffn(layer.name, ffn_num=1):\n score_size = [layer.module.weight.shape[0], 1] # type: ignore\n elif self._model_parser.is_ffn(layer.name, ffn_num=2):\n score_size = [1, layer.module.weight.shape[1]] # type: ignore\n else:\n score_size = None\n else:\n score_size = None\n wrapper = PrunerScoredModuleWrapper(layer.module, layer.name, config, score_size)\n assert hasattr(layer.module, 'weight'), \"module %s does not have 'weight' attribute\" % layer.name\n # move newly registered buffers to the same device of weight\n wrapper.to(layer.module.weight.device) # type: ignore\n return wrapper",
"def run(self):\n if len(self._tf_op._OPSETS) == 0:\n raise RuntimeError( # pragma: no cover\n \"No converter was registered.\")\n if self.verbose:\n print(\"[Tf2OnnxConvert.run]\") # pragma: no cover\n\n done = {}\n modif = 1\n turn = 0\n while modif > 0 and turn < self.max_iter:\n modif = 0\n turn += 1\n # The converter may alter the current list of nodes, we freeze it.\n current_values = list(self._names.values())\n for node in current_values:\n if not hasattr(node, 'domain'):\n # initializer\n continue\n if done.get(node.name, False):\n continue # pragma: no cover\n domain = node.domain\n if domain not in self._tf_op._OPSETS:\n continue # pragma: no cover\n\n # look for a converter\n rews = self._tf_op._OPSETS[domain]\n target = min(self.target_opsets[domain], len(rews))\n conv = None\n for i in range(len(rews) - 1, -1, -1):\n if node.op_type in rews[i]:\n conv = rews[i][node.op_type]\n break\n if conv is None:\n continue\n\n # applies the converter\n if self.verbose:\n print( # pragma: no cover\n \"[Tf2OnnxConvert.run] convert node type=%r opset=%r name=%r\"\n \"\" % (node.op_type, target, node.name))\n fct, kwargs = conv\n fct(self, node, target_opset=target, **kwargs)\n modif += 1\n\n if turn >= self.max_iter:\n raise RuntimeError( # pragma: no cover\n \"Too many iterations and no stable ONNX was reached, \"\n \"iter=%d\\n%s\" % (turn, str(self.make_model())))\n return self.make_model()",
"def __init__(self, config: dict):\n super().__init__()\n\n self.input_dim = config[\"input_dim\"]\n self.output_dim = config[\"output_dim\"]\n self.gnn_type = config[\"type\"]\n\n gnn_conv = GNN_CONV[self.gnn_type]\n\n gnn_modules = []\n current_conv = None\n for params in config[\"gnn\"]:\n params[\"features\"] = current_conv\n\n conv = gnn_conv(**params)\n\n gnn_modules.append(conv)\n current_conv = conv\n\n # TODO: check if the output dim of this layer\n # is same as the input dim of the next layer\n\n self.add_module(\"conv\", gnn_modules[-1])",
"def merge_conv_bn(net):\n previous = None\n has_seen_cnn = False\n conv_replace_queue = []\n bn_replace_queue = []\n for s in net.children():\n if has_seen_cnn and isinstance(s, nn.BatchNorm2d):\n conv_replace_queue.append(previous)\n bn_replace_queue += [s]\n if isinstance(s, nn.Conv2d):\n has_seen_cnn = True\n else:\n has_seen_cnn = False\n previous = s\n if len(conv_replace_queue):\n if isinstance(net, nn.Sequential):\n for i, sub in enumerate(net):\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n net[i] = new_conv\n net[i + 1] = nn.Identity()\n else:\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n setattr(net, n, new_conv)\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.BatchNorm2d) and sub in bn_replace_queue:\n setattr(net, n, nn.Identity())",
"def convert_syncbn_model(self, module, process_group=None, channel_last=False):\n mod = module\n if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):\n return module\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\n mod = SyncBatchNorm(\n module.num_features,\n module.eps,\n module.momentum,\n module.affine,\n module.track_running_stats,\n ) # , process_group, channel_last=channel_last\n mod.running_mean = module.running_mean\n mod.running_var = module.running_var\n if module.affine:\n mod.weight.data = module.weight.data.clone().detach()\n mod.bias.data = module.bias.data.clone().detach()\n for name, child in module.named_children():\n mod.add_module(\n name,\n self.convert_syncbn_model(\n child, process_group=process_group, channel_last=channel_last\n ),\n )\n # TODO(jie) should I delete model explicitly?\n del module\n return mod",
"def forward_pass_on_convolutions(self, x):\n conv_output = None\n for module_name, module in self.model._modules.items():\n print(module_name)\n if module_name == 'fc':\n return conv_output, x\n x = module(x) # Forward\n # print(module_name, module)\n if module_name == self.target_layer:\n print('True')\n x.register_hook(self.save_gradient)\n conv_output = x # Save the convolution output on that layer\n return conv_output, x",
"def replace_conv(module: nn.Module):\n for name, mod in module.named_children():\n target_mod = getattr(module, name)\n if type(mod) == torch.nn.Conv2d:\n setattr(module, name, WSConv2d(target_mod.in_channels, target_mod.out_channels, target_mod.kernel_size,\n target_mod.stride, target_mod.padding, target_mod.dilation, target_mod.groups, target_mod.bias))\n \n if type(mod) == torch.nn.BatchNorm2d:\n setattr(module, name, torch.nn.Identity())\n\n for name, mod in module.named_children():\n replace_conv(mod)",
"def init_one_conv_in_block(newmodel, block_id, conv_id, model, freeze_conv, freeze_linear, special_id=-1):\n preserve = []\n prune = []\n linear = []\n # copy first conv, last BN and linear_last\n conv_first = None\n BN_last = None\n linear_last = None\n downsample_list = []\n\n for name0, m0 in model.named_children():\n if isinstance(m0, nn.Conv2d):\n if 'downsample' in name0:\n print('downsample in model')\n else:\n conv_first = m0\n elif isinstance(m0, nn.BatchNorm2d):\n BN_last = m0\n elif isinstance(m0, nn.Linear):\n linear_last = m0\n\n for m1 in newmodel.children():\n if isinstance(m1, nn.Conv2d):\n if special_id == 0:\n print('inti conv_first:', m1)\n prune.append(m1)\n else:\n conv_trans(conv_first, m1, freeze=freeze_conv, bias=False, mod_list=preserve)\n elif isinstance(m1, nn.BatchNorm2d):\n BN_trans(BN_last, m1, freeze=freeze_conv, mod_list=preserve)\n elif isinstance(m1, nn.Linear):\n linear_trans(linear_last, m1, freeze=freeze_linear, bias=True, mod_list=linear)\n\n # copy downsample\n for name0, m0 in model.named_modules():\n # print(name0)\n if 'downsample' in name0:\n downsample_list.append(m0)\n assert(len(downsample_list) == 3)\n\n downsample_id = 0\n for name1, m1 in newmodel.named_modules(): \n if 'downsample' in name1:\n m0 = downsample_list.pop(0)\n if downsample_id == special_id - 1:\n print('inti downsample_%d:' % downsample_id, m1)\n prune.append(m1)\n else:\n conv_trans(m0, m1, freeze=freeze_conv, bias=False, mod_list=preserve)\n downsample_id += 1\n\n if special_id == -1:\n block_id_mod = 0\n for block0, block1 in zip(model.modules(), newmodel.modules()):\n if isinstance(block0, Bottleneck):\n if block_id_mod == block_id:\n conv_id_mod = 0\n for (name0, m0), (name1, m1) in zip(block0.named_children(), block1.named_children()):\n if isinstance(m1, nn.Conv2d):\n if name0 != 'downsample':\n if conv_id_mod == conv_id:\n print('inti conv_%d_%d:' % (block_id_mod+1, conv_id_mod+1), m1)\n prune.append(m1)\n else:\n conv_trans(m0, m1, freeze=freeze_conv, bias=False, mod_list=preserve)\n conv_id_mod += 1\n elif isinstance(m1, nn.BatchNorm2d):\n if conv_id_mod == conv_id + 1:\n print('inti BN:', m1)\n prune.append(m1)\n else:\n BN_trans(m0, m1, freeze=freeze_conv, mod_list=preserve)\n else: \n for (name0, m0), (name1, m1) in zip(block0.named_children(), block1.named_children()):\n if isinstance(m1, nn.Conv2d):\n if name0 != 'downsample':\n conv_trans(m0, m1, freeze=freeze_conv, bias=False, mod_list=preserve)\n elif isinstance(m1, nn.BatchNorm2d):\n BN_trans(m0, m1, freeze=freeze_conv, mod_list=preserve)\n block_id_mod += 1\n else:\n for block0, block1 in zip(model.modules(), newmodel.modules()):\n if isinstance(block0, Bottleneck):\n for (name0, m0), (name1, m1) in zip(block0.named_children(), block1.named_children()):\n if isinstance(m1, nn.Conv2d):\n if name0 != 'downsample':\n conv_trans(m0, m1, freeze=freeze_conv, bias=False, mod_list=preserve)\n elif isinstance(m1, nn.BatchNorm2d):\n BN_trans(m0, m1, freeze=freeze_conv, mod_list=preserve)\n return preserve, prune, linear"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Recursively fuse conv and bn in a module. During inference, the functionality of batch norm layers is turned off; only the per-channel running mean and variance are used, which exposes the chance to fuse them into the preceding conv layers to save computation and simplify the network structure.
|
def fuse_conv_bn(module):
last_conv = None
last_conv_name = None
for name, child in module.named_children():
if isinstance(child,
(nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
if last_conv is None: # only fuse BN that is after Conv
continue
fused_conv = _fuse_conv_bn(last_conv, child)
module._modules[last_conv_name] = fused_conv
# To reduce changes, set BN as Identity instead of deleting it.
module._modules[name] = nn.Identity()
last_conv = None
elif isinstance(child, nn.Conv2d):
last_conv = child
last_conv_name = name
else:
fuse_conv_bn(child)
return module
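
A minimal usage sketch, assuming fuse_conv_bn and the _fuse_conv_bn helper above are both in scope and torch is installed; the small Sequential model is an arbitrary stand-in:

import torch
import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(),
    nn.Conv2d(16, 32, 3, padding=1),
    nn.BatchNorm2d(32),
)

# Populate the BN running statistics, then switch to eval mode before fusing.
model.train()
with torch.no_grad():
    model(torch.randn(8, 3, 16, 16))
model.eval()

x = torch.randn(1, 3, 16, 16)
with torch.no_grad():
    reference = model(x)
    fuse_conv_bn(model)  # fuses in place; BN layers are replaced by nn.Identity
    output = model(x)

print(torch.max(torch.abs(reference - output)).item())  # expected to be near zero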
|
[
"def _fuse_conv_bn(conv, bn):\n conv_w = conv.weight\n conv_b = conv.bias if conv.bias is not None else torch.zeros_like(\n bn.running_mean)\n\n factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)\n conv.weight = nn.Parameter(conv_w *\n factor.reshape([conv.out_channels, 1, 1, 1]))\n conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)\n return conv",
"def fuse_model(self):\n\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()",
"def efficient_conv_bn_eval_graph_transform(fx_model):\n modules = dict(fx_model.named_modules())\n\n patterns = [(torch.nn.modules.conv._ConvNd,\n torch.nn.modules.batchnorm._BatchNorm)]\n\n pairs = []\n # Iterate through nodes in the graph to find ConvBN blocks\n for node in fx_model.graph.nodes:\n # If our current node isn't calling a Module then we can ignore it.\n if node.op != 'call_module':\n continue\n target_module = modules[node.target]\n found_pair = False\n for conv_class, bn_class in patterns:\n if isinstance(target_module, bn_class):\n source_module = modules[node.args[0].target]\n if isinstance(source_module, conv_class):\n found_pair = True\n # Not a conv-BN pattern or output of conv is used by other nodes\n if not found_pair or len(node.args[0].users) > 1:\n continue\n\n # Find a pair of conv and bn computation nodes to optimize\n conv_node = node.args[0]\n bn_node = node\n pairs.append([conv_node, bn_node])\n\n for conv_node, bn_node in pairs:\n # set insertion point\n fx_model.graph.inserting_before(conv_node)\n # create `get_attr` node to access modules\n # note that we directly call `create_node` to fill the `name`\n # argument. `fx_model.graph.get_attr` and\n # `fx_model.graph.call_function` does not allow the `name` argument.\n conv_get_node = fx_model.graph.create_node(\n op='get_attr', target=conv_node.target, name='get_conv')\n bn_get_node = fx_model.graph.create_node(\n op='get_attr', target=bn_node.target, name='get_bn')\n # prepare args for the fused function\n args = (bn_get_node, conv_get_node, conv_node.args[0])\n # create a new node\n new_node = fx_model.graph.create_node(\n op='call_function',\n target=efficient_conv_bn_eval_control,\n args=args,\n name='efficient_conv_bn_eval')\n # this node replaces the original conv + bn, and therefore\n # should replace the uses of bn_node\n bn_node.replace_all_uses_with(new_node)\n # take care of the deletion order:\n # delete bn_node first, and then conv_node\n fx_model.graph.erase_node(bn_node)\n fx_model.graph.erase_node(conv_node)\n\n # regenerate the code\n fx_model.graph.lint()\n fx_model.recompile()",
"def freeze_bn(self):\n for layer in self.modules():\n if isinstance(layer, nn.modules.batchnorm._BatchNorm):\n layer.eval()",
"def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False",
"def replace_conv(module: nn.Module):\n for name, mod in module.named_children():\n target_mod = getattr(module, name)\n if type(mod) == torch.nn.Conv2d:\n setattr(module, name, WSConv2d(target_mod.in_channels, target_mod.out_channels, target_mod.kernel_size,\n target_mod.stride, target_mod.padding, target_mod.dilation, target_mod.groups, target_mod.bias))\n \n if type(mod) == torch.nn.BatchNorm2d:\n setattr(module, name, torch.nn.Identity())\n\n for name, mod in module.named_children():\n replace_conv(mod)",
"def gabor_model(inputs, train=True, norm=True, **kwargs):\n \n # propagate input targets\n outputs = inputs\n dropout = .5 if train else None\n input_to_network = inputs['images']\n \n with tf.variable_scope('conv1') as scope:\n #will be 96 43x43 filters with 6 spatial frequencies and 16 orientations\n weights = tf.get_variable(shape=[43, 43, 3, 96], dtype=tf.float32, \n initializer=tf.constant_initializer(gabor_initializer()), trainable=False, name='weights')\n conv = tf.nn.conv2d(input_to_network, weights,[1, 6, 6, 1], padding='SAME')#want to produce ~30x30 outputs\n biases = tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32), trainable=False, name='biases')\n bias = tf.nn.bias_add(conv, biases)\n relu = tf.nn.relu(bias, name='relu')\n pool = tf.nn.max_pool(value=relu, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding='SAME', name='pool')\n lrn = tf.nn.local_response_normalization(pool, depth_radius=5, bias=2, alpha=.0001, beta=.75) # bias=Kappa?\n \n #shape = np.product(lrn.shape.as_list()[1:])\n #flatten = tf.reshape(lrn, [-1, shape]) \n #dummyBias = tf.get_variable(shape=[shape,1000], dtype=tf.float32, \n # initializer=tf.contrib.layers.xavier_initializer(), trainable=True, name='dummyBias')\n #dummyFC = tf.nn.bias_add(flatten, dummyBias, name='dummyFC')\n \n \n outputs['conv1_kernel'] = weights\n outputs['conv1'] = lrn\n \n with tf.variable_scope('fc2') as scope:\n shape = np.product(outputs['conv1'].shape.as_list()[1:])\n flatten = tf.reshape(outputs['conv1'], [-1, shape])\n weights = tf.get_variable(shape=[16224,1000], dtype=tf.float32, \n initializer=tf.contrib.layers.xavier_initializer(), name='weights')\n biases = tf.Variable(tf.constant(0, shape=[1000], dtype=tf.float32), trainable=True, name='biases')\n fc = tf.nn.xw_plus_b(flatten, weights, biases)\n \n outputs['pred'] = fc\n \n ### END OF YOUR CODE\n for k in ['conv1', 'conv1_kernel', 'pred']:\n assert k in outputs, '%s was not found in outputs' % k\n\n return outputs, {}",
"def create_from_conv_bn(conv: torch.nn.modules.conv._ConvNd,\n bn: torch.nn.modules.batchnorm._BatchNorm,\n efficient_conv_bn_eval=True) -> 'ConvModule':\n self = ConvModule.__new__(ConvModule)\n super(ConvModule, self).__init__()\n\n self.conv_cfg = None\n self.norm_cfg = None\n self.act_cfg = None\n self.inplace = False\n self.with_spectral_norm = False\n self.with_explicit_padding = False\n self.order = ('conv', 'norm', 'act')\n\n self.with_norm = True\n self.with_activation = False\n self.with_bias = conv.bias is not None\n\n # build convolution layer\n self.conv = conv\n # export the attributes of self.conv to a higher level for convenience\n self.in_channels = self.conv.in_channels\n self.out_channels = self.conv.out_channels\n self.kernel_size = self.conv.kernel_size\n self.stride = self.conv.stride\n self.padding = self.conv.padding\n self.dilation = self.conv.dilation\n self.transposed = self.conv.transposed\n self.output_padding = self.conv.output_padding\n self.groups = self.conv.groups\n\n # build normalization layers\n self.norm_name, norm = 'bn', bn\n self.add_module(self.norm_name, norm)\n\n self.turn_on_efficient_conv_bn_eval(efficient_conv_bn_eval)\n\n return self",
"def _equalize_weights_unfolding_pact(self, bn_dict={}, verbose=False, eps=None):\n\n if not bn_dict:\n bn_dict = get_bn_dict_from_supernodes(self)\n\n module_dict = {}\n for n,m in self.named_modules():\n if (m.__class__.__name__ == \"PACT_Conv2d\" or \\\n m.__class__.__name__ == \"PACT_Conv1d\" or \\\n m.__class__.__name__ == \"PACT_Linear\" or \\\n m.__class__.__name__ == \"BatchNorm2d\" or \\\n m.__class__.__name__ == \"BatchNorm1d\" ):\n module_dict[n] = m\n for n_before in bn_dict.keys():\n n_after = bn_dict[n_before]\n m_before = module_dict[n_before]\n m_after = module_dict[n_after]\n if eps is None:\n eps = m_after.eps\n range_before = weight_range(m_before, 0)\n if verbose:\n logging.info(\"[Equalization by Unfolding] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, range_before.min().item(), range_before.max().item()))\n m_before.weight.data[:] = m_before.weight.data[:] / reshape_before(m_before, range_before)\n try:\n m_before.bias.data[:] = m_before.bias.data[:] / range_before\n except AttributeError:\n pass\n m_after.running_mean.data[:] = m_after.running_mean.data[:] / range_before\n m_after.weight.data[:] = m_after.weight.data[:] * reshape_after(m_after, range_before)\n if verbose:\n logging.info(\"[Equalization by Unfolding] %s: wrange_min=%.5f wrange_max=%.5f\" % (n_before, weight_range(m_before, 0).min().item(), weight_range(m_before, 0).max().item()))",
"def conv_norm_relu_pool_forward(x, w, b, conv_param, pool_param, gamma, beta, bn_param):\n conv, conv_cache = conv_forward_fast(x, w, b, conv_param)\n norm, norm_cache = spatial_batchnorm_forward(conv, gamma, beta, bn_param)\n relu, relu_cache = relu_forward(norm)\n out, pool_cache = max_pool_forward_fast(relu, pool_param)\n\n cache = (conv_cache, norm_cache, relu_cache, pool_cache)\n\n return out, cache",
"def _apply_layer_nonlocal_(self, is_training=True):\r\n # batch normalization and activation\r\n att_out = self._layer_output_['x']\r\n # if 'BN_0' in self.ops:\r\n # att_out = self.ops['BN_0'].apply(\r\n # att_out, is_training=is_training, label=self._layer_output_['y'])\r\n # att_out = self._apply_activation_(att_out)\r\n\r\n # attention map kernel f\r\n att_out_f = self.ops['f_x'].apply(att_out) # NxH1xW1xC2 or NxC2xH1xW1\r\n att_out_f = self.ops['bias_f'].apply(att_out_f)\r\n\r\n # attention map kernel g and h\r\n if 'downsampling' in self.ops:\r\n att_out_gh = self.ops['downsampling'].apply(att_out)\r\n else:\r\n att_out_gh = att_out\r\n att_out_g = self.ops['g_x'].apply(att_out_gh) # NxH2xW2xC2 or NxC2xH2xW2\r\n att_out_h = self.ops['h_x'].apply(att_out_gh) # NxH2xW2xC1 or NxC1xH2xW2\r\n\r\n # flatten the tensor and do batch multiplication\r\n att_shape_f = att_out_f.get_shape().as_list() # NxH1xW1xC2 or NxC2xH1xW1\r\n att_shape_g = att_out_g.get_shape().as_list() # NxH2xW2xC2 or NxC2xH2xW2\r\n with tf.name_scope('att_map'):\r\n if self.data_format == 'channels_first':\r\n c_float = np.float32(att_shape_g[1])\r\n att_out_f = tf.reshape(\r\n att_out_f, shape=(-1, att_shape_f[1], att_shape_f[2] * att_shape_f[3])) # NxC2xHW1\r\n att_out_g = tf.reshape(\r\n att_out_g, shape=(-1, att_shape_g[1], att_shape_g[2] * att_shape_g[3])) # NxC2xHW2\r\n if self.design['type'] in {'nl_dist', 'nl_pool_dist'}:\r\n dist_fg = get_batch_squared_dist(\r\n att_out_f, att_out_g, axis=1, mode='xy', name='squared_dist')\r\n att_map_logits = -dist_fg / c_float # NxHW1xHW2\r\n else:\r\n sqrt_channel = np.sqrt(c_float, dtype=np.float32)\r\n att_map_logits = tf.matmul(\r\n tf.transpose(att_out_f, [0, 2, 1]), att_out_g) / sqrt_channel # NxHW1xHW2\r\n # att_map_logits = tf.matmul(\r\n # tf.transpose(att_out_f, [0, 2, 1]), att_out_g) # NxHW1xHW2\r\n else: # channels_last\r\n c_float = np.float32(att_shape_g[3])\r\n att_out_f = tf.reshape(\r\n att_out_f, shape=(-1, att_shape_f[1] * att_shape_f[2], att_shape_f[3])) # NxHW1xC2\r\n att_out_g = tf.reshape(\r\n att_out_g, shape=(-1, att_shape_g[1] * att_shape_g[2], att_shape_g[3])) # NxHW2xC2\r\n if self.design['type'] in {'nl_dist', 'nl_pool_dist'}:\r\n dist_fg = get_batch_squared_dist(\r\n att_out_f, att_out_g, axis=2, mode='xy', name='squared_dist')\r\n att_map_logits = -dist_fg / c_float # NxHW1xHW2\r\n else:\r\n sqrt_channel = np.sqrt(c_float, dtype=np.float32)\r\n att_map_logits = tf.matmul(\r\n att_out_f, tf.transpose(att_out_g, [0, 2, 1])) / sqrt_channel # NxHW1xHW2\r\n # apply softmax to each row of att_map\r\n att_map = tf.nn.softmax(att_map_logits, axis=2) # NxHW1xHW2\r\n # att_map = att_map_logits / hw # NxHW1xHW2\r\n\r\n # get final attention feature map\r\n att_shape_h = att_out_h.get_shape().as_list() # NxH2xW2xC1 or NxC1xH2xW2\r\n with tf.name_scope('att_features'):\r\n if self.data_format == 'channels_first':\r\n att_out_h = tf.reshape(\r\n att_out_h, shape=(-1, att_shape_h[1], att_shape_h[2] * att_shape_h[3])) # NxC1xHW2\r\n att_out_o = tf.matmul(att_out_h, tf.transpose(att_map, [0, 2, 1])) # NxC1xHW1\r\n att_out_o = tf.reshape( # NxC1xH1xW1\r\n att_out_o,\r\n shape=(-1, att_shape_h[1], att_shape_f[2], att_shape_f[3]))\r\n else: # channels_last\r\n att_out_h = tf.reshape(\r\n att_out_h, shape=(-1, att_shape_h[1] * att_shape_h[2], att_shape_h[3])) # NxHW2xC1\r\n att_out_o = tf.matmul(att_map, att_out_h) # NxHW1xC1\r\n att_out_o = tf.reshape( # NxH1xW1xC1\r\n att_out_o,\r\n shape=(-1, att_shape_f[1], att_shape_f[2], att_shape_h[3]))\r\n\r\n # scalar 
kernel\r\n if 'BN_1' in self.ops: # conditional batch normalization for scalar has not been implemented\r\n att_out_o = self.ops['BN_1'].apply(\r\n att_out_o, is_training=is_training)\r\n att_out_o = self.ops['k_x'].apply(att_out_o)\r\n # if 'bias_k' in self.ops:\r\n # att_out = self.ops['bias_k'].apply(att_out)\r\n\r\n # layer_out\r\n self._layer_output_['x'] = att_out_o + self._layer_output_['x']",
"def _FoldFusedBatchNorms(graph):\n for match in _FindFusedBatchNorms(graph):\n scope, sep, _ = match.layer_op.name.rpartition('/')\n # Make sure new ops are added to `graph` and put on the same device as\n # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope\n # named `scope`. Otherwise, TF creates a unique scope whose name starts with\n # `scope`.\n with graph.as_default(), graph.name_scope(scope + sep), ops.device(\n match.bn_op.device):\n with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):\n # new weights = old weights * gamma / sqrt(variance + epsilon)\n # new biases = -mean * gamma / sqrt(variance + epsilon) + beta\n multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(\n match.variance_tensor + match.bn_op.get_attr('epsilon'))\n bias_tensor = math_ops.subtract(\n match.beta_tensor,\n match.mean_tensor * multiplier_tensor,\n name='bias')\n\n # The shape of depthwise weights is different, so we need to reshape the\n # multiplier_tensor to ensure that the scaled_weight_tensor has the\n # expected shape.\n if match.layer_op.type == 'DepthwiseConv2dNative':\n new_shape = [\n match.weight_tensor.get_shape().as_list()[2],\n match.weight_tensor.get_shape().as_list()[3]\n ]\n multiplier_tensor = array_ops.reshape(\n multiplier_tensor, new_shape, name='scale_reshape')\n\n # TODO(suharshs): This naming of the following ops needs to carefully\n # follow the naming expected by quantize.py. Generalize the quantize code\n # to not require these delicate naming conventions.\n scaled_weight_tensor = math_ops.multiply(\n match.weight_tensor, multiplier_tensor, name='mul_fold')\n\n new_layer_tensor = _CloneWithNewOperands(\n match.layer_op, match.input_tensor, scaled_weight_tensor)\n\n bias_add_tensor = math_ops.add(\n new_layer_tensor, bias_tensor, name='add_fold')\n\n nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,\n match.output_tensor)\n if nodes_modified_count != 1:\n raise ValueError(\n 'Unexpected inputs to op: %s' % match.output_tensor.name)",
"def replace_block(backbone_block,ticket_block,imagenet_ticket_type,shortcut=False):\n\n for i in range(len(backbone_block)):\n back = backbone_block[i]\n tick = ticket_block[i]\n\n tick_state_dict = tick.state_dict()\n back_state_dict = back.state_dict()\n\n print('Len: ',len(backbone_block))\n\n for i in range(len(backbone_block)):\n back = backbone_block[i]\n tick = ticket_block[i]\n\n if shortcut and i==0:\n #Downsample = shortcut.\n\n back_state_dict = back.shortcut.state_dict()\n tick_state_dict = tick.downsample[0].state_dict()\n\n #Copy cnn\n back_state_dict['weight'] = tick_state_dict['weight']\n back.shortcut.load_state_dict(back_state_dict)\n\n #copy bnorm\n back_bn_state_dict = back.shortcut.norm.state_dict()\n tick_bn_state_dict = tick.downsample[1].state_dict()\n\n for key in back_bn_state_dict:\n back_bn_state_dict[key] = tick_bn_state_dict[key]\n\n back.shortcut.norm.load_state_dict(back_bn_state_dict) \n\n\n #Set conv1 weights\n back_state_dict = back.conv1.state_dict()\n tick_state_dict = tick.conv1.state_dict()\n back_state_dict['weight'] = tick_state_dict['weight']\n back.conv1.load_state_dict(back_state_dict)\n #Set bn1\n #back.conv1.norm = FrozenBatchNorm2d.convert_frozen_batchnorm(tick.bn1)\n back_bn_state_dict = copy_bn(tick.bn1,back.conv1.norm)\n back.conv1.norm.load_state_dict(back_bn_state_dict)\n\n\n\n #Set conv2\n back_state_dict = back.conv2.state_dict()\n tick_state_dict = tick.conv2.state_dict()\n back_state_dict['weight'] = tick_state_dict['weight']\n back.conv2.load_state_dict(back_state_dict)\n #set bn2\n #back.conv2.norm = FrozenBatchNorm2d.convert_frozen_batchnorm(tick.bn2)\n \n back_bn_state_dict = copy_bn(tick.bn2,back.conv2.norm)\n back.conv2.norm.load_state_dict(back_bn_state_dict)\n\n\n if imagenet_ticket_type =='res50':\n #Additional conv in each block.\n\n #Set conv3\n back_state_dict = back.conv3.state_dict()\n tick_state_dict = tick.conv3.state_dict()\n back_state_dict['weight'] = tick_state_dict['weight']\n back.conv3.load_state_dict(back_state_dict)\n #set bn3\n #back.conv3.norm = FrozenBatchNorm2d.convert_frozen_batchnorm(tick.bn3)\n\n back_bn_state_dict = copy_bn(tick.bn3,back.conv3.norm)\n back.conv3.norm.load_state_dict(back_bn_state_dict)\n\n #print(\"Still left for resnet 50\")\n #breakpoint()\n\n\n backbone_block[i] = back",
"def fc(input, output, reuse=False, norm=None, activation=tf.nn.relu, dropout=0.7, is_training=True, name='fc'):\n with tf.variable_scope(name, reuse=reuse):\n x = slim.fully_connected(input, output, activation_fn=activation, normalizer_fn=norm, reuse=reuse)\n x = tf.nn.dropout(x, dropout)\n return x",
"def merge_conv_bn(net):\n previous = None\n has_seen_cnn = False\n conv_replace_queue = []\n bn_replace_queue = []\n for s in net.children():\n if has_seen_cnn and isinstance(s, nn.BatchNorm2d):\n conv_replace_queue.append(previous)\n bn_replace_queue += [s]\n if isinstance(s, nn.Conv2d):\n has_seen_cnn = True\n else:\n has_seen_cnn = False\n previous = s\n if len(conv_replace_queue):\n if isinstance(net, nn.Sequential):\n for i, sub in enumerate(net):\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n net[i] = new_conv\n net[i + 1] = nn.Identity()\n else:\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.Conv2d) and sub in conv_replace_queue:\n idx = conv_replace_queue.index(sub)\n bn = bn_replace_queue[idx]\n new_conv = fuse(sub, bn)\n setattr(net, n, new_conv)\n for n in dir(net):\n sub = getattr(net, n)\n if isinstance(sub, nn.BatchNorm2d) and sub in bn_replace_queue:\n setattr(net, n, nn.Identity())",
"def conv_layer(module, layer_index: int, layer_data: dict, prev_filters: int):\n activation = layer_data.get('activation')\n batch_normalize = int(layer_data.get('batch_normalize') or 0)\n filters = int(layer_data.get(\"filters\"))\n padding = int(layer_data.get(\"pad\"))\n kernel_size = int(layer_data.get(\"size\"))\n stride = int(layer_data.get(\"stride\"))\n\n if padding:\n pad = (kernel_size - 1) // 2\n else:\n pad = 0\n # Add convolutional layer to the layer sequence\n conv_sublayer = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias = not bool(batch_normalize))\n module.add_module(\"conv_{0}\".format(layer_index), conv_sublayer)\n\n # Chain batch normalization layer to convolutional layer\n if batch_normalize:\n batch_norm_sublayer = nn.BatchNorm2d(filters)\n module.add_module(\"batch_norm_{0}\".format(layer_index), batch_norm_sublayer)\n\n # Chain activation layer to previous layer to introduce non linearity in the model\n # Darknet only uses leaky ReLU activation function to convoutional layers\n if activation == \"leaky\":\n activation_sublayer = nn.LeakyReLU(0.1, inplace=True)\n module.add_module(\"leaky_{0}\".format(layer_index), activation_sublayer)\n return filters",
"def reset_model_bn_forward(self, model, bn_mean, bn_var):\n for name, m in model.named_modules():\n if isinstance(m, nn.BatchNorm2d):\n bn_mean[name] = AverageMeter()\n bn_var[name] = AverageMeter()\n\n def new_forward(bn, mean_est, var_est):\n def lambda_forward(x):\n batch_mean = x.mean(0, keepdim=True).mean(\n 2, keepdim=True).mean(3,\n keepdim=True) # 1, C, 1, 1\n batch_var = (x - batch_mean) * (x - batch_mean)\n batch_var = batch_var.mean(0, keepdim=True).mean(\n 2, keepdim=True).mean(3, keepdim=True)\n\n batch_mean = torch.squeeze(batch_mean).float()\n batch_var = torch.squeeze(batch_var).float()\n # 直接算正常的bn的mean 和 var\n\n # 累计mean_est = batch_mean * batch\n reduce_batch_mean = batch_mean.clone(\n ) / link.get_world_size()\n reduce_batch_var = batch_var.clone(\n ) / link.get_world_size()\n link.allreduce(reduce_batch_mean.data)\n link.allreduce(reduce_batch_var.data)\n mean_est.update(reduce_batch_mean.data, x.size(0))\n var_est.update(reduce_batch_var.data, x.size(0))\n\n # bn forward using calculated mean & var\n _feature_dim = batch_mean.size(0)\n return F.batch_norm(\n x,\n batch_mean,\n batch_var,\n bn.weight[:_feature_dim],\n bn.bias[:_feature_dim],\n False,\n 0.0,\n bn.eps,\n )\n\n return lambda_forward\n\n m.forward = new_forward(m, bn_mean[name], bn_var[name])",
"def _FoldUnfusedBatchNorms(graph):\n input_to_ops_map = input_to_ops.InputToOps(graph)\n\n for bn in common.BatchNormGroups(graph):\n has_scaling = _HasScaling(graph, input_to_ops_map, bn)\n\n # The mangling code intimately depends on BatchNorm node's internals.\n original_op, folded_op = _CreateFoldedOp(graph, bn, has_scaling=has_scaling)\n\n activation = common.GetEndpointActivationOp(graph, bn)\n if activation:\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[activation])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % activation.name)\n continue\n\n # Treat consumer ops in bypass modules differently since they have Add\n # operations instead of Relu* above.\n add_bypass_ctx = re.search(r'^(.*)/([^/]+)', bn).group(1)\n add_bypass = graph.get_operation_by_name(add_bypass_ctx + '/Add')\n nodes_modified_count = graph_editor.reroute_ts([folded_op.outputs[0]],\n [original_op.outputs[0]],\n can_modify=[add_bypass])\n if nodes_modified_count != 1:\n raise ValueError('Unexpected inputs to op: %s' % add_bypass.name)",
"def _conv_raw_weight_hook(conv: nn.Conv2d, input_data: torch.Tensor, output_data: torch.Tensor):\n if conv.groups != 1:\n # for SparseGate and deep-wise layer in MobileNet v2\n # note the `d_flops_in` and `d_flops_out` of SparseGate should NOT be used\n\n # in MobileNet v2, the groups will change according to the input channel and output channel\n assert conv.groups == conv.in_channels and conv.groups == conv.out_channels\n\n output_channels, output_height, output_width = output_data[0].size()\n\n if conv.groups == 1:\n new_conv_groups = conv.groups\n else:\n # the conv_groups will change according to the input channel and output channel\n new_conv_groups = conv.groups - 1\n\n kernel_ops = conv.kernel_size[0] * conv.kernel_size[1] * (conv.in_channels / new_conv_groups)\n d_kernel_ops_in = conv.kernel_size[0] * conv.kernel_size[1] * (1 / new_conv_groups)\n\n # flops = kernel_ops * output_channels * output_height * output_width\n if conv.groups == 1:\n # normal conv layer\n conv.d_flops_in = d_kernel_ops_in * output_channels * output_height * output_width\n conv.d_flops_out = kernel_ops * 1 * output_height * output_width\n else:\n # for deepwise layer\n # this layer will not be pruned, so do not set d_flops_out\n conv.d_flops_in = d_kernel_ops_in * (output_channels - 1) * output_height * output_width"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates this configuration object from a dictionary.
|
def update_from_dict(self, dct):
if not dct:
return
all_props = self.__class__.CONFIG_PROPERTIES
for key, value in six.iteritems(dct):
attr_config = all_props.get(key)
if attr_config:
setattr(self, key, value)
else:
self.update_default_from_dict(key, value)
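
A rough, self-contained sketch of the class layout this method assumes: a ``CONFIG_PROPERTIES`` registry of known properties plus an ``update_default_from_dict`` fallback for everything else. The class and attribute names here are illustrative, not the actual implementation.

import six

class ClientConfig(object):
    # Registered properties; in the real class each value carries conversion metadata.
    CONFIG_PROPERTIES = {'host': True, 'port': True}

    def __init__(self):
        self.host = 'localhost'
        self.port = 8080
        self.extra = {}

    def update_default_from_dict(self, key, value):
        # Fallback for keys that are not registered properties.
        self.extra[key] = value

    def update_from_dict(self, dct):
        # Same dispatch logic as the method shown above.
        if not dct:
            return
        all_props = self.__class__.CONFIG_PROPERTIES
        for key, value in six.iteritems(dct):
            if all_props.get(key):
                setattr(self, key, value)
            else:
                self.update_default_from_dict(key, value)

cfg = ClientConfig()
cfg.update_from_dict({'port': 9000, 'timeout': 30})
assert cfg.port == 9000 and cfg.extra == {'timeout': 30}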
|
[
"def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)",
"def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)",
"def _update_config(self, config_dict):\n for key in config_dict.keys():\n if key not in self._config:\n raise KeyError(''.join(['Key ', str(key), ' not in dictionary.']))\n self._config.update(config_dict)\n self._update_que.append(config_dict.keys())\n if not self._running_update:\n self._update_frontend()",
"def updateBuckconfigWithDict(self, values):\n for section, kvps in values.items():\n for key, value in kvps.items():\n self.buckconfig[section][key] = value",
"def update_from_dict(self, dict_values):\n self.delta = dict_values.get(\"delta\", self.delta)\n self.K = dict_values.get(\"K\", self.K)\n self.update_centroids_from_list(dict_values[\"centroids\"])\n return self",
"def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))",
"def update(self, dict=None, **kwargs):\n\n # dict.update does not seem to call self.__getitem__,\n # so need to redefine update method here.\n\n # todo: Support sequence of (key, value) pairs here.\n if dict is not None:\n for k, v in dict.items():\n self[k] = v\n\n for k, v in kwargs.items():\n self[k] = v",
"def from_dict(cls, dict_obj):\n config = cls()\n for k, v in dict_obj.items():\n setattr(config, k, v)\n return config",
"def load_from_dict(self, dict_):\n policies = dict_.get('policies', None)\n super(Config, self).load_from_dict(\n {k: v for k, v in six.iteritems(dict_) if k != 'policies'})\n if policies is not None:\n self.policies = policies",
"def _update_config_dict(self, config_fpath: str, config_dict: Optional[dict[str, Any]] = None) -> dict[str, Any]:\n if config_dict is None:\n to_update = {}\n else:\n to_update = deepcopy(config_dict)\n with open(config_fpath, 'rb') as f:\n to_update.update(tomli.load(f))\n return to_update",
"def update(self, adict):\n for key, val in adict.items():\n setattr(self,key,val)",
"def from_dict(cls, input_dict, update=False) -> 'IspindelConfiguration' or None:\n try:\n obj = cls.objects.get(uuid=input_dict['uuid'])\n if not update:\n return None\n except cls.DoesNotExist:\n obj = cls()\n\n obj.sensor_id = input_dict['sensor_id']\n obj.name_on_device = input_dict['name_on_device']\n obj.third_degree_coefficient = input_dict['third_degree_coefficient']\n obj.second_degree_coefficient = input_dict['second_degree_coefficient']\n obj.first_degree_coefficient = input_dict['first_degree_coefficient']\n obj.constant_term = input_dict['constant_term']\n obj.temperature_correction = input_dict['temperature_correction']\n obj.coefficients_up_to_date = input_dict['coefficients_up_to_date']\n obj.uuid = input_dict['uuid']\n\n return obj",
"def from_dict(self, values):\n for k in values.keys():\n setattr(self, k, values[k])",
"def _deep_update_config(config, updates):\n for key, value in updates.iteritems():\n if isinstance(value, collections.Mapping):\n config[key] = DexNet._deep_update_config(config.get(key, {}), value)\n else:\n config[key] = value\n return config",
"def override_from_dict(self, values_dict):\n for name, value in values_dict.items():\n self.set_hparam(name, value)\n return self",
"def from_dict(d):\n c = ConfigParser()\n for section in d.keys():\n c.add_section(section)\n for option, value in d[section].iteritems():\n c.set(section, option, value)\n return c",
"def set_attr_from_dict(self, dictionary):\n for key in dictionary:\n self.__setattr__(key, dictionary.get(key))",
"def from_dict(self, d):\n\n for attr in self.ser_attrs:\n\n if attr == 'options':\n self.options.update(d[attr])\n else:\n try:\n setattr(self, attr, d[attr])\n except KeyError:\n log.debug(\"FlatCAMObj.from_dict() --> KeyError: %s. \"\n \"Means that we are loading an old project that don't\"\n \"have all attributes in the latest application version.\" % str(attr))\n pass",
"def from_dict(cls, dict_):\n items = {}\n for key, value in dict_.items():\n if isinstance(value, dict):\n value = _ConfigGroup.from_dict(value)\n items[key] = value\n return cls(items)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Merges list-based attributes into one list that includes the unique elements from both lists. When ``lists_only`` is set to ``False``, dictionaries are also updated and single-value attributes are overwritten. The resulting configuration is 'clean', i.e. input values are converted and validated. If the conversion is not possible, a ``ValueError`` is raised.
|
def merge(self, values, lists_only=False):
if isinstance(values, self.__class__):
self.merge_from_obj(values, lists_only=lists_only)
elif isinstance(values, dict):
self.merge_from_dict(values, lists_only=lists_only)
else:
raise ValueError("{0} or dictionary expected; found '{1}'.".format(self.__class__.__name__,
type(values).__name__))
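
The ``merge_from_obj`` and ``merge_from_dict`` helpers are not shown here. A hypothetical sketch of the list-merging rule described in the docstring, i.e. appending only elements that are not already present:

def _merge_lists(current, incoming):
    # Keep the existing order and add only previously unseen elements.
    return list(current) + [item for item in incoming if item not in current]

assert _merge_lists(['a', 'b'], ['b', 'c']) == ['a', 'b', 'c']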
|
[
"def removeDupes(self):\n \n for key in self.attr_dict_list[0].keys():\n self.attrs[key] = set()\n\n for d in self.attr_dict_list:\n for attr in d:\n self.attrs[attr].add(tuple(d[attr].items()))\n\n for key in self.attrs:\n temp = [dict(t) for t in self.attrs[key]]\n self.attrs[key] = temp",
"def canonicalize_attr_map(attr_map):\n for attr, val in attr_map.iteritems():\n if not isinstance(val, list):\n attr_map[attr] = [val] \n return attr_map",
"def _check_attributes(attribute_list, types, back_end, attribute_specs,\n context_name, module_source_file):\n if attribute_specs is None:\n attribute_specs = []\n errors = []\n already_seen_attributes = {}\n for attr in attribute_list:\n if attr.back_end.text:\n if attr.back_end.text != back_end:\n continue\n else:\n if back_end is not None:\n continue\n attribute_name = _attribute_name_for_errors(attr)\n if (attr.name.text, attr.is_default) in already_seen_attributes:\n original_attr = already_seen_attributes[attr.name.text, attr.is_default]\n errors.append([\n error.error(module_source_file,\n attr.source_location,\n \"Duplicate attribute '{}'.\".format(attribute_name)),\n error.note(module_source_file,\n original_attr.source_location,\n \"Original attribute\")])\n continue\n already_seen_attributes[attr.name.text, attr.is_default] = attr\n\n if (attr.name.text, attr.is_default) not in attribute_specs:\n if attr.is_default:\n error_message = \"Attribute '{}' may not be defaulted on {}.\".format(\n attribute_name, context_name)\n else:\n error_message = \"Unknown attribute '{}' on {}.\".format(attribute_name,\n context_name)\n errors.append([error.error(module_source_file,\n attr.name.source_location,\n error_message)])\n else:\n errors.extend(types[attr.name.text](attr, module_source_file))\n return errors",
"def _clean_attributes(self):\n for attr in self._all_attrs:\n item = getattr(self, attr)\n if item is not None:\n cleaned_item = []\n for sub_item in item:\n if attr == \"download_urls\":\n sub_item = sub_item.strip()\n else:\n if isinstance(sub_item, str):\n sub_item = clean_string(sub_item)\n elif isinstance(sub_item, datetime):\n sub_item = sub_item.date()\n if attr in [\"case_names\", \"docket_numbers\"]:\n sub_item = harmonize(sub_item)\n cleaned_item.append(sub_item)\n self.__setattr__(attr, cleaned_item)",
"def update_list_properties(self, instance_conf, keys = ['security_group_ids']):\n for k in keys:\n if k in instance_conf:\n instance_conf[k] = self.make_list(instance_conf[k])",
"def serialize(self, values, *, null_check: bool = True):\n rval = []\n for idx, value in enumerate(values):\n attr = self._get_serialize_class(value)\n if self.element_type and value is not None and not isinstance(attr, self.element_type):\n raise ValueError(\"List elements must be of type: {}\".format(self.element_type.__name__))\n attr_type = attr.attr_type\n try:\n if isinstance(attr, (ListAttribute, MapAttribute)):\n attr_value = attr.serialize(value, null_check=null_check)\n else:\n attr_value = attr.serialize(value)\n except AttributeNullError as e:\n e.prepend_path(f'[{idx}]')\n raise\n if attr_value is None:\n # When attribute values serialize to \"None\" (e.g. empty sets) we store {\"NULL\": True} in DynamoDB.\n attr_type = NULL\n attr_value = True\n rval.append({attr_type: attr_value})\n return rval",
"def _get_merged_attributes(self, attributes):\n merged_attributes = {}\n for attribute in attributes:\n same_slug_attrs = merged_attributes.setdefault(attribute.slug, [])\n same_slug_attrs.append(attribute)\n return merged_attributes",
"def remove_identical_attributes(self):\n merges = []\n if self._attributes is None:\n return merges\n\n if len(self._attributes) <= 1:\n return []\n\n new_attrs = []\n me_attr = self._attributes.pop()\n while me_attr is not None:\n found_match = False\n for other_attr in self._attributes:\n if me_attr == other_attr:\n merges.append(me_attr)\n found_match = True\n break\n if found_match is False:\n new_attrs.append(me_attr)\n if len(self._attributes) == 0:\n break\n me_attr = self._attributes.pop()\n\n self._attributes = new_attrs\n return merges",
"def add_list_cls_to_items(self, objects):\n list_objects = [x for x in objects if x[\"list_attr\"]]\n complex_list_objects = [\n x for x in list_objects if not x[\"list_attr\"][\"is_simple\"]\n ]\n item_objects = [x for x in objects if not x[\"list_attr\"]]\n mismatch1 = []\n mismatch2 = []\n for obj in complex_list_objects:\n list_attr = obj[\"list_attr\"]\n if list_attr[\"name\"] in obj[\"api_name\"]:\n item_obj = [\n x for x in objects if x[\"class_name\"] == list_attr[\"type_camel\"]\n ]\n item_obj[0][\"list_cls\"] = obj[\"class_name\"]\n item_obj[0][\"list_name\"] = obj[\"api_name\"]\n else:\n mismatch1.append(obj)\n\n for obj in mismatch1:\n list_attr = obj[\"list_attr\"]\n item_obj = [x for x in item_objects if x[\"src_type\"] == list_attr[\"type\"]]\n if item_obj:\n if \"list_cls\" not in item_obj[0]:\n item_obj[0][\"list_cls\"] = obj[\"class_name\"]\n item_obj[0][\"list_name\"] = obj[\"api_name\"]\n else:\n mismatch2.append(obj)\n\n for obj in item_objects:\n obj[\"list_cls\"] = obj.get(\"list_cls\", None)\n obj[\"list_name\"] = obj.get(\"list_name\", \"\")\n return objects",
"def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val",
"def _update_attribute_types(cls, attribute_values: Dict[str, Dict[str, Any]]):\n for attr in cls.get_attributes().values():\n attribute_value = attribute_values.get(attr.attr_name)\n if attribute_value:\n AttributeContainer._coerce_attribute_type(attr.attr_type, attribute_value)\n if isinstance(attr, ListAttribute) and attr.element_type and LIST in attribute_value:\n if issubclass(attr.element_type, AttributeContainer):\n for element in attribute_value[LIST]:\n if MAP in element:\n attr.element_type._update_attribute_types(element[MAP])\n else:\n for element in attribute_value[LIST]:\n AttributeContainer._coerce_attribute_type(attr.element_type.attr_type, element)\n if isinstance(attr, AttributeContainer) and MAP in attribute_value:\n attr._update_attribute_types(attribute_value[MAP])",
"def _restore_mutable_attr(args_list, compile_args):\n new_compile_args = ()\n for idx, arg in enumerate(args_list):\n if hasattr(arg, \"__ms_mutable__\") and getattr(arg, \"__ms_mutable__\") and \\\n not (hasattr(arg, \"const_arg\") and getattr(arg, \"const_arg\")):\n if hasattr(arg, \"__ms_dynamic_len__\"):\n new_compile_args += (mutable(compile_args[idx], getattr(arg, \"__ms_dynamic_len__\")),)\n else:\n new_compile_args += (mutable(compile_args[idx], False),)\n else:\n new_compile_args += (compile_args[idx],)\n return new_compile_args",
"def merge_attribute_defs(self, dest, source, changes = {}):\n # print \"in merge_attribute_defs, dest =\"\n # pp.pprint(dest)\n # print \"source =\"\n # pp.pprint(source)\n for aid in source.keys():\n if aid not in dest.keys():\n # copy attribute, then check for append\n dest[aid] = copy.deepcopy(source[aid])\n if 'value' in dest[aid]:\n if type(dest[aid]['value']) is str and dest[aid]['value'][0]=='+':\n dest[aid]['value'] = dest[aid]['value'].lstrip('+')\n changes[aid] = dest[aid]['value']\n continue \n if 'value' not in dest[aid]:\n if 'value' in source[aid]:\n dest[aid]['value'] = source[aid]['value']\n if (type(dest[aid]['value']) is str and dest[aid]['value'][0] == '+'):\n dest[aid]['value'] = dest[aid]['value'].lstrip('+') \n changes[aid] = dest[aid]['value']\n continue\n else:\n print (\"** Error, merging attribute '%s' but value not specified in source\"\n \" or destination\") % aid\n traceback.print_stack()\n sys.exit(1) \n else:\n if 'value' in source[aid]: \n # value given in both source and destination\n self.append_or_replace(dest[aid], source[aid], 'value', \"attribute %s\" % aid)\n changes[aid] = dest[aid]['value'] # save changed value\n else:\n print (\"** Warning, node at:\\n%s\\nmerging attribute '%s'\" \n \" but value to merge not specified.\") % (self.full_path, aid)\n print \"source attributes:\"\n pp.pprint(source)\n print \"dest attributes:\"\n pp.pprint(dest)",
"def as_unique_attributes(data, exclude=None, include=None, raise_error=False):\n include = set(include or [])\n exclude = set(exclude or set()).union(\"id\")\n fields = greedy_set_cover(data, exclude=exclude, raise_error=raise_error)\n\n if len({tuple(sorted(obj.keys())) for obj in data}) > 1:\n raise InconsistentFields\n\n def formatter(obj, fields, include):\n return {\n key: value\n for key, value in obj.items()\n if (key in fields or key in include or key == \"id\")\n }\n\n return (\n fields.union(include).union({\"id\"}),\n [formatter(obj, fields, include) for obj in data],\n )",
"def merge_edge_props(attrs: dict, additional_attrs: dict):\n result = attrs\n for (key, value) in additional_attrs.items():\n if key not in ['in', 'out']:\n if type(additional_attrs[key]) is list:\n if key not in result:\n result[key] = []\n result[key].extend(additional_attrs[key])\n result[key] = list(set(result[key])) # silly solution to find unique elements\n else:\n result[key] = value\n return result",
"def normalize_list(self, tokens):\n pass",
"def normalize_set(self, items, **kwargs):\n values = set()\n for item in ensure_list(items):\n values.update(self.normalize(item, **kwargs))\n return list(values)",
"def _concat_attr(self, other, attrs=None, ignore_duplicate=True):\n new_obj = deepcopy(self)\n if attrs is None:\n attrs = ('components', 'y', 'Fs') + self.stackable\n elif type(attrs) is not tuple:\n attrs = tuple(attrs)\n\n for attr_name in attrs:\n self_attr = getattr(self, attr_name)\n other_attr = getattr(other, attr_name)\n if self_attr is None and other_attr is None:\n pass\n elif other_attr is None:\n pass\n elif self_attr is None:\n setattr(new_obj, attr_name, deepcopy(other_attr))\n else:\n if attr_name == 'Fs':\n assert self_attr == other_attr, 'Observed data have different sampling frequencies.'\n if attr_name == 'components':\n assert len(self_attr) == len(other_attr), 'Components have different shapes.'\n elif len(other_attr.shape) > 2:\n for j in range(other_attr.shape[2]):\n model_attr = other_attr[:, :, j]\n add_flag = True\n if len(self_attr.shape) > 2:\n for k in range(self_attr.shape[2]):\n if (self_attr[:, :, k] == model_attr).all(): # type: ignore\n add_flag = False\n else:\n if (self_attr == model_attr).all(): # type: ignore\n add_flag = False\n if add_flag or ignore_duplicate is False:\n setattr(new_obj, attr_name, np.dstack([getattr(new_obj, attr_name), model_attr]))\n else:\n model_attr = other_attr\n add_flag = True\n if len(self_attr.shape) > 2:\n for k in range(self_attr.shape[2]):\n if (self_attr[:, :, k] == model_attr).all(): # type: ignore\n add_flag = False\n else:\n if (self_attr == model_attr).all(): # type: ignore\n add_flag = False\n if add_flag or ignore_duplicate is False:\n setattr(new_obj, attr_name, np.dstack([getattr(new_obj, attr_name), model_attr]))\n new_obj._check_dimensions()\n return new_obj",
"def _serialize_list_fields(self, etreeModel, request):\n for list_field in self.list_fields:\n show_collapsed = getattr(self, '_supports_collapsed_collection', False)\n for val in getattr(self, list_field, []):\n if hasattr(val, '_meta'):\n # This is a db model...\n # now if the collection is marked collapsable mark the kids\n # as things we need to show in summary view, to\n if getattr(self, '_supports_collapsed_collection', False):\n val._summarize = True\n xobjModelVal = val.serialize(request, tag=list_field)\n elif isinstance(val, HrefField):\n tag = list_field\n # We do have mixed lists like JobResults, so let the\n # tag be variable\n if val.tag is not None:\n tag = val.tag\n xobjModelVal = val.serialize_value(request, tag=tag)\n elif isinstance(val, basestring):\n xobjModelVal = etree.Element(list_field)\n xobjModelVal.text = unicode(val)\n else:\n xobjModelVal = val\n etreeModel.append(xobjModelVal)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a copy of the current instance.
|
def copy(self):
return self.__class__(self)
|
[
"def copy(self):\n return self.__copy__()",
"def clone(self):\r\n import copy\r\n return self._wrap(copy.copy(self.obj))",
"def copy(self):\n return Sample(**self.__dict__)",
"def __copy__(self):\n trait_data = self.__getstate__()\n inst = self.__class__.create(trait_data)\n return inst",
"def copy( self ):\n\n\t\treturn State( **self.__dict__ )",
"def copy(self):\n return self.mutate().simple_copy()",
"def copy(self):\n cls = type(self)\n # Create a new instance without calling __init__: parameters are\n # different depending on the class.\n new_box = cls.__new__(cls)\n # Copy attributes\n new_box.__dict__.update(self.__dict__)\n return new_box",
"def copy(self) -> Widget:\n\n return deepcopy(self)",
"def copy(self):\n\n copy = self.__class__(*[a.copy() for a in self.atoms()])\n copy._id, copy._name = self._id, self._name\n return copy",
"def __copy__(self):\n return self.__class__(xml=copy.deepcopy(self.xml),\n stream=self.stream)",
"def copy(self):\n return self.__class__(\n copy.deepcopy(self.content),\n self.channel.name,\n self.channel_layer,\n )",
"def copy(self):\n new_client = self._client.copy()\n return self.__class__(self.instance_id, new_client,\n self._cluster_location_id,\n display_name=self.display_name)",
"def create_copy(self) -> Copy:\n\n return self._runtime.create_copy()",
"def copy(self):\n a = Motif()\n a.__dict__ = self.__dict__.copy()\n return a",
"def clone(self):\n return type(self)(iterator=self)",
"def deepcopy(self):\n return copy.deepcopy(self)",
"def __copy__(self):\n out = self.__class__(self.name, self.data, self.dimensions[:],\n self.attributes.copy())\n out.id = self.id\n return out",
"def copy(self):\n new = Loopingprofile()\n new.state = self.state.copy()\n return new",
"def return_copy(self):\n return copy.deepcopy(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Cleans the input values of this configuration object. Fields that have been updated through properties are converted to configuration values in the format needed by the functions that use them. For example, for list-like values this means that a single string input is transformed into a single-entry list. If this conversion fails, a ``ValueError`` is raised.
|
def clean(self):
all_props = self.__class__.CONFIG_PROPERTIES
for prop_name in self._modified:
attr_config = all_props.get(prop_name)
if attr_config and attr_config.input_func:
self._config[prop_name] = attr_config.input_func(self._config[prop_name])
self._modified.clear()
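
A sketch of what an ``input_func`` for a list-like property might look like, following the conversion behaviour described above (a single string becomes a single-entry list); the helper name is illustrative:

def as_list(value):
    # Hypothetical input_func for a list-like configuration property.
    if value is None:
        return []
    if isinstance(value, (list, tuple)):
        return list(value)
    if isinstance(value, str):
        return [value]
    raise ValueError("Cannot convert {0!r} to a list.".format(value))

assert as_list('eth0') == ['eth0']
assert as_list(('eth0', 'eth1')) == ['eth0', 'eth1']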
|
[
"def clean(self, value):\n value = self.validate_to_python(value)\n self.run_validators(value)\n return value",
"def _clean_inputs(self, inputs):\n return inputs",
"def _trans_format(self):\n config_dict = vars(self._config)\n for item, value in config_dict.items():\n if value == 'None':\n config_dict[item] = None\n elif isinstance(value, str) and is_number(value):\n if value.isdigit():\n value = int(value)\n else:\n value = float(value)\n config_dict[item] = value",
"def _clean_attribute_settings(cls, instance, cleaned_input):\n attribute_input_type = cleaned_input.get(\"input_type\") or instance.input_type\n errors = {}\n for field in ATTRIBUTE_PROPERTIES_CONFIGURATION.keys():\n allowed_input_type = ATTRIBUTE_PROPERTIES_CONFIGURATION[field]\n if attribute_input_type not in allowed_input_type and cleaned_input.get(\n field\n ):\n errors[field] = ValidationError(\n f\"Cannot set {field} on a {attribute_input_type} attribute.\",\n code=AttributeErrorCode.INVALID.value,\n )\n if errors:\n raise ValidationError(errors)",
"def force_clean(self, caller=True): # TODO: can't make call to super...\r\n obj_key = NAME_TO_KEY[self.__class__.__name__]\r\n if caller:\r\n self.to_graph_objs(caller=False)\r\n del_keys = [key for key in self if str(key) not in INFO[obj_key]]\r\n for key in del_keys:\r\n if (key[:5] == 'xaxis') or (key[:5] == 'yaxis'):\r\n try:\r\n test_if_int = int(key[5:])\r\n except ValueError:\r\n del self[key]\r\n else:\r\n del self[key]\r\n keys = self.keys()\r\n for key in keys:\r\n try:\r\n self[key].force_clean(caller=False) # TODO error handling??\r\n except AttributeError:\r\n pass\r\n if isinstance(self[key], (dict, list)):\r\n if len(self[key]) == 0:\r\n del self[key] # clears empty collections!\r\n elif self[key] is None:\r\n del self[key]",
"def clean_entry(self, entry):\n for key, value in entry.items():\n if isinstance(value, list):\n value = [v.replace('{{!}}', '|') for v in value]\n value = list(\n filter(lambda x: x != self.options.get('na_value'), value))\n else:\n value = value.replace('{{!}}', '|')\n if value == self.options.get('na_value'):\n value = ''\n entry[key] = value\n\n return entry",
"def _adjust_input_properties(input_properties):\n\n default_value = input_properties.get('default_value', UNDEFINED)\n\n # Derive undefined 'nullable' from 'default_value'\n nullable = input_properties.get('nullable', UNDEFINED)\n if nullable is UNDEFINED and default_value is None:\n input_properties['nullable'] = True\n\n # Derive undefined 'data_type' from 'default_value'\n data_type = input_properties.get('data_type', UNDEFINED)\n if data_type is UNDEFINED and not (default_value is UNDEFINED or default_value is None):\n input_properties['data_type'] = type(default_value)",
"def clean(self):\n if not self.is_input and not self.is_output:\n raise ValidationError(\"TransformationXput with pk={} is neither an input nor an output\".format(self.pk))\n if self.has_structure:\n self.structure.clean()",
"def coerce(cls, values):\n for key, value in values.items():\n values[key] = value = value.decode(\"utf-8\")\n if key in cls._properties:\n try:\n values[key] = cls._properties[key].to_python(value)\n except ValueError, e:\n raise BadValueError(\"Attribute '%s' with value '%s' couldn't be validated: %s\"\n % (key, value, e.message))\n\n return values",
"def clean(self, value, model_instance):\n value = self.to_python(value)\n if value is not None:\n value = self.get_sanitizer()(value)\n self.validate(value, model_instance)\n self.run_validators(value)\n return value",
"def test_construct_values_raises_on_invalid_normalize(self):\n constructor_fields = ('first', 'second', 'last', )\n values = ['first_value', 'second_value', 'last_value']\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update(dict(zip(constructor_fields, values)))\n self.form.cleaned_data = cleaned_data\n normalize = 'not a valid normalize function'\n message = \"The normalize parameter must be a callable or None. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values(constructor_fields, normalize=normalize)",
"def autostrip(cls):\r\n fields = [(key, value)\r\n for key, value in cls.base_fields.iteritems()\r\n if isinstance(value, CharField)]\r\n for field_name, field_object in fields:\r\n def get_clean_func(original_clean):\r\n return lambda value: original_clean(value and value.strip())\r\n clean_func = get_clean_func(getattr(field_object, 'clean'))\r\n setattr(field_object, 'clean', clean_func)\r\n return cls",
"def clean_configurations(self):\n cfg_limits = self.spectrograph.valid_configuration_values()\n if cfg_limits is None:\n # No values specified, so we're done\n return\n\n good = np.ones(len(self), dtype=bool)\n for key in cfg_limits.keys():\n # NOTE: For now, check that the configuration values were\n # correctly assigned in the spectrograph class definition.\n # This should probably go somewhere else or just removed.\n assert isinstance(cfg_limits[key], list), \\\n 'CODING ERROR: valid_configuration_values is not correctly defined ' \\\n 'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)\n\n # Check that the metadata are valid for this column.\n indx = np.isin(self[key], cfg_limits[key])\n if not np.all(indx):\n msgs.warn('Found frames with invalid {0}.'.format(key))\n good &= indx\n\n if np.all(good):\n # All values good, so we're done\n return\n\n # Alert the user that some of the frames are going to be\n # removed\n msg = 'The following frames have configurations that cannot be reduced by PypeIt' \\\n ' and will be removed from the metadata table (pypeit file):\\n'\n indx = np.where(np.logical_not(good))[0]\n for i in indx:\n msg += ' {0}\\n'.format(self['filename'][i])\n msgs.warn(msg)\n # And remove 'em\n self.table = self.table[good]",
"def sanitize_conf_values(conf_values):\n for key in list(conf_values.keys()):\n if not conf_values[key]:\n conf_values[key] = '\"\"'\n return conf_values",
"def clean_config(self, config):\n return config",
"def autostrip(cls):\n fields = [(key, value) for key, value in cls.base_fields.iteritems()\n if isinstance(value, forms.CharField)]\n for field_name, field_object in fields:\n def get_clean_func(original_clean):\n return lambda value: original_clean(value and value.strip())\n clean_func = get_clean_func(getattr(field_object, 'clean'))\n setattr(field_object, 'clean', clean_func)\n return cls",
"def _normalize(self):\n if (\n self.config[\"normalize_field\"] is not None\n and self.config[\"normalize_all\"] is True\n ):\n raise ValueError(\n \"Normalize_field and normalize_all can't be set at the same time.\"\n )\n\n if self.config[\"normalize_field\"]:\n fields = self.config[\"normalize_field\"]\n for field in fields:\n if field not in self.field2type:\n raise ValueError(f\"Field [{field}] does not exist.\")\n ftype = self.field2type[field]\n if ftype != FeatureType.FLOAT and ftype != FeatureType.FLOAT_SEQ:\n self.logger.warning(\n f\"{field} is not a FLOAT/FLOAT_SEQ feat, which will not be normalized.\"\n )\n elif self.config[\"normalize_all\"]:\n fields = self.float_like_fields\n else:\n return\n\n self.logger.debug(set_color(\"Normalized fields\", \"blue\") + f\": {fields}\")\n\n for field in fields:\n for feat in self.field2feats(field):\n\n def norm(arr):\n mx, mn = max(arr), min(arr)\n if mx == mn:\n self.logger.warning(\n f\"All the same value in [{field}] from [{feat}_feat].\"\n )\n arr = 1.0\n else:\n arr = (arr - mn) / (mx - mn)\n return arr\n\n ftype = self.field2type[field]\n if ftype == FeatureType.FLOAT:\n feat[field] = norm(feat[field].values)\n elif ftype == FeatureType.FLOAT_SEQ:\n split_point = np.cumsum(feat[field].agg(len))[:-1]\n feat[field] = np.split(\n norm(feat[field].agg(np.concatenate)), split_point\n )",
"def clear_field_values(self):\n\t\tlogging.info(\"Clearing values in the field[] dictionary of the object\")\n\t\tlogging.debug(\"Before = \" + str(self.field))\n\t\tfor key, value in self.fields.items():\n\t\t\tself.field[str(key)] = None\n\t\tlogging.debug(\"After = \" + str(self.field))\n\t\treturn",
"def _cleanupTargetData(self, targetData):\n\n for key in targetData:\n\n # Strips strings\n if isinstance(targetData[key], six.string_types):\n targetData[key] = targetData[key].strip()\n\n # Checks if a value can be converted to float\n try:\n float(targetData[key])\n targetData[key] = float(targetData[key])\n except:\n pass\n\n try:\n # Checks if value(s) are NaN\n isNaN = np.isnan(targetData[key])\n if np.any(isNaN):\n if np.isscalar(isNaN):\n targetData[key] = -999.\n else:\n targetData[key][isNaN] = -999.\n\n # Does the same for infs\n isInf = np.isinf(targetData[key])\n if np.any(isInf):\n if np.isscalar(isInf):\n targetData[key] = -999.\n else:\n targetData[key][isInf] = -999.\n except TypeError:\n pass\n\n return targetData"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Whether the current object is 'clean', i.e. has no non-converted input.
|
def is_clean(self):
return not self._modified
|
[
"def __bool__(self):\n return self.is_valid",
"def _clean( self ):\n\t\tself.__is_dirty = False",
"def clean(self):\n if not self.is_input and not self.is_output:\n raise ValidationError(\"TransformationXput with pk={} is neither an input nor an output\".format(self.pk))\n if self.has_structure:\n self.structure.clean()",
"def is_raw(self):\n return not self.has_structure",
"def is_clean(self):\n return self._badness == 0 and \\\n self.age + self.MAX_GOOD_AGE <= time.time()",
"def __bool__(self):\n return bool(self.obj)",
"def __bool__(self):\n return not self.undefine",
"def is_ignorable(self):\n return self.peek() == ''",
"def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer",
"def converts_values(self):\n return self.convert_value is not Formatter.convert_value or \\\n self.convert_column is not Formatter.convert_column",
"def is_valid():\n return IsValid()",
"def __bool__(self):\n return True",
"def is_real(self) -> bool:\n return not any(self.v)",
"def fixable(self):\n return False",
"def isDumb(self):\r\n return False",
"def check_input(self) -> bool:\n return self._check_input",
"def shouldBeCleanedBeforePaste( self, context ):\n return ObjectShouldBeCleanedBeforePaste( context )",
"def is_valid(self) -> bool:\n return all(\n (\n not self.author,\n self.unit,\n )\n )",
"def is_finite(self):\n return not self._is_special"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the current world size (number of distributed processes).
|
def world_size() -> int:
return dist.get_world_size() if dist.is_initialized() else 1
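
The ``rank()`` helper used by the decorator below is not shown in this snippet; presumably it follows the same guard pattern, for example:

import torch.distributed as dist

def rank() -> int:
    # Global rank of this process, or 0 when torch.distributed is not initialized.
    return dist.get_rank() if dist.is_initialized() else 0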
|
[
"def get_world_size(self):\n return self.WORLD_SIZE",
"def get_data_parallel_world_size():\n return torch.distributed.get_world_size(group=get_data_parallel_group())",
"def _get_data_parallel_world_size():\n global mpu\n if mpu is not None:\n return mpu.get_data_parallel_world_size()\n return dist.get_world_size(group=_get_data_parallel_group())",
"def world_size(self):\n return self._wsize",
"def _get_expert_parallel_world_size(group_name):\n return dist.get_world_size(group=_get_expert_parallel_group(group_name))",
"def _get_model_parallel_world_size():\n global mpu\n if mpu is not None:\n return mpu.get_model_parallel_world_size()\n return 1",
"def get_model_parallel_world_size():\n global _MPU_WORLD_SIZE\n if _MPU_WORLD_SIZE is not None:\n return _MPU_WORLD_SIZE\n return torch.distributed.get_world_size(group=get_model_parallel_group())",
"def _get_expert_data_parallel_world_size(group_name):\n return dist.get_world_size(group=_get_expert_data_parallel_group(group_name))",
"def world_size(self):\n if self.data_section is None:\n return None\n attrs = self.data_section.attrs\n if bool(attrs)==False:\n return None\n return attrs.get('world_size', None)",
"def getMapSpaceSize(self):\n return self.model.getMapSpaceSize()",
"def get_size(self):\n return get_dir_size(self.run_dir)",
"def GlobalSize(self):\n return _hypre.HypreParVector_GlobalSize(self)",
"def geometry_max_world_size(self):\n wsize = c_float()\n ckresult(_dll.FMOD_System_GetGeometrySettings(self._ptr, byref(wsize)))\n return wsize.value",
"def get_size(self):\n num0 = modelDB.db[self.model]['num0']\n num1 = modelDB.db[self.model]['num1']\n self.size = num0 + num1*self.m1*self.n1\n return self.size",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()",
"def domain_size(self):\n all_vars = self.all_variables()\n if not all_vars:\n return 0\n return np.prod([v.size for v in all_vars])",
"def population_size(self):\n return self._population_size",
"def get_dungeon_size(self):\n return self._dungeon_size"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decorator that only runs the function on the process with rank 0.
|
def rank_zero_only(fn):
def wrapped(*args, **kwargs):
if rank() == 0:
return fn(*args, **kwargs)
return wrapped
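
Typical usage of the decorator is sketched below. Note that on non-zero ranks the wrapped call simply returns ``None``, and because the wrapper does not apply ``functools.wraps``, the decorated function loses its original name and docstring.

@rank_zero_only
def log_once(message):
    print(message)

log_once("checkpoint saved")  # printed only on rank 0; other ranks get None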
|
[
"def rank_zero_fn(fn: Callable[..., TReturn]) -> Callable[..., Optional[TReturn]]:\n\n @wraps(fn)\n def wrapped_fn(*args: Any, **kwargs: Any) -> Optional[TReturn]:\n if get_global_rank() == 0:\n return fn(*args, **kwargs)\n return None\n\n return wrapped_fn",
"def parallel_function(func):\n\n if world.size == 1:\n return func\n\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n if (args and getattr(args[0], 'serial', False) or\n not kwargs.pop('parallel', True)):\n # Disable:\n return func(*args, **kwargs)\n\n ex = None\n result = None\n if world.rank == 0:\n try:\n result = func(*args, **kwargs)\n except Exception as ex:\n pass\n ex, result = broadcast((ex, result))\n if ex is not None:\n raise ex\n return result\n\n return new_func",
"def custom_process(f: ProcessFunction):\n process_registry_040.add_hidden(f)\n process_registry_100.add_hidden(f)\n return f",
"def non_standard_process(spec: ProcessSpec) -> Callable[[ProcessFunction], ProcessFunction]:\n\n def decorator(f: ProcessFunction) -> ProcessFunction:\n process_registry_040.add_function(f=f, spec=spec.to_dict_040())\n process_registry_100.add_function(f=f, spec=spec.to_dict_100())\n return f\n\n return decorator",
"def call_in_rank_order(fun, comm=None):\n if comm is None:\n comm = PETSc.COMM_WORLD\n\n for rank in range(comm.size):\n if rank == comm.rank:\n fun(rank, comm)\n comm.barrier()",
"def test_rank_zero_none_set(rank_key, rank):\n\n with mock.patch.dict(os.environ, {rank_key: rank}):\n from pytorch_lightning.utilities.distributed import _get_rank, rank_zero_only\n\n rank_zero_only.rank = _get_rank()\n\n @rank_zero_only\n def foo():\n return 1\n\n x = foo()\n assert x is None",
"def call_by_root(f, root=0):\n MPI = is_running_mpi()\n if MPI:\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n if rank == root:\n return f()\n else:\n return f()",
"def mpisync(func, comm=MPI.COMM_WORLD):\n def mpifunc(*args, **kwargs):\n if comm.Get_rank() == 0:\n res = func(*args, **kwargs)\n else:\n res = None\n res = comm.bcast(res, root=0)\n return res\n return mpifunc",
"def call_and_bcast(func, *args, **kwargs):\n if is_rank_zero():\n outputs = func(*args, **kwargs)\n else:\n outputs = None\n if _is_distributed:\n outputs = comm.bcast(outputs, root=0)\n return outputs",
"def process(f: ProcessFunction) -> ProcessFunction:\n process_registry_040.add_function(f)\n process_registry_100.add_function(f)\n return f",
"def clusterprocess(func=None, cluster_nodefile=\"$PBS_NODEFILE\", cluster_pin=None, cluster_hint='blocked', cluster_ssh_port=22):\n if func:\n def _call(*args, **kwargs):\n return ClusterProcess(func, *args, **kwargs)\n _call.__name__ = func.__name__\n return _call\n else:\n def wrap_process(func):\n def _call(*args, **kwargs):\n kwargs['cluster_nodefile'] = cluster_nodefile\n kwargs['cluster_pin'] = cluster_pin\n kwargs['cluster_hint'] = cluster_hint\n kwargs['cluster_ssh_port'] = cluster_ssh_port\n return ClusterProcess(func, *args, **kwargs)\n _call.__name__ = func.__name__\n return _call\n return wrap_process",
"def wrapper(self):\n self.policy.SetProbability(0)\n func(self)\n self.policy.SetProbability(1)",
"def is_rank_zero():\n return _rank == 0",
"def process_fn(func):\n def wrapper(*args, **kwargs):\n process = multiprocessing.Process(target=func, args=args, kwargs=kwargs)\n process.start()\n return process\n return wrapper",
"def apply_only(self, function, worker, *args, **kwargs):\n pass",
"def master_node_only(func):\n\n return_type = inspect.signature(func).return_annotation\n function_has_return_value = return_type is not None and return_type != inspect._empty\n if function_has_return_value:\n raise RuntimeError(f\"Function {func} decorated with @master_node_only must not return any value. \"\n f\"Function signature: {inspect.signature(func)}\")\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if is_main_process():\n return func(*args, **kwargs)\n else:\n return None\n\n return wrapper",
"def test_rank_zero_known_cluster_envs(env_vars: Mapping[str, str]):\n from pytorch_lightning.utilities.distributed import _get_rank, rank_zero_only\n\n rank_zero_only.rank = _get_rank()\n\n with mock.patch.dict(os.environ, env_vars):\n from pytorch_lightning.utilities.distributed import _get_rank, rank_zero_only\n\n rank_zero_only.rank = _get_rank()\n\n @rank_zero_only\n def foo(): # The return type is optional because on non-zero ranks it will not be called\n return 1\n\n x = foo()\n assert x == 1",
"def runs_last(func):\r\n def Wrapper():\r\n calls = func.num_host_calls\r\n if calls >= len(env.hosts) - 1:\r\n return func()\r\n else:\r\n func.num_host_calls = calls + 1\r\n return None\r\n\r\n setattr(func, 'num_host_calls', 0)\r\n return Wrapper",
"def careful_call(fn):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Equivalent to print, but only runs on the process with rank 0.
|
def print_rank_zero(*args, **kwargs) -> None:
    # Guard added so the behaviour matches the description above: only the
    # rank-0 process prints; every other rank returns None silently.
    if rank() == 0:
        print(*args, **kwargs)
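# Usage sketch (illustrative): call it unconditionally from every process and
# only rank 0 emits output, e.g.
#
#     print_rank_zero("epoch finished, loss =", loss)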
|
[
"def print_from_rank_zero(msg, output_channel='stdout'):\n if is_rank_zero():\n print(msg)",
"def r_print(*args):\n if comm.rank == 0:\n print('ROOT:', end=' ')\n for i in args:\n print(i, end=' ')\n # noinspection PyArgumentList\n print()",
"def print_on_node_master(self, msg: str):\n self._assert_local_rank_set()\n if self.local_rank == 0:\n print(msg)",
"def print_once(*message):\n if MPI.rank(mpi_comm_world()) == 0:\n print(*message)",
"def fun1():\n size = MPI.COMM_WORLD.Get_size()\n rank = MPI.COMM_WORLD.Get_rank()\n name = MPI.Get_processor_name()\n\n print \"Hello, World! I am process %d of %d on %s.\\n\" % (rank, size, name)",
"def master_print(*args, **kwargs) -> None:\n if is_main_process():\n print(*args, **kwargs)",
"def print_player_rank_and_points(self):\r\n pass",
"def printRanks(self):\n print(self.__ranks)",
"def _mpi_print(self, iteration, abs_res, rel_res):\n if (self.options['iprint'] == 2 and\n (self._system().comm.rank == 0 or os.environ.get('USE_PROC_FILES'))):\n\n prefix = self._solver_info.prefix\n solver_name = self.SOLVER\n\n if prefix.endswith('precon:'):\n solver_name = solver_name[3:]\n\n print_str = prefix + solver_name\n print_str += ' %d ; %.9g %.9g' % (iteration, abs_res, rel_res)\n print(print_str)",
"def _default_vprint_worker(*args, **kwargs):\r\n print(*args, **kwargs)",
"def main_thread_print(*args, **kwargs):\n return print(*args, **kwargs)",
"def ParallelPrint(*args, **kwargs):\n if am_i_the_master():\n print(*args, **kwargs)",
"def worker_test():\n print \"I am worker pid\", os.getpid()\n for i in range(10):\n for j in range(10):\n print i * 10 + j,\n time.sleep(.1)\n if i < 5:\n # The first five lines flush every number printed.\n sys.stdout.flush()\n # All lines flush at the newline.\n print",
"def ready_print(worker, output, error): # pragma: no cover\n print(worker, output, error)",
"def is_rank_zero():\n return _rank == 0",
"def mpi_rank(self):\n return 0",
"def show_running_process() -> None:\n print('++++++++++++++++++++++++++++++++++++++++++++++')\n print(f'Python: {version_info.major}.{version_info.minor}.{version_info.micro}')\n print(f'MocaSystem: {core.VERSION}')\n print(f'MocaModules: {mzk.VERSION}')\n print(f'MocaSanic: {mzk.MocaSanic.VERSION}')\n print(f'Sanic: {__version__}')\n print('++++++++++++++++++++++++++++++++++++++++++++++')\n print('PID\\tPPID\\tNAME')\n for line in mzk.check_output('ps -ef | grep MocaTwitterUtils', shell=True).decode().splitlines():\n items = line.split()\n if items[7].startswith('MocaTwitterUtils'):\n print(f\"{items[1]}\\t{items[2]}\\t{' '.join(items[7:])}\")\n print('++++++++++++++++++++++++++++++++++++++++++++++')",
"def mpi_fork(n):\n if n<=1:\n return \"child\"\n if os.getenv(\"IN_MPI\") is None:\n env = os.environ.copy()\n env.update(\n MKL_NUM_THREADS=\"1\",\n OMP_NUM_THREADS=\"1\",\n IN_MPI=\"1\"\n )\n print( [\"mpirun\", \"-np\", str(n), sys.executable] + sys.argv)\n subprocess.check_call([\"mpirun\", \"-np\", str(n), sys.executable] +['-u']+ sys.argv, env=env)\n return \"parent\"\n else:\n global nworkers, rank\n nworkers = comm.Get_size()\n rank = comm.Get_rank()\n print('assigning the rank and nworkers', nworkers, rank)\n return \"child\"",
"def stdout(msg):\n print(msg)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Identifies uncorrelated samples and updates the arrays of the reduced potential energy and dhdlt, retaining only the data entries of these samples. 'sta' and 'fin' are the starting and final snapshot positions to be read; both are arrays of dimension K.
|
def uncorrelate(sta, fin, do_dhdl=False):
if not P.uncorr_threshold:
if P.software.title()=='Sire':
return dhdlt, nsnapshots, None
return dhdlt, nsnapshots, u_klt
u_kln = numpy.zeros([K,K,max(fin-sta)], numpy.float64) # u_kln[k,m,n] is the reduced potential energy of uncorrelated sample index n from state k evaluated at state m
N_k = numpy.zeros(K, int) # N_k[k] is the number of uncorrelated samples from state k
g = numpy.zeros(K,float) # autocorrelation times for the data
if do_dhdl:
dhdl = numpy.zeros([K,n_components,max(fin-sta)], float) #dhdl is value for dhdl for each component in the file at each time.
    print("\n\nNumber of correlated and uncorrelated samples:\n\n%6s %12s %12s %12s\n" % ('State', 'N', 'N_k', 'N/N_k'))
UNCORR_OBSERVABLE = {'Gromacs':P.uncorr,'Amber':'dhdl', 'Sire':'dhdl', 'Desmond':'dE', 'Gomc':P.uncorr}[P.software.title()]
if UNCORR_OBSERVABLE == 'dhdl':
# Uncorrelate based on dhdl values at a given lambda.
for k in range(K):
# Sum up over those energy components that are changing.
# if there are repeats, we need to use the lchange[k] from the last repeated state.
lastl = k
for l in range(K):
if numpy.array_equal(lv[k],lv[l]):
lastl = l
dhdl_sum = numpy.sum(dhdlt[k, lchange[lastl], sta[k]:fin[k]], axis=0)
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
#NML: Set statistical inefficiency (g) = 1 if vector is all 0
if not numpy.any(dhdl_sum):
#print "WARNING: Found all zeros for Lambda={}\n Setting statistical inefficiency g=1.".format(k)
g[k] = 1
else:
# (alternatively, could use the energy differences -- here, we will use total dhdl).
g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples
N_uncorr = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N_uncorr < P.uncorr_threshold:
if do_dhdl:
                    print("WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N_uncorr, k))
indices = sta[k] + numpy.arange(len(dhdl_sum))
N = len(indices)
else:
N = N_uncorr
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
                print("%6s %12s %12s %12.2f" % (k, N_uncorr, N_k[k], g[k]))
for n in range(n_components):
dhdl[k,n,0:N] = dhdlt[k,n,indices]
if UNCORR_OBSERVABLE == 'dhdl_all':
# Uncorrelate based on dhdl values at a given lambda.
for k in range(K):
# Sum up over the energy components; notice, that only the relevant data is being used in the third dimension.
dhdl_sum = numpy.sum(dhdlt[k,:,sta[k]:fin[k]], axis=0)
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
# (alternatively, could use the energy differences -- here, we will use total dhdl).
g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
if do_dhdl:
                    print("WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k))
indices = sta[k] + numpy.arange(len(dhdl_sum))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
                print("%6s %12s %12s %12.2f" % (k, fin[k], N_k[k], g[k]))
for n in range(n_components):
dhdl[k,n,0:N] = dhdlt[k,n,indices]
if UNCORR_OBSERVABLE == 'dE':
# Uncorrelate based on energy differences between lambdas.
for k in range(K):
# Sum up over the energy components as above using only the relevant data; here we use energy differences
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
dE = u_klt[k,k+1,sta[k]:fin[k]] if not k==K-1 else u_klt[k,k-1,sta[k]:fin[k]]
g[k] = pymbar.timeseries.statisticalInefficiency(dE)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dE, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
                print("WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k))
indices = sta[k] + numpy.arange(len(dE))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
return (dhdl, N_k, u_kln)
return (N_k, u_kln)
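# A minimal, self-contained sketch of the subsampling step used above. The
# synthetic series and the helper name are illustrative only (not part of the
# original module); the pymbar.timeseries calls are the same ones used in
# uncorrelate().
def _subsample_example():
    import numpy
    import pymbar
    # Correlated synthetic series: a slowly varying signal plus noise.
    t = numpy.arange(2000)
    series = numpy.sin(t / 50.0) + 0.1 * numpy.random.randn(t.size)
    g = pymbar.timeseries.statisticalInefficiency(series)         # statistical inefficiency
    idx = pymbar.timeseries.subsampleCorrelatedData(series, g=g)  # uncorrelated sample indices
    return series[numpy.array(idx)]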
|
[
"def msms_det(temp_pick_dict, data_dict):\n t=time.time()\n # prep input\n data_list, temp_list, dt_ot_list = [], [], []\n for net_sta, [temp, norm_temp, dt_list] in temp_pick_dict.items():\n if net_sta not in data_dict: continue\n data, norm_data = data_dict[net_sta][1:3]\n data_list.append([data, norm_data])\n temp_list.append([temp[0], norm_temp[0]])\n dt_ot_list.append(dt_list[0])\n\n num_sta = len(data_list)\n if num_sta<min_sta: return []\n cc_holder = np.zeros([num_sta, int(86400*samp_rate)])\n # 1. match\n cc_mat = match_filter(data_list, temp_list)\n # 2. shift\n cc = shift_ot(cc_holder, cc_mat, dt_ot_list)\n # 3. mask\n cc_masked = [mask_cc(cci) for cci in cc]\n # 4. stack & detect\n cc_stack = np.mean(cc_masked, axis=0)\n dets = det_cc_stack(cc_stack)\n print('{} dets, {} sta, {:.1f}s'.format(len(dets), num_sta, time.time()-t))\n return dets",
"def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)",
"def update_total_fpmu_dict(self):\n # identical for each long-range connection\n # extract parameters\n deltat = self.dt\n trise = self.tau_r\n tdamp = self.tau_d\n\n tr = deltat/trise\n etr = np.exp(-tr) \n td = deltat/tdamp\n etd = np.exp(-td)\n cst = trise/(tdamp-trise)\n\n # nmda should keep in memory which could not be reset to zerooooooo!!!\n \"\"\"\n no resetting to zero --> go directly to refreshing !!! based on pre-value\n \"\"\"\n for c in self.source_connection_list:\n if (c.conn_type == 'LongRange'):\n self.total_INMDA_dict[c.connection_distribution] = self.total_INMDA_dict[c.connection_distribution] * etd + self.total_HNMDA_dict[c.connection_distribution] * cst\n self.total_HNMDA_dict[c.connection_distribution] = self.total_HNMDA_dict[c.connection_distribution] * etr + c.curr_firing_rate * c.nsyn * c.weights * self.tau_r\n\n print 'Change HNMDA: ', c.curr_firing_rate * c.nsyn ,' \\n'\n print 'Inputlr dict: ', self.total_inputlr_dict[c.connection_distribution]\n\n\n\n # for curr_CD in self.source_connection_list:\n # have already exist\n for c in self.source_connection_list:\n if(c.conn_type == 'ShortRange'):\n self.total_fpmu_dict[c.connection_distribution] = 0.0\n # have already clear up all the short range connections\n for c in self.source_connection_list:\n if(c.conn_type == 'ShortRange'):\n self.total_fpmu_dict[c.connection_distribution] += c.curr_firing_rate * c.nsyn * c.weights\n\n # summation\n self.total_fp_vslave = 0.0\n for key,val in self.total_fpmu_dict.items():\n \n try:\n self.total_fp_vslave += val\n except:\n key.initialize()\n self.total_fp_vslave += val\n # and then, summation of Inmda\n for key,val in self.total_INMDA_dict.items():\n try:\n self.total_fp_vslave += val\n except:\n key.initialize()\n self.total_fp_vslave += val\n # and then divided by gL or multiply tau_m\n self.total_fp_vslave = self.total_fp_vslave * self.tau_m",
"def data(dbfilename = os.path.expanduser('~/python/project/znuc2012.S4.star.el.y.stardb.gz')):\n db = stardb.load(dbfilename) # loads database\n nmass = db.nvalues[0] # finds the number of values\n masses = db.values[0][:nmass] #creates a vector of the initial masses\n isodb = stardb.load(os.path.expanduser('~/python/project/znuc2012.S4.star.deciso.y.stardb.gz'))\n \n massnumber = []\n for x in range(len(isodb.ions)):\n mn = isodb.ions[x].A\n massnumber.append(mn)\n massnumber = np.array(massnumber)\n np.save(os.path.expanduser('~/python/project/filestoload/Massnumber'), massnumber) \n####################### \n# write all energy and mixing values\n\n energyvalues = np.unique(db.fielddata['energy'])\n mixingvalues = np.unique(db.fielddata['mixing'])\n masterremnant = [] # result will be a multidimensional array\n elementdata = []\n isodata = []\n r = len(db.ions) # for loop iteration\n w = len(isodb.ions)\n for energy in energyvalues:\n remmixingarray = [] # reinitialise the next dimension\n elmixingarray = []\n isomixingarray = []\n for mixing in mixingvalues:\n \n \n ii = np.logical_and(np.isclose(db.fielddata['energy'], energy), np.isclose(db.fielddata['mixing'], mixing))\n \n mass = db.fielddata[ii]['remnant']\n remmixingarray.append(mass) # this is an array of remnant masses for one energy and every mixing value\n \n elfill = [] # reinitialise the next dimension again\n isofill = []\n \n \n for m in range(w):\n \n a = isodb.ions[m] #for obtaining the element string\n kk = np.where(isodb.ions==isotope.ion(a)) # finding the indices in db.ions for a particular element\n jj = np.where(ii)\n isotopes = isodb.data[jj, kk][0] # array of abundances for that particular element\n isofill.append(isotopes) # this is an array of element data for every mass for one energy and one mixing value\n\n\n\n\n isomixingarray.append(isofill) \n \n \n masterremnant.append(remmixingarray) # these master arrays have every bit of data under its own energy. so called like elementdata[energy][mixing][elementnumber] gives the element data for every star for a single element.\n \n isodata.append(isomixingarray)\n \n np.save(os.path.expanduser('~/python/project/filestoload/IsoData'), isodata)\n np.save(os.path.expanduser('~/python/project/filestoload/RemnantMasses'), masterremnant)\n np.save(os.path.expanduser('~/python/project/filestoload/Ioninfo'), isodb.ions)\n time = [] \n \n for mass in masses: # for loop will cycle through the masses and grab the lifetime of each star\n s = str(mass) # converts the mass number to a string for file acquiring\n if s.endswith('.0'): # formatting issue, to match the filenames\n s = s[:-2] \n filename = os.path.expanduser('~/python/project/dumps/z{}#presn').format(s)\n # grabs filename corrosponding to this mass\n d = kepdump.load(filename) # loads the kepdump data for this star\n time.append(d.time) \n yr = 365.2425*86400 \n time = np.array(time)/yr\n dataarray = [masses, time]\n\n\n return dataarray",
"def calculate_hysteresis(data,ms,filename):\n\t\n\tfrom numpy import pi, mean\n\tfrom scipy.integrate import cumtrapz\n\timport pandas as pd\n\t\n\tprint('--------------------------')\n\tprint('Evaluation ...')\n\tprint('... calculating hysteresis')\n\t\n\t# prepare results\n\tresult = pd.Series({'Vamp':ms['amp']})\n\tresult['frequency'] = ms['freq']\n\tresult['thickness'] = ms['thickness']\n\tresult['area'] = ms['area']\n\tresult['areaerr'] = ms['areaerr']\n\t\n\t# calculate difference voltage betwen Vset and Vref\n\tdata['Vdiff'] = data.Vset - data.Vref\n\t\n\t#calculate displacement current \n\tdata['I'] = data.Vref / ms['rref']\n\n\t#calc and center electric field from Vset and sample thickness d (+save)\n\tdata['E'] = data.Vdiff / ms['thickness']\n\tE_bias = abs(max(data.E))-abs(min(data.E))\n\tprint('... E_bias: %f MV/m ; %f V'%(E_bias/1e6,E_bias*ms['thickness']))\n\tif ms['correct_Ebias'] == True:\n\t\tprint('... correct Ebias')\n\t\tif E_bias < 0:\n\t\t\tdata['E'] = data.E + abs(E_bias)\n\t\telse:\n\t\t\tdata['E'] = data.E - abs(E_bias)\n\tresult['ebias'] = E_bias\n\t\n\t# correct loss current before removing offset\t\n\tif ms['correct_LossI'] == True:\n\t\tprint('... correct loss current')\n\t\ttry:\n\t\t\tdata['I_Loss'] = data.Vref * 2 * pi * ms['freq'] * ms['cap'] * ms['tand']\n\t\t\tdata['I'] = data.I - data.I_Loss\n\t\t\tprint('... ILoss/IP: %e'%(mean(data.I_Loss)/mean(data.I)))\n\t\texcept ValueError:\n\t\t\tprint('Some values missing! (Capacity, tan d ?)')\n\t\n\t# calc offset current from mean of 1 period\n\tif ms['custom_curr_offs'] == 0:\n\t\t\n\t\t# TEST: get start index from first zero transition of current signal\n\t\t# index_DataFrame = data.iloc[(data['I']-0.0).abs().argsort()[:20]]\t# extract index from nearest values to zero\n\t\t# start_index = index_DataFrame.index.min()\n\t\t\n\t\tstart_index = 50\n\t\t\n\t\tincrement = data.time[1]-data.time[0]\n\t\tsteps = int(1./ ms['freq'] / increment * 2)\n\t\toffset = mean(data.I[start_index:steps+start_index])\n\telse:\n\t\tprint('... auto offset current disabled')\n\t\ttry:\n\t\t\toffset = ms['custom_curr_offs']\n\t\texcept ValueError:\n\t\t\tprint('current offset value missing!')\n\t\t\n\t# remove offset current\n\tdata['I'] = data.I - offset\n\n\t# charge by integrating current\n\tdata['Q'] = cumtrapz(data.I,data.time,initial=0)\n\n\t# polarization\n\tdata['P'] = data.Q / ms['area']\n\n\t# align P around Pmin and Pmax\n\tmaxP = max(data.P)\n\tminP = min(data.P)\n\tPdiff = abs(minP)-abs(maxP)\n\tdata['P'] = data.P + Pdiff/2\n\t\t\n\t# aling P around 0\t\t16 because 8+ und 8-\n\tPNull = mean([max(data.iloc[(data['E']-0).abs().argsort()[:16]].P),min(data.iloc[(data['E']-0).abs().argsort()[:16]].P)])\n\tif PNull < 0:\n\t\tdata['P'] = data.P + abs(PNull)\n\telse:\n\t\tdata['P'] = data.P - abs(PNull)\n\tresult['pnull'] = PNull\n\n\t# calc error of polarization\n\tdata['P_error'] = (ms['vreferr'] / data.Vref + ms['rreferr']/ms['rref'] + ms['areaerr']/ms['area']) * data.P\n\n\t# get EC and PR --> 3 sigma\n\tPR, PR_error = get_PR(data)\n\tresult['PR'], result['PRerr'] = PR, PR_error\n\tresult['EC'] = get_EC(data)\n\t\n\tprint('... PR: (%f +- %f) yC/cm2'%(abs(result['PR'])*100,abs(result['PRerr'])*100))\n#\tprint('... (%.2f)'%(PR_error/PR*100))\n\t#print('Vdiff: %f V'%(data.Vdiff.max()))\n\t\n\treturn data, result",
"def fillDetInfo():\n print('here i am')\n # 1. maps of analysis channel to cpd, and pulser monitor channels\n detCH, pMons = {}, {}\n for ds in [0,1,2,3,4,5,6]:\n f = np.load(\"%s/data/ds%d_detChans.npz\" % (os.environ['LATDIR'], ds))\n detCH[ds] = f['arr_0'].item()\n pMons[ds] = f['arr_1'].item()\n\n # 2. maps of HV and TRAP threshold settings are stored in the DB.\n # make them global, and move them to the runSettings file.\n # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }\n detHV, detTH = {}, {}\n\n # load all possible values, as in settingsMgr\n detDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n detPars = db.Query()\n cal = dsi.CalInfo()\n for ds in [0,1,2,3,4,5,6]:\n # for ds in [0]:\n print(\"scanning ds\",ds)\n detTH[ds] = {}\n detHV[ds] = {}\n for key in cal.GetKeys(ds):\n mod = -1\n if \"m1\" in key: mod = 1\n if \"m2\" in key: mod = 2\n for cIdx in range(cal.GetIdxs(key)):\n\n # load the DB records\n dbKeyTH = \"trapThr_%s_c%d\" % (key, cIdx)\n dbValTH = dsi.getDBRecord(dbKeyTH,calDB=detDB,pars=detPars)\n\n dbKeyHV = \"hvBias_%s_c%d\" % (key, cIdx)\n dbValHV = dsi.getDBRecord(dbKeyHV,calDB=detDB,pars=detPars)\n\n # debug: print the record\n # for val in sorted(dbValTH):\n # if len(dbValTH[val])>0:\n # print(val, dbValTH[val])\n # return\n\n # fill the first value\n if len(detTH[ds])==0:\n detTH[ds] = dbValTH\n detHV[ds] = dbValHV\n continue\n\n # check for new threshold values.\n for cpd in detTH[ds]:\n nOld, nNew = len(detTH[ds][cpd]), len(dbValTH[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detTH[ds][cpd] = dbValTH[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevTH = detTH[ds][cpd][-1][0], detTH[ds][cpd][-1][1]\n for val in dbValTH[cpd]:\n thisRun, thisTH = val[0], val[1]\n if thisTH != prevTH:\n detTH[ds][cpd].append([thisRun,thisTH])\n prevTH = thisTH\n\n # check for new HV values.\n for cpd in detHV[ds]:\n\n nOld, nNew = len(detHV[ds][cpd]), len(dbValHV[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detHV[ds][cpd] = dbValHV[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevHV = detHV[ds][cpd][-1][0], detHV[ds][cpd][-1][1]\n for val in dbValHV[cpd]:\n thisRun, thisHV = val[0], val[1]\n if thisHV != prevHV:\n print(\"found HV diff. cpd %d prev %dV (run %d) new %dV (run %d)\" % (cpd, prevHV, prevRun, thisHV, thisRun))\n detHV[ds][cpd].append([thisRun,thisHV])\n prevHV = thisHV\n\n # return\n\n # # load the old file and compare\n # # GOAL: improve on this file.\n # # f = np.load(\"%s/data/runSettings.npz\" % dsi.latSWDir)\n # # detHVOld = f['arr_0'].item()\n # # detTHOld = f['arr_1'].item()\n # # detCHOld = f['arr_2'].item()\n # # pMonsOld = f['arr_3'].item()\n #\n # ds = 3\n # print(\"old results, ds\",ds)\n # for cpd in sorted(detTHOld[ds]):\n # if cpd!=\"122\":continue\n # if len(detTHOld[ds][cpd]) > 0:\n # print(cpd, detTHOld[ds][cpd])\n #\n # # for ds in [0,1,2,3,4,5,6]:\n # print(\"thresh results, ds:\",ds)\n # for cpd in sorted(detTH[ds]):\n # # if cpd!=122:continue\n # if len(detTH[ds][cpd]) > 0:\n # print(cpd, detTH[ds][cpd])\n\n\n np.savez(\"%s/data/runSettings-v2.npz\" % dsi.latSWDir,detHV,detTH,detCH,pMons)",
"def HD_input_snfit_data(self):\n\n dico = cPickle.load(open(SUGAR_parameter_pkl))\n self.read_snfit_results()\n self.read_meta()\n Filtre = np.array([True]*len(self.sn_name))\n self.zcmb = []\n self.z_err = []\n for j in range(len(self.sn_name)):\n if self.sn_name[j] in dico.keys() and self.sn_name[j] :\n\n for i in range (len(self.meta_sn_name_list)):\n if self.sn_name[j] == self.meta_sn_name_list[i]:\n \n self.z_err.append(self.meta_zhl_err[i])\n self.zcmb.append(self.meta_zcmb[i])\n if np.abs(self.x1[j] - self.meta_x1[i]) > 0.001:\n print 'problem with %s include in sample but difference between snfit and meta'%(self.sn_name[j])\n else:\n Filtre[j] = False\n\n for p in dico.keys():\n if p not in self.sn_name:\n print p\n \n self.x1 = self.x1[Filtre]\n self.x1_err = self.x1_err[Filtre] \n self.c = self.c[Filtre]\n self.c_err = self.c_err[Filtre]\n self.mb = self.mb[Filtre]\n self.mb_err = self.mb_err[Filtre]\n self.cov_x0_x1 = self.cov_x0_x1[Filtre]\n self.cov_x0_c = self.cov_x0_c[Filtre]\n self.cov_x1_c = self.cov_x1_c[Filtre]\n self.cov_mb_x1 = self.cov_mb_x1[Filtre]\n self.cov_mb_c = self.cov_mb_c[Filtre]\n self.z = self.z[Filtre]\n self.zcmb = np.array(self.zcmb)\n self.z_err = np.array(self.z_err)\n\n self.cov_y = np.zeros((len(self.mb)*3,len(self.mb)*3))\n\n for i in range (len(self.mb)):\n self.cov_y[i*3,i*3] = self.mb_err[i]**2\n self.cov_y[i*3+ 1,i*3+ 1] = self.x1_err[i]**2\n \n self.cov_y[i*3+ 2,i*3+ 2] = self.c_err[i]**2\n self.cov_y[i*3+ 0,i*3+ 1] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 1,i*3+ 0] = self.cov_mb_x1[i]\n self.cov_y[i*3+ 0,i*3+ 2] = self.cov_mb_c[i]\n self.cov_y[i*3+ 2,i*3+ 0] = self.cov_mb_c[i]\n self.cov_y[i*3+ 1,i*3+ 2] = self.cov_x1_c[i] \n self.cov_y[i*3+ 2,i*3+ 1] = self.cov_x1_c[i] \n \n self.salt_parm = np.array([self.mb,self.x1,self.c]).T\n# print len(self.salt_parm), len(self.cov_y), len(self.z), len(self.zcmb)\n# return self.salt_parm, self.cov_y, self.z, self.meta_zcmb, self.meta_zhl_err, self.sn_name, self.meta_idr\n return self.salt_parm, self.cov_y, self.z, self.zcmb, self.z_err",
"def UHF_measure_demod(Num_of_TC = 3):\n\n \n \n path = path_demod\n\n # Poll data parameters\n poll_length = 0.001 # [s]\n poll_timeout = 500 # [ms]\n poll_flags = 0\n poll_return_flat_dict = True \n\n # Data aquisition time for recording 1000 samples\n acq_time = 1/sampling_rate * 1000\n \n\n #START MEASURE\n\n # Wait for the demodulator filter to settle\n time.sleep(Num_of_TC*TC)\n\n daq.flush() # Getting rid of previous read data in the buffer\n\n time.sleep(acq_time) # Waiting a bit to record sufficient number of samples\n\n data = daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict) # Readout from subscribed node (demodulator)\n\n #END OF MEASURE\n\n # Check the dictionary returned is non-empty\n assert data, \"poll() returned an empty data dictionary, did you subscribe to any paths?\"\n # Note, the data could be empty if no data arrived, e.g., if the demods were\n # disabled or had demodulator rate 0\n assert path in data, \"data dictionary has no key '%s'\" % path\n # The data returned is a dictionary of dictionaries that reflects the node's path\n\n\n # The data returned is a dictionary of dictionaries that reflects the node's path\n sample = data[path]\n sample_x = np.array(sample['x']) # Converting samples to numpy arrays for faster calculation\n sample_y = np.array(sample['y']) # Converting samples to numpy arrays for faster calculation\n sample_r = np.sqrt(sample_x**2 + sample_y**2) # Calculating R value from X and y values\n \n \n \n sample_mean = np.mean(sample_r) # Mean value of recorded data vector\n #measured_ac_conductance = sample_mean/out_ampl\n \n return sample_mean",
"def feature_extraction(clip_data):\n \n features_list = ['RMSX','RMSY','RMSZ','rangeX','rangeY','rangeZ','meanX','meanY','meanZ','varX','varY','varZ',\n 'skewX','skewY','skewZ','kurtX','kurtY','kurtZ','xcor_peakXY','xcorr_peakXZ','xcorr_peakYZ',\n 'xcorr_lagXY','xcorr_lagXZ','xcorr_lagYZ','Dom_freq','Pdom_rel','PSD_mean','PSD_std','PSD_skew',\n 'PSD_kur','jerk_mean','jerk_std','jerk_skew','jerk_kur','Sen_X','Sen_Y','Sen_Z']\n# ,'RMS_mag','range_mag',\n# 'mean_mag','var_mag','skew_mag','kurt_mag','Sen_mag']\n\n for trial in clip_data.keys():\n\n for sensor in clip_data[trial].keys():\n\n #cycle through all clips for current trial and save dataframe of features for current trial and sensor\n features = []\n for c in range(len(clip_data[trial][sensor]['data'])):\n rawdata = clip_data[trial][sensor]['data'][c]\n \n #acceleration magnitude\n rawdata_wmag = rawdata.copy()\n rawdata_wmag['Accel_Mag']=np.sqrt((rawdata**2).sum(axis=1))\n\n #extract features on current clip\n\n #Root mean square of signal on each axis\n N = len(rawdata)\n RMS = 1/N*np.sqrt(np.asarray(np.sum(rawdata**2,axis=0)))\n\n # RMS_mag = 1/N*np.sqrt(np.sum(rawdata_wmag['Accel_Mag']**2,axis=0))\n\n #range on each axis\n min_xyz = np.min(rawdata,axis=0)\n max_xyz = np.max(rawdata,axis=0)\n r = np.asarray(max_xyz-min_xyz)\n\n # r_mag = np.max(rawdata_wmag['Accel_Mag']) - np.min(rawdata_wmag['Accel_Mag'])\n\n #Moments on each axis\n mean = np.asarray(np.mean(rawdata,axis=0))\n var = np.asarray(np.std(rawdata,axis=0))\n sk = skew(rawdata)\n kurt = kurtosis(rawdata)\n\n # mean_mag = np.mean(rawdata_wmag['Accel_Mag'])\n # var_mag = np.std(rawdata_wmag['Accel_Mag'])\n # sk_mag = skew(rawdata_wmag['Accel_Mag'])\n # kurt_mag = kurtosis(rawdata_wmag['Accel_Mag'])\n\n #Cross-correlation between axes pairs\n xcorr_xy = np.correlate(rawdata.iloc[:,0],rawdata.iloc[:,1],mode='same')\n # xcorr_xy = xcorr_xy/np.abs(np.sum(xcorr_xy)) #normalize values\n xcorr_peak_xy = np.max(xcorr_xy)\n xcorr_lag_xy = (np.argmax(xcorr_xy))/len(xcorr_xy) #normalized lag\n\n xcorr_xz = np.correlate(rawdata.iloc[:,0],rawdata.iloc[:,2],mode='same')\n # xcorr_xz = xcorr_xz/np.abs(np.sum(xcorr_xz)) #normalize values\n xcorr_peak_xz = np.max(xcorr_xz)\n xcorr_lag_xz = (np.argmax(xcorr_xz))/len(xcorr_xz)\n\n xcorr_yz = np.correlate(rawdata.iloc[:,1],rawdata.iloc[:,2],mode='same')\n # xcorr_yz = xcorr_yz/np.abs(np.sum(xcorr_yz)) #normalize values\n xcorr_peak_yz = np.max(xcorr_yz)\n xcorr_lag_yz = (np.argmax(xcorr_yz))/len(xcorr_yz)\n\n #pack xcorr features\n xcorr_peak = np.array([xcorr_peak_xy,xcorr_peak_xz,xcorr_peak_yz])\n xcorr_lag = np.array([xcorr_lag_xy,xcorr_lag_xz,xcorr_lag_yz])\n\n #Dominant freq and relative magnitude (on acc magnitude)\n Pxx = power_spectra_welch(rawdata_wmag,fm=0,fM=10)\n domfreq = np.asarray([Pxx.iloc[:,-1].idxmax()])\n Pdom_rel = Pxx.loc[domfreq].iloc[:,-1].values/Pxx.iloc[:,-1].sum() #power at dominant freq rel to total\n\n #moments of PSD\n Pxx_moments = np.array([np.nanmean(Pxx.values),np.nanstd(Pxx.values),skew(Pxx.values),kurtosis(Pxx.values)])\n\n #moments of jerk magnitude\n jerk = rawdata_wmag['Accel_Mag'].diff().values\n jerk_moments = np.array([np.nanmean(jerk),np.nanstd(jerk),skew(jerk[~np.isnan(jerk)]),kurtosis(jerk[~np.isnan(jerk)])])\n\n #sample entropy raw data (magnitude) and FFT\n sH_raw = []; sH_fft = []\n\n for a in range(3):\n x = rawdata.iloc[:,a]\n n = len(x) #number of samples in clip\n Fs = np.mean(1/(np.diff(x.index)/1000)) #sampling rate in clip\n sH_raw.append(nolds.sampen(x)) #samp entr raw data\n #for now 
disable SH on fft\n # f,Pxx_den = welch(x,Fs,nperseg=min(256,n/4))\n # sH_fft.append(nolds.sampen(Pxx_den)) #samp entr fft\n\n sH_mag = nolds.sampen(rawdata_wmag['Accel_Mag'])\n\n #Assemble features in array\n # Y = np.array([RMS_mag,r_mag,mean_mag,var_mag,sk_mag,kurt_mag,sH_mag])\n X = np.concatenate((RMS,r,mean,var,sk,kurt,xcorr_peak,xcorr_lag,domfreq,Pdom_rel,Pxx_moments,jerk_moments,sH_raw)) #,Y))\n features.append(X)\n\n F = np.asarray(features) #feature matrix for all clips from current trial\n # clip_data['features'] = pd.DataFrame(data=F,columns=features_list,dtype='float32')\n clip_data[trial][sensor]['features'] = pd.DataFrame(data=F,columns=features_list,dtype='float32')",
"def refine_dataset(original_data,settings):\n print len(original_data)\n data_ = original_data[original_data.sweep_primary_load_temperature >= settings['valid_load_temp_range'][0]]\n print len(data_)\n data_ = data_[data_.sweep_primary_load_temperature <= settings['valid_load_temp_range'][1]]\n print len(data_)\n data_ = data_[data_.f_0_err/data_.f_0 < settings['fractional_f_0_err_limit']]\n print len(data_)\n data_ = data_[data_.Q_err/data_.Q < settings['fractional_Q_err_limit']]\n print len(data_)\n data_ = data_[data_.Q >= settings['valid_Q_range'][0]]\n data_ = data_[data_.Q <= settings['valid_Q_range'][1]]\n print len(data_)\n data_.sweep_primary_load_temperature[data_.optical_load=='dark'] = .2\n if settings['max_package_temp_deviation'] is not None:\n median_temp = np.median(data_.sweep_primary_package_temperature)\n temp_deviations = np.abs(data_.sweep_primary_package_temperature - median_temp)\n data_ = data_[temp_deviations < settings['max_package_temp_deviation']]\n print len(data_)\n #data_ = data_.sort([\"f_0\"])\n data_['f_0_max'] = np.zeros((data_.shape[0],))#data_.groupby(\"resonator_index\")[\"f_0\"].transform(lambda x: x.max())\n data_['responsivity_Hz_per_K'] = np.zeros((data_.shape[0],))\n data_['responsivity_err'] = np.zeros((data_.shape[0],))\n data_['responsivity_offset'] = np.zeros((data_.shape[0],))\n for index in np.unique(data_.resonator_index):\n group = data_[data_.resonator_index == index]\n max = group[group.sweep_primary_load_temperature < settings['f_0_max_temp_limit']].f_0.max()\n data_.f_0_max[data_.resonator_index == index] = max\n data_['delta_f_0_Hz'] = (data_.f_0-data_.f_0_max)*1e6\n data_['fractional_delta_f_0'] = data_.delta_f_0_Hz/(1e6*data_.f_0_max)#(1e6*data_.noise_measurement_freq_MHz)\n data_['Q_i_err'] = khalil.qi_error(Q = data_.Q, Q_err = data_.Q_err, \n Q_e_real = data_.Q_e_real, Q_e_real_err = data_.Q_e_real_err, \n Q_e_imag = data_.Q_e_imag, Q_e_imag_err = data_.Q_e_imag_err)\n\n for index in np.unique(data_.resonator_index):\n group = data_[(data_.resonator_index == index)&(np.abs(data_.sweep_primary_package_temperature-0.16)<0.04)\n &(data_.sweep_primary_load_temperature>3)]\n try:\n (slope,offset),cov = np.polyfit(group.sweep_primary_load_temperature,group.delta_f_0_Hz,1,cov=True)\n print slope\n data_.responsivity_Hz_per_K[data_.resonator_index == index] = slope\n data_.responsivity_offset[data_.resonator_index == index] = offset\n data_.responsivity_err[data_.resonator_index == index] = np.sqrt(cov[1,1])\n except ValueError:\n continue\n except TypeError:\n continue\n except np.linalg.LinAlgError:\n continue\n eigvals_Hz = []\n nets = []\n for eigvals,freq,responsivity in zip(data_.pca_eigvals,data_.noise_measurement_freq_MHz,data_.responsivity_Hz_per_K):\n # Convert eigvals spectra from 1/Hz units to Hz/sqrt(Hz)\n spectrum_Hz = np.sqrt(eigvals)*freq*1e6\n eigvals_Hz.append(spectrum_Hz)\n # Calculate net in muK sqrt(s). In the following, 1e6 is K -> uK factor, and sqrt(2) is 1/sqrt(Hz) -> sqrt(s) factor\n net = (1e6*spectrum_Hz/abs(responsivity))/np.sqrt(2)\n nets.append(net)\n data_['pca_eigvals_Hz_per_rootHz'] = eigvals_Hz \n data_['net_uK_rootsec'] = nets\n return data_",
"def refine_mask(outname=None,\n info_tab='notes_otsu_masking.csv',\n stamp=None, \n mask=None,\n band=None, ccd=None, section=None,\n sigma1=None, sigma2=None,\n max_area=None,\n min_otsu=None,\n dilat_n=None,):\n # To mange both filename or array as input\n if (type(stamp) == np.ndarray):\n pass\n elif isinstance(stamp, str):\n arr = np.load(stamp)\n # Apply the mask\n arr = np.ma.masked_where(mask, arr)\n # NOTE: the Gaussian filter does not work with masked arrays. I need to\n # make zero the masked regions. NaN doesn't get a good result\n arr[np.ma.getmask(arr)] = 0\n # Apply a combined Gaussian kernel\n karr = (gauss_filter(arr, sigma1, order=0, mode='constant', cval=0) *\n gauss_filter(arr, sigma2, order=0, mode='constant', cval=0))\n # Over the kernelized image apply the Otsu threshold and RMS\n val_otsu = otsu_threshold(karr)\n rms = (np.sqrt(np.mean(np.square(karr.flatten()))))\n # Masks for 0.9 otsu and 3RMS\n msk_otsu1 = karr > 0.9 * val_otsu\n msk_rms = karr > 3 * rms\n # Dilate the mask. These are the arrays to save \n d_msk_otsu1 = bin_dilation(msk_otsu1, niter=dilat_n)\n d_msk_rms = bin_dilation(msk_rms, niter=dilat_n)\n area = d_msk_otsu1[np.where(d_msk_otsu1)].size / karr.size\n #\n # Load the table of visual inspection results\n #\n df = pd.read_csv(info_tab)\n # Fill NaN with False\n df = df.fillna(False)\n # For the actual band-ccd-section, write out (or not) the mask\n df = df.loc[\n (df['band'] == band) & (df['ccd'] == ccd) & (df['section'] == section)\n ]\n #\n # Different cases\n # NOTE: As True/False behaves as 1/0, I'm comparing against 1.5 to \n # not select booleans\n print(df)\n if ( (df['masked'].iloc[0]) and (not df['update'].iloc[0]) ):\n # Save original mask\n np.save(outname, d_msk_otsu1)\n logging.info('1-Saved {0}'.format(os.path.basename(outname)))\n elif ( (df['update'].iloc[0]) and ((df['x1'].iloc[0]) > 1.5) ):\n # Set a rectangular mask\n x1 , x2 = int(df['x1'].iloc[0]), int(df['x2'].iloc[0])\n y1 , y2 = int(df['y1'].iloc[0]), int(df['y2'].iloc[0])\n print(x1, x2, y1, y2)\n rect_mask = np.zeros_like(karr).astype(bool)\n rect_mask[y1 - 1:y2 , x1 - 1:x2 ] = True\n # Dilate mask\n d_rect_mask = bin_dilation(rect_mask, niter=dilat_n)\n # Save rectangular mask\n np.save(outname, d_rect_mask)\n logging.info('2-Saved {0}'.format(os.path.basename(outname)))\n elif ( (df['update'].iloc[0]) and (df['3rms'].iloc[0]) ): \n # Save 3RMS dilated mask\n np.save(outname, d_msk_rms)\n logging.info('3-Saved {0}'.format(os.path.basename(outname)))\n elif ( (df['update'].iloc[0]) and (df['remove'].iloc[0]) ): \n pass\n logging.warning('Mask was not saved because it was inaccurate')\n return True",
"def update_lcmetrics(conn, sources, flag=False):\n cur = conn.cursor()\n for src in sources:\n print('updating assoc_source id = ',src.id,' ndetect = ',src.ndetect)\n #fetch corrected fluxes and uncertainities, ns, nc, nm\n #cur.execute(\"SELECT SUM(peak_flux/(e_peak_flux*e_peak_flux)) AS peaktmp, SQRT(1./SUM(1./(e_peak_flux*e_peak_flux))) AS epeakflux, SUM(total_flux/(e_total_flux*e_total_flux)) AS totaltmp, SQRT(1./SUM(1./(e_total_flux*e_total_flux))) AS etotalflux FROM corrected_flux WHERE assoc_id = %s\",(src.id,))\n cur.execute(\"SELECT SUM(c.peak_flux/(c.e_peak_flux*c.e_peak_flux)) AS peaktmp, SQRT(1./SUM(1./(c.e_peak_flux*c.e_peak_flux))) AS epeakflux, SUM(c.total_flux/(c.e_total_flux*c.e_total_flux)) AS totaltmp, SQRT(1./SUM(1./(c.e_total_flux*c.e_total_flux))) AS etotalflux, SUM(CASE WHEN d.code='S' THEN 1 ELSE 0 END) AS ns, SUM(CASE WHEN d.code='C' THEN 1 ELSE 0 END) AS nc, SUM(CASE WHEN d.code='M' THEN 1 ELSE 0 END) AS nm FROM detected_source AS d, corrected_flux AS c WHERE (c.src_id,c.image_id,d.assoc_id) = (d.src_id,d.image_id,%s)\",(src.id,))\n numrows = int(cur.rowcount)\n if numrows==0:\n print('ERROR! Fetched no fluxes! assoc_id = ',src.id)\n sys.exit(0)\n arow = cur.fetchone()\n newpeak = arow[1]*arow[1]*arow[0]\n newtotal = arow[3]*arow[3]*arow[2]\n if flag:\n cur.execute(\"UPDATE assoc_source SET ave_peak = %s, e_ave_peak = %s, ave_total = %s, e_ave_total = %s, ns = %s, nc = %s, nm = %s WHERE id = %s\",(newpeak, arow[1], newtotal, arow[3], arow[4], arow[5], arow[6], src.id))\n conn.commit()\n #fetch light curve\n cur.execute('''SELECT total_flux, peak_flux, e_total_flux, e_peak_flux FROM corrected_flux WHERE assoc_id= %s''',(src.id,))\n numrows = int(cur.rowcount)\n #if only 1 detection then can't set metrics\n if numrows == 1:\n cur.execute(\"UPDATE assoc_source SET v_total = %s, v_peak = %s, eta_total = %s, eta_peak = %s WHERE id = %s\",(None, None, None, None, src.id))\n #print('*',src.id,at,newtotal,ap,newpeak,' ',st,arow[3],sp,arow[1],' ',numrows)\n else:\n lc = np.fromiter(cur.fetchall(), dtype=[('total','float'),('peak','float'),\n ('e_total','float'),('e_peak','float')], count=numrows)\n wt = 1./lc['e_total']**2\n wp = 1./lc['e_peak']**2\n # calc weighted average and std dev\n at,st = weighted_avg_and_std(lc['total'], wt)\n ap,sp = weighted_avg_and_std(lc['peak'], wp)\n # calc V\n src.v_total = st/at\n src.v_peak = sp/ap\n # calc eta\n src.eta_total = np.sum(wt*(lc['total']-at)**2)/(numrows-1)\n src.eta_peak = np.sum(wp*(lc['peak']-ap)**2)/(numrows-1)\n # update\n cur.execute('''UPDATE assoc_source SET v_total = %s, v_peak = %s, eta_total = %s, \n eta_peak = %s WHERE id = %s''',\n (src.v_total, src.v_peak, src.eta_total, src.eta_peak, src.id))\n conn.commit()\n cur.close()",
"def Main_Algorithm(Y, M, k, L, data = 'EEG', fix = True): \n if data == 'EEG':\n \"\"\"\n Recover of the source matrix with a fixed mixing matrix A on a\n segmented data set.\n \"\"\"\n if fix == True:\n \" Perform Main Algorithm on Segmented Dataset \"\n X_result = [] # Original recovered source matrix X\n \n for i in range(k.shape[0]):\n \" Making the right size of X for all segments \"\n X_result.append(np.zeros([len(Y), int(k[i])]))\n \n \" Original Recovered Source Matrix X, MSE and Average MSE with X_ica \"\n for seg in range(len(Y)): \n # Looking at one time segment\n A = np.random.normal(0, 2, (M, int(k[seg])))\n X_result[seg] = M_SBL.M_SBL(A, Y[seg], M, int(k[seg]), int(k[seg]), iterations=1000, noise=False)\n \n return A, X_result\n \n if data == 'simulated':\n \"\"\"\n Recover of the source matrix with a either a fixed mixing matrix A \n or with mixing matrix from Cov-DL. This ask for the true number of\n k and N.\n \"\"\"\n print('Data information:\\n number of sensors \\t \\t M = {} \\n number of samples pr segment \\t L = {}'.format(M, Y[0].shape[1]))\n \n N = int(input(\"Please enter N: \")) # number of sources\n k = int(input(\"Please enter k: \")) # active sources to be found\n\n if fix == True:\n A = np.random.normal(0, 2, (M, N))\n X_result = M_SBL.M_SBL(A, Y, M, N, k, iterations=1000, noise=False)\n return A, X_result\n \n if fix == False:\n Ls = 10\n n_seg = int(L/Ls)\n A_result = np.zeros((n_seg, M, N))\n X_result = np.zeros((n_seg, N, L-2))\n for i in range(len(Y)): \n Y_big = Cov_DL._covdomain(Y[i], L, Ls, M) # transformation to covariance-domain\n \n if N <= (M*(M+1))/2.:\n A_rec, A_init = Cov_DL.Cov_DL2(Y_big, M, N, k)\n A_result[i] = A_rec\n \n elif k <= (M*(M+1))/2.:\n A_rec = Cov_DL.Cov_DL1(Y_big, M, N, k)\n A_init = None\n A_result[i] = A_rec\n \n elif k > (M*(M+1))/2.:\n raise SystemExit('X is not sparse enogh (k > (m*(m+1))/2)')\n \n X_rec = M_SBL.M_SBL(A_rec, Y[i], M, N, k, iterations=1000, noise=False)\n X_result[i] = X_rec\n return A_result, A_init, X_result",
"def UHF_measure_demod_trig(Num_of_TC = 3, trigger = 3, AWG_instr = None, record_time = 5):\n\n \n if AWG_instr is None:\n raise Exception(\"AWG_instr is not passed :-P\")\n\n path = path_demod\n\n # Poll data parameters\n poll_length = 0.001 # [s]\n poll_timeout = 500 # [ms]\n poll_flags = 0\n poll_return_flat_dict = True \n\n if trigger not in [3,4]:\n raise Exception(\"Trigger must be either 3 or 4!\")\n\n\n \n\n # Unsubscribe from all paths (nodes) - needed that buffer is not continuously filling\n daq.unsubscribe('*')\n\n daq.setInt(path_demod_trig, 32)\n\n #START MEASURE\n\n # Subscribe to the demodulator's sample using global parameter \"path demod\" from \"UHF_init_demod\" function\n daq.subscribe(path_demod) \n\n daq.flush() # Getting rid of previous read data in the buffer\n\n daq.setInt(path_demod_enable, 1) # Enable demodulator \n\n # Wait for the demodulator filter to settle\n time.sleep(Num_of_TC*TC) \n\n AWG_instr._ins.run() # Forcing AWG to start output \n\n\n time.sleep(record_time) # Waiting until whole desired data is in buffer\n\n\n\n\n data = daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict) # Readout from subscribed node (demodulator)\n\n daq.setInt(path_demod_enable, 0) # Disable demodulator\n\n #END OF MEASURE\n\n # Check the dictionary returned is non-empty\n assert data, \"poll() returned an empty data dictionary, did you subscribe to any paths?\"\n # Note, the data could be empty if no data arrived, e.g., if the demods were\n # disabled or had demodulator rate 0\n assert path in data, \"data dictionary has no key '%s'\" % path\n # The data returned is a dictionary of dictionaries that reflects the node's path\n\n\n # The data returned is a dictionary of dictionaries that reflects the node's path\n sample = data[path]\n sample_x = np.array(sample['x']) # Converting samples to numpy arrays for faster calculation\n sample_y = np.array(sample['y']) # Converting samples to numpy arrays for faster calculation\n sample_r = np.sqrt(sample_x**2 + sample_y**2) # Calculating R value from X and y values\n \n\n \n return sample_r",
"def prepData(self):\n\n\n from coorconv import loffset,eq2gal,gal2eq,dist\n import re\n \n ret = \"\"\n wd = os.getcwd()\n files = os.listdir(wd)\n scfile = [ f for f in files if re.search(\"_SC\",f) ]\n if len(scfile) == 0: \n ret += \"No SC files found in the directory.Exit.\\n\"\n self.havedata = False\n self.scfile = \"\"\n return \n if len(scfile) > 1:\n ret += \"Warning: More that one SC file is found in the\\n\"\n ret += \"specified directory.\\n\"\n for f in scfile: print f\n ret += \"Using\"+scfile[0]+\"\\n\"\n phfiles = [ f for f in files if re.search(\"_PH\",f) ]\n if len(phfiles) == 0: \n ret += \"No event files found.Exit.\\n\"\n self.havedata = False\n self.scfile = \"\"\n return\n os.system('echo '+phfiles[0]+' > efiles.list')\n for f in phfiles[1:]:os.system('echo '+f+' >> efiles.list')\n self.havedata = True\n# print \"prepdata:\"+self.havedata\n self.scfile = scfile[0]\n\n gotinfo,self.obs_pars = self.getObsInfo('efiles.list')\n\n# self.tmin = self.obs_pars['tmin']\n# self.tmax = self.obs_pars['tmax']\n self.emin = self.obs_pars['emin']\n self.emax = self.obs_pars['emax']\n self.ra = self.obs_pars['RA']\n self.dec = self.obs_pars['DEC']\n\n\n# ll,bb = eq2gal(self.ra,self.dec)\n# self.bkg_ra,self.bkg_dec = gal2eq(ll-self.offset,bb)\n\n ll,bb = eq2gal(self.ra,self.dec)\n r1,d1 = gal2eq(ll-self.offset,bb)\n r2,d2 = gal2eq(ll+self.offset,bb)\n dist1 = dist((self.obs_pars[\"RA\"],self.obs_pars[\"DEC\"]),(r1,d1))\n dist2 = dist((self.obs_pars[\"RA\"],self.obs_pars[\"DEC\"]),(r2,d2))\n if dist1<dist2:\n self.bkg_ra,self.bkg_dec = gal2eq(ll-self.offset,bb)\n else:\n self.bkg_ra,self.bkg_dec = gal2eq(ll+self.offset,bb)\n \n\n# loff = loffset(ll,bb,self.offset)\n\n\n return ret",
"def dataload():\n\t\n\tglobal A, B, fnA, fnB, lPcnA, lPcnB\n\t\n\tdwd = os.getcwd() # Data WD\n\t\t\n\t# First sample A is loaded. This is the \"calibrating\" sample.\n\t# In this case it is the OGLE III LMC small amplitude RGs.\n\t\n\tfnA = '/LMC-CalSample-cleaned_2.fits'\n\tA = Table.read(dwd+fnA)\n\n\t# Then sample B is loaded. For comparison/testing purposes, this is\n\t# again the OGLE III LMC SARGs.\n\t\n\tfnB = '/LMC-CalSample-cleaned_2.fits'\n\tB = Table.read(dwd+fnB)\n\t\n\t\"\"\" Fix tables so only the stars with all three good periods are \n\tconsidered. \"\"\"\n\t\n\tlPcnA = get_logPcn(A)\n\tlPcnB = get_logPcn(B)\n\t\n\tfor cn in lPcnA:\n\t\tA = A[A[cn]>0]\n\tfor cn in lPcnB:\n\t\tB = B[B[cn]>0]",
"def feature_extraction_accgyro(clip_data):\n \n features_list = ['RMSX','RMSY','RMSZ','rangeX','rangeY','rangeZ','meanX','meanY','meanZ','varX','varY','varZ',\n 'skewX','skewY','skewZ','kurtX','kurtY','kurtZ','xcor_peakXY','xcorr_peakXZ','xcorr_peakYZ',\n 'xcorr_lagXY','xcorr_lagXZ','xcorr_lagYZ','Dom_freq','Pdom_rel','PSD_mean','PSD_std','PSD_skew',\n 'PSD_kur','jerk_mean','jerk_std','jerk_skew','jerk_kur','Sen_X','Sen_Y','Sen_Z']\n# ,'RMS_mag','range_mag',\n# 'mean_mag','var_mag','skew_mag','kurt_mag','Sen_mag']\n acclist = [s + '_acc' for s in features_list]\n gyrlist = [s + '_gyr' for s in features_list]\n\n for trial in clip_data.keys():\n\n for sensor in clip_data[trial].keys():\n\n #cycle through all clips for current trial and save dataframe of features for current trial and sensor\n features = []\n for c in range(len(clip_data[trial][sensor]['data'])):\n rawdata = clip_data[trial][sensor]['data'][c]\n \n #acceleration magnitude\n rawdata_wmag = rawdata.copy()\n rawdata_wmag['Accel_Mag']=np.sqrt((rawdata**2).sum(axis=1))\n\n #extract features on current clip\n\n #Root mean square of signal on each axis\n N = len(rawdata)\n RMS = 1/N*np.sqrt(np.asarray(np.sum(rawdata**2,axis=0)))\n\n # RMS_mag = 1/N*np.sqrt(np.sum(rawdata_wmag['Accel_Mag']**2,axis=0))\n\n #range on each axis\n min_xyz = np.min(rawdata,axis=0)\n max_xyz = np.max(rawdata,axis=0)\n r = np.asarray(max_xyz-min_xyz)\n\n # r_mag = np.max(rawdata_wmag['Accel_Mag']) - np.min(rawdata_wmag['Accel_Mag'])\n\n #Moments on each axis\n mean = np.asarray(np.mean(rawdata,axis=0))\n var = np.asarray(np.std(rawdata,axis=0))\n sk = skew(rawdata)\n kurt = kurtosis(rawdata)\n\n # mean_mag = np.mean(rawdata_wmag['Accel_Mag'])\n # var_mag = np.std(rawdata_wmag['Accel_Mag'])\n # sk_mag = skew(rawdata_wmag['Accel_Mag'])\n # kurt_mag = kurtosis(rawdata_wmag['Accel_Mag'])\n\n #Cross-correlation between axes pairs\n xcorr_xy = np.correlate(rawdata.iloc[:,0],rawdata.iloc[:,1],mode='same')\n # xcorr_xy = xcorr_xy/np.abs(np.sum(xcorr_xy)) #normalize values\n xcorr_peak_xy = np.max(xcorr_xy)\n xcorr_lag_xy = (np.argmax(xcorr_xy))/len(xcorr_xy) #normalized lag\n\n xcorr_xz = np.correlate(rawdata.iloc[:,0],rawdata.iloc[:,2],mode='same')\n # xcorr_xz = xcorr_xz/np.abs(np.sum(xcorr_xz)) #normalize values\n xcorr_peak_xz = np.max(xcorr_xz)\n xcorr_lag_xz = (np.argmax(xcorr_xz))/len(xcorr_xz)\n\n xcorr_yz = np.correlate(rawdata.iloc[:,1],rawdata.iloc[:,2],mode='same')\n # xcorr_yz = xcorr_yz/np.abs(np.sum(xcorr_yz)) #normalize values\n xcorr_peak_yz = np.max(xcorr_yz)\n xcorr_lag_yz = (np.argmax(xcorr_yz))/len(xcorr_yz)\n\n #pack xcorr features\n xcorr_peak = np.array([xcorr_peak_xy,xcorr_peak_xz,xcorr_peak_yz])\n xcorr_lag = np.array([xcorr_lag_xy,xcorr_lag_xz,xcorr_lag_yz])\n\n #Dominant freq and relative magnitude (on acc magnitude)\n Pxx = power_spectra_welch(rawdata_wmag,fm=0,fM=10)\n domfreq = np.asarray([Pxx.iloc[:,-1].idxmax()])\n Pdom_rel = Pxx.loc[domfreq].iloc[:,-1].values/Pxx.iloc[:,-1].sum() #power at dominant freq rel to total\n\n #moments of PSD\n Pxx_moments = np.array([np.nanmean(Pxx.values),np.nanstd(Pxx.values),skew(Pxx.values),kurtosis(Pxx.values)])\n\n #moments of jerk magnitude\n jerk = rawdata_wmag['Accel_Mag'].diff().values\n jerk_moments = np.array([np.nanmean(jerk),np.nanstd(jerk),skew(jerk[~np.isnan(jerk)]),kurtosis(jerk[~np.isnan(jerk)])])\n\n #sample entropy raw data (magnitude) and FFT\n sH_raw = []; sH_fft = []\n\n for a in range(3):\n x = rawdata.iloc[:,a]\n n = len(x) #number of samples in clip\n Fs = 
np.mean(1/(np.diff(x.index)/1000)) #sampling rate in clip\n sH_raw.append(nolds.sampen(x)) #samp entr raw data\n #for now disable SH on fft\n # f,Pxx_den = welch(x,Fs,nperseg=min(256,n/4))\n # sH_fft.append(nolds.sampen(Pxx_den)) #samp entr fft\n\n sH_mag = nolds.sampen(rawdata_wmag['Accel_Mag'])\n\n #Assemble features in array\n # Y = np.array([RMS_mag,r_mag,mean_mag,var_mag,sk_mag,kurt_mag,sH_mag])\n X = np.concatenate((RMS,r,mean,var,sk,kurt,xcorr_peak,xcorr_lag,domfreq,Pdom_rel,Pxx_moments,jerk_moments,sH_raw)) #,Y))\n features.append(X)\n\n F = np.asarray(features) #feature matrix for all clips from current trial\n # clip_data['features'] = pd.DataFrame(data=F,columns=features_list,dtype='float32')\n \n # add condition of sensor\n if sensor == 'accel':\n column_list=acclist\n clip_data[trial]['features'] = pd.DataFrame(data=F,columns=column_list,dtype='float32')\n else:\n column_list=gyrlist\n # Need to concat on columns\n df_to_append = pd.DataFrame(data=F,columns=column_list,dtype='float32')\n clip_data[trial]['features'] = pd.concat([clip_data[trial]['features'], df_to_append],ignore_index=True,axis=1)\n # option 2 just change feature names to gyr\n clip_data[trial]['features'] = df_to_append = pd.DataFrame(data=F,columns=column_list,dtype='float32')",
"def update_trigger_files(self):\n\n notes_df = pd.read_csv(self.summary_fpath).set_index('participant')\n\n for k, val in self.trigger_files.items():\n # find matching header file (based on id).\n header = self.header_files[k][0]\n # Get sampling rate from header file.\n fnirs_start, sampling_rate = self.get_fnirs_time_data(header)\n loc_duration_samples = self.LOC_DURACTION_SEC * sampling_rate\n loc_order = notes_df.loc[k]['Localizer Order'][1:-1].replace(\"'\", \"\").split(', ')\n\n # REMOVES ALL SENTENCE LEVEL TRIGGERS.\n df = pd.read_csv(val[0], sep=\";\", names=[\"t\", \"sample\", \"value\"])\n df = df[df[\"value\"] != 22]\n\n df = self.add_new_durations_col(df, loc_duration_samples)\n # REMOVES ALL TASK END TRIGGERS.\n df = df[df[\"value\"] != 26]\n # Add new values based on localizer order.\n df = self.add_localizer_condition_information(df, loc_order)\n\n # Creates a trigger to indicate the beginning of this RUN.\n # Trigger is created by looking at end of resting state and subtracting\n # TRUNCATION_OFFSET_DURATION.\n rest_end_sample = df[df[\"value\"] == 27][\"sample\"]\n start_sample = int(rest_end_sample - (self.TRUCATION_OFFSET_SEC * sampling_rate))\n\n df = self.add_start_and_stop_triggers(df, start_sample, sampling_rate, loc_duration_samples)\n\n df.to_csv(f\"{self.EXPORT_DIR}/{k}_loc_trucated.tri\", header=False, index=False)",
"def process_trace(n_tr, tr, sta, orig_time, cmps, cfg):\n cmp = tr.stats.channel[2:3]\n sta[cmp] = {}\n sta[cmp][\"times\"] = tr.times(reftime=orig_time)\n\n sta[cmp][\"tr_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), sta[\"lenD\"])\n )\n sta[cmp][\"f1_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), len(cfg.picking.KURT_WINS),\n sta[\"lenD\"])\n )\n sta[cmp][\"f1_mean\"] = np.zeros(sta[\"lenD\"])\n sta[cmp][\"f3_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]),\n len(cfg.picking.KURT_WINS), sta[\"lenD\"])\n )\n sta[cmp][\"f3_mean_smooth\"] = np.zeros(\n (len(cfg.picking.CF_MEAN_SMOOTH_WIND), sta[\"lenD\"])\n )\n sta[cmp][\"f4_all\"] = np.zeros((len(cfg.picking.CF_MEAN_SMOOTH_WIND),\n sta[\"lenD\"]))\n sta[cmp][\"f1_mean_smooth\"] = np.zeros(sta[\"lenD\"])\n # Get suitable filters (exclude those fully outside Nyquist freq.)\n for phase in [\"P\", \"S\"]:\n if cmp in cmps[phase]:\n sta[\"picks\"][\"poss_obs\"][phase][cmp] = {}\n sta[cmp][\"filtwins_check\"] = [\n filt_win for filt_win in cfg.picking.FILT_WINS[phase]\n if filt_win[0] < sta[\"samplerate\"] / 2\n ]\n if cfg.picking.INTEGRATE_S is True:\n tr.integrate()\n\n for n_filt, filt in enumerate(sta[cmp][\"filtwins_check\"]):\n # Ensure that filter covers sample rate / 2\n if (tr.stats.sampling_rate / 2) <= filt[0]:\n print(\"Skipping this Kurtosis run due to sample rate/2<f\")\n continue\n tr.filter(\"bandpass\", freqmin=filt[0], freqmax=filt[1])\n try:\n sta[cmp][\"tr_results\"][n_filt] = tr.data\n except ValueError: # If input array length is inconsistent\n continue\n # Loop over kurtosis windows\n for n_kurt, kurt_win_s in enumerate(cfg.picking.KURT_WINS):\n f1 = CF_kurtosis(kurt_win_s, tr)\n sta[cmp][\"f1_results\"][n_filt, n_kurt] = f1 # Needed for weights\n f2 = kurt_transform_f2(f1, kurt_win_s, tr)\n f3 = kurt_transform_f3(f2, kurt_win_s, tr)\n\n sta[cmp][\"f3_results\"][n_filt, n_kurt] = f3\n sta[cmp][\"f1_mean\"] = np.nanmean(sta[cmp][\"f1_results\"], axis=0)[0]\n sta[cmp][\"f1_mean_smooth\"] = do_smooth(\n sta[cmp][\"f1_mean\"], cfg.picking.CF_MEAN_SMOOTH_WIND[0],\n tr.stats.sampling_rate\n )\n # ^ Throws up a warning first time due to NaN slices\n # Compute mean CF and final kurtosis transform\n f3_mean = np.nanmean(sta[cmp][\"f3_results\"], axis=0)[0]\n\n for nsm, smooth_wind in enumerate(cfg.picking.CF_MEAN_SMOOTH_WIND):\n sta[cmp][\"f3_mean_smooth\"][nsm] = do_smooth(\n f3_mean, smooth_wind, tr.stats.sampling_rate\n )\n f4 = kurt_transform_f4(sta[cmp][\"f3_mean_smooth\"][nsm],\n np.max(cfg.picking.KURT_WINS), tr)\n sta[cmp][\"f4_all\"][nsm] = f4\n\n # Now pick (avoiding end and beginning of signal)\n # Pick the P-waves\n if cmp in cmps[\"P\"]:\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm] = []\n # Find points where Kurt<0 & doesn't look like S-wave\n p_cands = np.argwhere((f4 < 0.0))\n for idx in p_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(\n cfg.picking.KURT2WGHT[\"P\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx])))\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n # Pick the S-waves\n if cmp in cmps[\"S\"]:\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm] = []\n\n # Find points where Kurt<0 & doesn't look like S-wave\n s_cands = np.argwhere((f4 < 0.0))\n for idx in s_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(cfg.picking.KURT2WGHT[\"S\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx]))\n )\n 
sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n return(sta)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots the free energy change computed from the equilibrated snapshots between the target time frames (f_ts and r_ts), in both the forward direction (data points stored in F_df and F_ddf) and the reverse direction (data points stored in R_df and R_ddf).
|
def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf):
    fig = pl.figure(figsize=(8,6))
    ax = fig.add_subplot(111)
    pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2)
    pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2)
    for dire in ['top', 'right']:
        ax.spines[dire].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    max_fts = max(f_ts)
    rr_ts = [aa/max_fts for aa in f_ts[::-1]]
    f_ts = [aa/max_fts for aa in f_ts]
    r_ts = [aa/max_fts for aa in r_ts]
    line0 = pl.fill_between([r_ts[0], f_ts[-1]], R_df[0]-R_ddf[0], R_df[0]+R_ddf[0], color='#D2B9D3', zorder=-5)
    for i in range(len(f_ts)):
        line1 = pl.plot([f_ts[i]]*2, [F_df[i]-F_ddf[i], F_df[i]+F_ddf[i]], color='#736AFF', ls='-', lw=3, solid_capstyle='round', zorder=1)
    line11 = pl.plot(f_ts, F_df, color='#736AFF', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#736AFF', ms=12, zorder=2)
    for i in range(len(rr_ts)):
        line2 = pl.plot([rr_ts[i]]*2, [R_df[i]-R_ddf[i], R_df[i]+R_ddf[i]], color='#C11B17', ls='-', lw=3, solid_capstyle='round', zorder=3)
    line22 = pl.plot(rr_ts, R_df, color='#C11B17', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#C11B17', ms=12, zorder=4)
    pl.xlim(r_ts[0], f_ts[-1])
    pl.xticks(r_ts[::2] + f_ts[-1:], fontsize=10)
    pl.yticks(fontsize=10)
    leg = pl.legend((line1[0], line2[0]), (r'$Forward$', r'$Reverse$'), loc=1, prop=FP(size=18), frameon=False)
    pl.xlabel(r'$\mathrm{Fraction\/of\/the\/simulation\/step}$', fontsize=16, color='#151B54')
    pl.ylabel(r'$\mathrm{\Delta G\/%s}$' % P.units, fontsize=16, color='#151B54')
    pl.xticks(f_ts, ['%.2f' % i for i in f_ts])
    pl.tick_params(axis='x', color='#D2B9D3')
    pl.tick_params(axis='y', color='#D2B9D3')
    pl.savefig(os.path.join(P.output_directory, 'dF_t.pdf'))
    pl.close(fig)
    return
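
A minimal driver sketch for the function above. It assumes the surrounding module provides `pl` (matplotlib.pylab), `os`, `FP` (matplotlib.font_manager.FontProperties) and a parameter object `P` with `units` and `output_directory` attributes; these names are inferred from the function body, not confirmed by the source, and the input values below are placeholders rather than real simulation output.

import os                                  # needed by plotdFvsTime for the output path
import types
import matplotlib
matplotlib.use('Agg')                      # render off-screen, no display required
import matplotlib.pylab as pl              # assumed alias used by plotdFvsTime
from matplotlib.font_manager import FontProperties as FP  # assumed alias

# Hypothetical stand-in for the module-level parameter object.
P = types.SimpleNamespace(units='kcal/mol', output_directory='.')

# Forward/reverse free energy estimates at 10 fractions of the run (placeholder numbers).
f_ts = [0.1 * (i + 1) for i in range(10)]
r_ts = list(f_ts)
F_df = [1.00 + 0.01 * i for i in range(10)]
R_df = [1.10 - 0.01 * i for i in range(10)]
F_ddf = [0.05] * 10
R_ddf = [0.05] * 10

plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf)  # writes dF_t.pdf to P.output_directory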
|
[
"def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()",
"def sysPLQF(mirror, blkFlag=True):\n import matplotlib.pyplot as plt\n import numpy as np # to ndarray.flatten ax\n\n mir = mirror\n xend = max(mir.r_t)\n\n fig, ax = plt.subplots(nrows=2, ncols=2,)\n ax = np.ndarray.flatten(ax)\n ax[0].set_title('Real Power Generated')\n for mach in mir.Machines:\n ax[0].plot(mir.r_t, mach.r_Pe, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Pe Gen '+ mach.Busnam)\n ax[0].set_xlabel('Time [sec]')\n ax[0].set_ylabel('MW')\n\n ax[2].set_title('Reactive Power Generated')\n for mach in mir.Machines:\n ax[2].plot(mir.r_t, mach.r_Q, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Q Gen '+ mach.Busnam)\n ax[2].set_xlabel('Time [sec]')\n ax[2].set_ylabel('MVAR')\n\n ax[1].set_title('Total System P Loading')\n ax[1].plot(mir.r_t, mir.r_ss_Pload, \n marker = 11,\n #fillstyle='none',\n #linestyle = ':',\n label = 'Pload')\n ax[1].set_xlabel('Time [sec]')\n ax[1].set_ylabel('MW')\n\n ax[3].set_title('System Mean Frequency')\n ax[3].plot(mir.r_t, mir.r_f,\n marker = '.',\n #linestyle = ':',\n label = r'System Frequency')\n ax[3].set_xlabel('Time [sec]')\n ax[3].set_ylabel('Frequency [PU]')\n\n # Global Plot settings\n for x in np.ndarray.flatten(ax):\n x.set_xlim(0,xend)\n x.legend()\n x.grid(True)\n\n fig.tight_layout()\n\n plt.show(block = blkFlag)",
"def plot_fftr(\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n export: str = \"\",\n sheet_name: str = \"\",\n external_axes: bool = False,\n):\n df_upper = fred_model.get_series_data(\n series_id=\"DFEDTARU\", start_date=start_date, end_date=end_date\n )\n df_lower = fred_model.get_series_data(\n series_id=\"DFEDTARL\", start_date=start_date, end_date=end_date\n )\n df = pd.DataFrame([df_upper, df_lower]).transpose()\n\n fig = OpenBBFigure(yaxis_title=\"Yield (%)\")\n fig.set_title(\"Federal Funds Target Range\")\n\n for series in df.columns:\n fig.add_scatter(x=df.index, y=df[series], name=series)\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"fftr\",\n pd.DataFrame(df, columns=[\"FFTR\"]),\n sheet_name,\n fig,\n )\n\n return fig.show(external=external_axes)",
"def ps_plot(self, join_data = False, save = False, days = False, \n rel = False):\n if (self.spectrum == None) or (self.rel_spectrum == None):\n print(\"Call the fast_find_spectrum() method first!\")\n return None\n else:\n pass\n \n if rel:\n spectrum = self.rel_spectrum\n ylab = 'Relative ' + self.g_name + ' \\'Power\\''\n f_name_new = self.f_name + 'relative'\n else:\n spectrum = self.spectrum\n ylab = self.g_name + ' Amplitude / ' + self.g_unit\n f_name_new = self.f_name\n \n dep_var = spectrum[1]\n if days:\n indep_var = 1/(spectrum[0]*24*60*60)\n xlab = ['Period', 'days']\n else:\n indep_var = spectrum[0] \n xlab = ['Frequency', 'hz']\n \n plt.figure(figsize=(16,10))\n if join_data:\n plt.plot(indep_var, dep_var)\n else:\n plt.plot(indep_var, dep_var, ls = '', marker = '+', ms = 1)\n \n plt.xlabel(xlab[0] + ' / ' + xlab[1], fontsize = 18)\n plt.ylabel(ylab, fontsize = 18)\n plt.title(self.f_title, fontsize = 22)\n if save:\n plt.savefig('Figures/' + self.path + '/' + f_name_new + '.pdf', \n format = 'pdf')\n plt.close()\n print('Figure saved as:')\n print('Figures/' + self.path + '/' + f_name_new + '.pdf')\n return None",
"def plot_ffrmc(\n parameter: str = \"10_year\",\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n export: str = \"\",\n sheet_name: str = \"\",\n external_axes: bool = False,\n):\n series_id = fred_model.FFRMC_PARAMETER_TO_FRED_ID[parameter]\n\n df = fred_model.get_ffrmc(parameter, start_date, end_date)\n\n fig = OpenBBFigure()\n fig.set_title(\n f\"{ID_TO_NAME_FFRMC[series_id]} Treasury Constant Maturity Minus Federal Funds Rate\"\n )\n\n fig.add_scatter(x=df.index, y=df[series_id], name=series_id, mode=\"lines\")\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n series_id,\n pd.DataFrame(df, columns=[series_id]),\n sheet_name,\n fig,\n )\n\n return fig.show(external=external_axes)",
"def live_plot(filename, x_value, y_value, scroll=True, refresh_rate=1000): #default is 1 sample per second\n data_file, dc_ps_dev, device_1, device_2 = setup(filename, x_value, y_value)\n absolute_time = time.time()\n # Create figure for plotting\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n x = []\n y = []\n y1 = []\n f = open(data_file, \"r\")\n lines = f.readlines()\n title = lines[0]\n x_axis, y_axis = lines[1].split(\",\")[0], lines[1].split(\",\")[1]\n f.close()\n \n f = open(data_file, \"a\")\n def animate(i):\n \"\"\"Sub-function that updates data for each new frame.\n \n \"\"\"\n # Take measurements\n item1, item2 = take_measurement(x_value, absolute_time, device_1, 102), take_measurement(y_value, absolute_time, device_2)\n save_to_file(f, item1, item2) # Save to file\n y_vals = str(item2).split(\",\") \n x.append(item1)\n y.append(float(y_vals[0]))\n if len(y_vals)>1: # Handles case with two voltages vs. time\n y1.append(float(y_vals[1]))\n\n ## DEPRECATED: Slows down code\n # Parse data file for x and y\n #f = open(data_file, \"r\")\n #lines = f.readlines()\n #f.close()\n #if len(lines) > len(x)+2:\n # for line in lines[len(x)+2:]:\n # x.append(float(line.split(\",\")[0]))\n # y.append(float(line.split(\",\")[1]))\n \n # Plot data\n if scroll and len(x)> 20: # Window length for scroll mode\n x_plot, y_plot = x[-20:], y[-20:]\n if len(y_vals)>1:\n y1_plot = y1[-20:]\n else:\n x_plot, y_plot, y1_plot = x, y, y1\n ax.clear()\n ax.plot(x_plot, y_plot)\n if len(y_vals)>1: # Handles case with two voltages vs. time\n ax.plot(x_plot, y1_plot)\n plt.title(title)\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n\n ani = animation.FuncAnimation(fig, animate, interval=int(refresh_rate))\n plt.show()\n f.close()",
"def plot_delta(fname, temp, delta_ts_list, delta_rxn_list, labels, var='G'):\n max_y_lines = 5\n x_axis = np.array([0, 1, 3, 4, 6, 7])\n y_axis = []\n y_labels = []\n # y_min = np.floor(np.array(g_rxn_list).min())\n # y_max = np.ceil(np.array(g_ts_list).max())\n for index in range(max_y_lines):\n try:\n y_axis.append(np.array([0.0, 0.0, delta_ts_list[index], delta_ts_list[index],\n delta_rxn_list[index], delta_rxn_list[index]]))\n except IndexError:\n y_axis.append(None)\n try:\n y_labels.append(labels[index])\n except IndexError:\n y_labels.append(None)\n\n make_fig(fname, x_axis, y_axis[0],\n x_label='reaction coordinate', y_label='\\u0394' + var + ' at {} K (kcal/mol)'.format(temp),\n y1_label=y_labels[0], y2_label=y_labels[1], y3_label=y_labels[2], y4_label=y_labels[3],\n y5_label=y_labels[4], y2_array=y_axis[1], y3_array=y_axis[2], y4_array=y_axis[3], y5_array=y_axis[4],\n ls2='-', ls3='-', ls4='-', ls5='-',\n # y_lima=y_min, y_limb=y_max,\n hide_x=True,\n )",
"def plot_forecast_actual(solar_fc, solar_ts, wind_fc, wind_ts, time_vector, start_time, end_time, node):\n start_idx = int(np.where(time_vector == start_time)[0])\n end_idx = int(np.where(time_vector == end_time)[0])\n solar_diff = RPD(solar_fc, solar_ts)\n wind_diff = RPD(wind_fc, wind_ts)\n fig, ax = plt.subplots(nrows=2, ncols=2)\n ax[0, 0].plot(solar_fc[start_idx:end_idx, node], label='forecast')\n ax[0, 0].plot(solar_ts[start_idx:end_idx, node], label='actual')\n ax[0, 0].legend()\n ax[0, 0].set_xlabel('Time [h]')\n ax[0, 0].set_ylabel('MWh')\n ax[0, 0].set_title(f'Solar energy of node {node} from {start_time} to {end_time}')\n ax[0, 1].plot(wind_fc[start_idx:end_idx, node], label='forecast')\n ax[0, 1].plot(wind_ts[start_idx:end_idx, node], label='actual')\n ax[0, 1].legend()\n ax[0, 1].set_xlabel('Time [h]')\n ax[0, 1].set_ylabel('MWh')\n ax[0, 1].set_title(f'Wind energy of node {node} from {start_time} to {end_time}')\n ax[1, 0].plot(solar_diff[start_idx:end_idx, node], color='red', label='difference')\n ax[1, 0].legend()\n ax[1, 0].set_xlabel('Time [h]')\n ax[1, 0].set_ylabel('%')\n ax[1, 0].set_title(f'Solar difference between forecast and actual')\n ax[1, 1].plot(wind_diff[start_idx:end_idx, node], color='red', label='difference')\n ax[1, 1].legend()\n ax[1, 1].set_xlabel('Time [h]')\n ax[1, 1].set_ylabel('%')\n ax[1, 1].set_title(f'Wind difference between forecast and actual')\n plt.show()",
"def show_dcr_results(dg):\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()",
"def show_derivative(self):\n for trace in self.plotWidget.plotDataItems:\n dt = float(trace.attrs['dt'])\n dtrace = np.diff(trace.data)\n x = pgplot.make_xvector(dtrace, dt)\n self.plotWidget.plot(x, dtrace, pen=pg.mkPen('r'))",
"def plot_firing_rates(self):\n\t\tprint \"Firing Rates plot\"\n\t\tpylab.plot(self.frate)",
"def init(self, f_plot=100, timewindow=90.):\n # Data stuff\n # RT data\n t_plot = 1/f_plot\n self.interval = int(100 * t_plot) # Number of points plotted = Plot refreshing period\n self.bufsize = int(timewindow / t_plot) # Buffer size\n self.rtdata = np.array([0.00] * self.bufsize) # RT Data buffer\n self.x = np.linspace(-timewindow, 0.0, self.bufsize) # Defining the x-axe marges\n self.y = np.zeros(self.bufsize, dtype=float) # Defining the y-axe marges\n # Full data\n self.full_y = [0.00] # Full data buffer\n self.full_x = [0]\n # pyqtGraph stuff\n self.RT_plt = ui.graphicsViewRT # Connecting the RT_plotter output to graphicsViewRT\n self.full_plt = ui.graphicsViewRT_full\n\n # RT plot\n # self.RT_plt.setLabel(\"left\", \"Glucose concentration\", \"mM/L\") # y label\n self.RT_plt.setLabel(\"left\", \"Voltage\", \"mV\")\n self.RT_plt.setLabel(\"bottom\", \"Time\", \"s\") # x label\n self.RT_curve = self.RT_plt.plot(self.x, self.y, pen='y') # Plotting the curve self.y = f(self.x). The pen argument is for the color.\n # self.RT_plt.setBackground('w') # Change the background to white\n\n # Full plot\n # self.full_plt.setLabel(\"left\", \"Glucose concentration\", \"mM/L\") # y label\n self.full_plt.setLabel(\"left\", \"Voltage\", \"mV\")\n self.full_plt.setLabel(\"bottom\", \"Time\", \"s\") # x label\n self.full_curve = self.full_plt.plot(self.full_x, self.full_y, pen='g') # Plotting the curve self.y = f(self.x). The pen argument is for the color.\n # self.full_plt.setBackground('w') # Change the background to white",
"def volatility_factor_plot(prices: list, dates: list, vf_data: VFStopsResultType,\n green_zone_x_values: List[list], red_zone_x_values: List[list],\n yellow_zone_x_values: List[list], y_range: float, minimum: float,\n text_str: str = \"\", str_color: str = \"\", **kwargs):\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n register_matplotlib_converters()\n\n title = kwargs.get('title', '')\n save_fig = kwargs.get('save_fig', False)\n filename = kwargs.get('filename', 'temp_candlestick.png')\n\n stop_loss_objects = vf_data.data_sets\n\n shown_stop_loss = f\"VF: {np.round(vf_data.vf.curated, 3)}\\n\"\n if vf_data.current_status.status.value != 'stopped_out':\n shown_stop_loss += f\"Stop Loss: ${np.round(vf_data.stop_loss.curated, 2)}\"\n else:\n shown_stop_loss += \"Stop Loss: n/a\"\n\n fig, ax_handle = plt.subplots()\n\n date_indexes = [datetime.strptime(date, '%Y-%m-%d').date() for date in dates]\n ax_handle.plot(date_indexes, prices, color='black')\n\n # Set the tick spacing (this is because dates crowd easily)\n mid_tick_size = int(len(date_indexes) / 4)\n ax_handle.xaxis.set_ticks([\n date_indexes[0], date_indexes[mid_tick_size], date_indexes[mid_tick_size * 2],\n date_indexes[mid_tick_size * 3], date_indexes[-1]\n ])\n\n y_start = minimum - (y_range * 0.05)\n height = y_range * 0.02\n\n for stop in stop_loss_objects:\n sub_dates = [date_indexes[index] for index in stop.time_index_list]\n ax_handle.plot(sub_dates, stop.caution_line, color='gold')\n ax_handle.plot(sub_dates, stop.stop_loss_line, color='red')\n\n for green_zone in green_zone_x_values:\n start = mdates.date2num(date_indexes[green_zone[0]])\n end = mdates.date2num(date_indexes[green_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='green',\n facecolor='green',\n fill=True\n )\n )\n\n for red_zone in red_zone_x_values:\n start = mdates.date2num(date_indexes[red_zone[0]])\n end = mdates.date2num(date_indexes[red_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='red',\n facecolor='red',\n fill=True\n )\n )\n\n for yellow_zone in yellow_zone_x_values:\n start = mdates.date2num(date_indexes[yellow_zone[0]])\n end = mdates.date2num(date_indexes[yellow_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='yellow',\n facecolor='yellow',\n fill=True\n )\n )\n\n ax_handle.set_title(title)\n\n if len(text_str) > 0 and len(str_color) > 0:\n new_start = minimum - (y_range * 0.2)\n new_end = minimum + (y_range * 1.02)\n ax_handle.set_ylim(new_start, new_end)\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.02,\n text_str,\n color=str_color,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n if len(shown_stop_loss) > 0:\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.90,\n shown_stop_loss,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n try:\n if save_fig:\n temp_path = os.path.join(\"output\", \"temp\")\n if not os.path.exists(temp_path):\n # For functions, this directory may not exist.\n plt.close(fig)\n plt.clf()\n return\n\n filename = os.path.join(temp_path, filename)\n if os.path.exists(filename):\n os.remove(filename)\n plt.savefig(filename)\n\n else:\n plt.show()\n\n except: # pylint: disable=bare-except\n print(\n f\"{utils.WARNING}Warning: plot failed to render in 'volatility factor plot' of \" +\n 
f\"title: {title}{utils.NORMAL}\")\n\n plt.close('all')\n plt.clf()",
"def mainCalculation(): \r\n tau = 0.25 # Difference between time steps\r\n T = 1 # Time horizon\r\n EulSteps = 100 # Euler discretization steps between two tenor points\r\n \r\n NoOfSteps = int(T / tau) # Number of time-steps \r\n NoOfFor = int(T / tau) # Number of forward rates we want to generate\r\n \r\n # Note that normally you will obtain these using calibration, now I simulate them randomly\r\n V = insVol(NoOfSteps, NoOfFor)\r\n V2 = insVol(NoOfSteps*EulSteps, NoOfFor)\r\n L = inFor(NoOfFor)\r\n\r\n FRW, FRWanti = Generate_FRW_Eul(NoOfSteps*EulSteps, NoOfFor, T, tau, V2, L) # Generate forward rates using Euler discretization\r\n\r\n # Obtain labels and locations for the x-axis\r\n labels = np.zeros(NoOfFor+1) ; locs = np.zeros(NoOfFor+1) ; locsLS = np.zeros(NoOfFor+1)\r\n hv = 0\r\n for i in range(NoOfFor+1):\r\n labels[i] = hv\r\n locs[i] = i * EulSteps\r\n locsLS[i] = i\r\n hv += 0.25\r\n\r\n # Plot the different forward rates\r\n plt.figure()\r\n for i in range(len(FRW)):\r\n plt.plot(FRW[i,:])\r\n plt.title('Forward rates using Euler discretization')\r\n plt.ylabel('Forward rate')\r\n plt.xlabel('Time (years)')\r\n plt.xticks(locs,labels, rotation='45')\r\n plt.show()\r\n \r\n FRWLS, FRWLSanti = Generate_FRW_LS(NoOfSteps, NoOfFor, T, tau, V, L) # Generate forward rates using a large time-step\r\n \r\n # Plot the different forward rates\r\n plt.figure()\r\n for i in range(len(FRWLS)):\r\n plt.plot(FRWLS[i,:])\r\n plt.title('Forward rates using big time-steps')\r\n plt.ylabel('Forward rate')\r\n plt.xlabel('Time (years)')\r\n plt.xticks(locsLS, labels, rotation='45')\r\n plt.show()",
"def plot_fits_and_residuals(all_fits_df, dfs_list, expt_name, **kwargs):\n\n colors = cm.rainbow(np.linspace(0, 1, len(dfs_list)))\n fig = plt.figure(figsize=(5, 5), tight_layout=True)\n fig.set_dpi(300)\n\n filename = f'{expt_name}_fits_and_residuals'\n fileformat = '.png'\n \n # Set parameters for decay traces plot\n xlabel_traces = kwargs.get('xlabel_traces', 'Time after Chase (Hrs.)')\n ylabel_traces = kwargs.get('ylabel_traces', 'YFP(t)/YFP(0)')\n ylim_traces = kwargs.get('ylim_traces', (0, 1.2))\n xticks_traces = kwargs.get('xticks_traces', make_ticks(all_fits_df.x_input, decimals=0))\n yticks_traces = kwargs.get('y_ticks_traces', make_ticks((0, 1), decimals=1, n_ticks=7))\n xlim_traces = kwargs.get('xlim_traces', (xticks_traces.min(), xticks_traces.max())) \n # Set parameters for decay fit residuals plot\n xlabel_resids = kwargs.get('xlabel_resids', xlabel_traces)\n ylabel_resids = kwargs.get('ylabel_resids', 'Residuals')\n xlim_resids = kwargs.get('xlim_resids', xlim_traces)\n xticks_resids = xticks_traces\n yticks_resids = kwargs.get('yticks_resids', make_yticks_0cent(all_fits_df.residual))\n ylim_resids = kwargs.get('ylim_resids', (yticks_resids.min(), yticks_resids.max()))\n \n # Set parameters for decay fit residuals kernel density estimate\n # plot \n xlabel_kde = kwargs.get('xlabel_kde', ylabel_resids)\n ylabel_kde = kwargs.get('ylabel_kde', 'Density')\n xlim_kde = kwargs.get('xlim_kde', ylim_resids)\n ylim_kde = kwargs.get('ylim_kde', None)\n xticks_kde = yticks_resids\n # yticks_kde will get set below during \n # density calcuation\n \n # Set parameters used across all plots\n hidden_spines = kwargs.get('hidden_spine', ['top', 'right'])\n labelfontsize = kwargs.get('labelfontsize', 12)\n linewidth = kwargs.get('linewidth', 1)\n linealpha = kwargs.get('linealpha', 1)\n scatteralpha = kwargs.get('scatteralpha', 0.8)\n scattersize = kwargs.get('scattersize', 5)\n \n # Make the residuals scatter plot\n ax = fig.add_subplot(222)\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n ax.scatter(cell_df.x_input, cell_df.residual,\n s=scattersize, alpha=scatteralpha,\n facecolor='white', edgecolor=colors[cell_index])\n\n ax.axhline(0, linewidth=linewidth, alpha=linealpha, color='black')\n for spine in [ax.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n try:\n ax.set_xticks(xticks_resids)\n except:\n pass\n try:\n ax.set_yticks(yticks_resids)\n except:\n pass\n try:\n ax.set_ylim(ylim_resids)\n except:\n pass\n if xlabel_resids:\n ax.set_xlabel(xlabel_resids, fontsize=labelfontsize)\n if ylabel_resids:\n ax.set_ylabel(ylabel_resids, fontsize=labelfontsize) \n\n ax.set_aspect(1.0/ax.get_data_ratio(), adjustable='box')\n\n # Scatter plot of traces and line plot of fitted decays\n ax2 = fig.add_subplot(221)\n\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n ax2.plot(cell_df.x_input, cell_df.y_pred_norm/cell_df.y_pred_norm.max(),\n linewidth=linewidth, alpha=linealpha, color=colors[cell_index])\n ax2.scatter(cell_df.x_input, cell_df.y_input_norm/cell_df.y_input_norm.max(),\n s=scattersize, alpha=scatteralpha, color=colors[cell_index])\n\n if ylim_traces:\n ax2.set_ylim(ylim_traces)\n if xlim_traces:\n ax2.set_xlim(xlim_traces) \n try:\n ax2.set_xticks(xticks_traces)\n except:\n pass\n try:\n ax2.set_yticks(yticks_traces)\n except:\n pass \n if xlabel_traces:\n ax2.set_xlabel(xlabel_traces, 
fontsize=labelfontsize)\n if ylabel_traces:\n ax2.set_ylabel(ylabel_traces, fontsize=labelfontsize) \n\n ax2.set_aspect(1.0/ax2.get_data_ratio(), adjustable='box')\n for spine in [ax2.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n\n # Smoothed hist of residuals for each cell (KDE plot)\n ax3 = fig.add_subplot(223)\n \n densities = []\n for cell_index in all_fits_df.cell_index.unique()[:]:\n cell_df = all_fits_df.loc[all_fits_df.cell_index == cell_index, :]\n density = gaussian_kde(cell_df.residual)\n xs = np.linspace(all_fits_df.residual.min(),all_fits_df.residual.max(),200)\n ax3.plot(xs,density(xs), color=colors[cell_index],\n alpha=linealpha, linewidth=linewidth)\n densities.append(density(xs))\n\n # Also plot total residuals\n density = gaussian_kde(all_fits_df.residual)\n xs = np.linspace(all_fits_df.residual.min(),all_fits_df.residual.max(),200)\n ax3.plot(xs,density(xs), color='black',\n alpha=linealpha*2, linewidth=linewidth)\n densities.append(density(xs))\n \n # Figure out whech density outuput array has the highest y value and\n # set the yticks of the plot using that density array\n max_dens = np.array([np.max(arr) for arr in densities])\n longest_range_den = densities[max_dens.argmax()]\n yticks_kde = make_ticks(longest_range_den)\n\n if ylim_kde:\n ax3.set_ylim(ylim)\n if xlim_kde:\n ax3.set_xlim(xlim_kde)\n if xlabel_kde:\n ax3.set_xlabel(xlabel_kde, fontsize=labelfontsize)\n if ylabel_kde:\n ax3.set_ylabel(ylabel_kde, fontsize=labelfontsize)\n try:\n ax3.set_yticks(yticks_kde)\n except:\n pass\n try:\n ax3.set_xticks(xticks_kde)\n except:\n pass\n\n ax3.set_aspect(1.0/ax3.get_data_ratio(), adjustable='box')\n for spine in [ax3.spines[hidden_spine] for hidden_spine in hidden_spines]:\n spine.set_visible(False)\n\n if filename and fileformat:\n fig.savefig(f'{filename}{fileformat}', transparent=True)\n print(f'Saved plot at {filename}{fileformat}')",
"def plot_tbffr(\n parameter: str = \"TB3SMFFM\",\n start_date: Optional[str] = None,\n end_date: Optional[str] = None,\n export: str = \"\",\n sheet_name: str = \"\",\n external_axes: bool = False,\n):\n series_id = fred_model.TBFFR_PARAMETER_TO_FRED_ID[parameter]\n\n df = fred_model.get_tbffr(\n parameter=parameter, start_date=start_date, end_date=end_date\n )\n\n if df.empty:\n return console.print(\n f\"[red]No data found for {ID_TO_NAME_TBFFR[series_id]}[/red]\"\n )\n\n fig = OpenBBFigure(yaxis_title=\"Yield (%)\")\n fig.set_title(\n f\"{ID_TO_NAME_TBFFR[series_id]} Treasury Bill Minus Federal Funds Rate\"\n )\n\n fig.add_scatter(x=df.index, y=df[series_id], name=\"Yield\")\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n series_id,\n pd.DataFrame(df, columns=[\"TBFFR\"]),\n sheet_name,\n fig,\n )\n\n return fig.show(external=external_axes)",
"def update(fgframeno, *update_artists):\n \n fgout = fgout_grid.read_frame(fgframeno)\n print('Updating plot at time %s' % timedelta(seconds=fgout.t))\n \n # unpack update_artists (must agree with definition above):\n eta_plot, u_plot, sim_zeta_plot, sim_u_plot, title_text = update_artists\n \n # reset title to current time:\n title_text = ax1.set_title('y = %.3f at time %.3f seconds, frame %i' \\\n % (ytrans,fgout.t,fgframeno), fontsize=8)\n\n # reset surface eta to current state:\n #eta = ma.masked_where(fgout.h<0.001, fgout.eta)\n h = fgout.h[:,jytrans]\n #import pdb; pdb.set_trace()\n jwet = where(h > 0.001)[0].max()\n eta_wet = fgout.eta[:,jytrans]\n eta_wet[(jwet+2):] = nan\n \n eta_plot.set_ydata(eta_wet)\n \n tx = vstack((fgout.t*ones(fgout.x.shape), fgout.x)).T\n sim_zeta = zeta_fcn(tx)\n sim_zeta_plot.set_ydata(sim_zeta)\n \n sim_u_vel = u_vel_fcn(tx)\n sim_u_plot.set_ydata(sim_u_vel)\n\n u_plot.set_ydata(fgout.u[:,jytrans])\n \n update_artists = (eta_plot, u_plot, sim_zeta_plot, sim_u_plot, title_text)\n return update_artists",
"def main():\n # total snapshots collected\n ntsnap = 6000\n\n # total time\n tot = 60.0\n\n tsnap = np.linspace(0, tot, ntsnap,endpoint=False)\n dt = tsnap[2] - tsnap[1]\n\n print 'dt = ', dt\n\n\n x0 = np.array([1,0])\n\n def F_vdp(x):\n \"\"\"\n Function for vdp\n\n :param x:\n :return:\n \"\"\"\n nu = 2\n F = np.zeros(x.shape)\n F[0] = x[1]\n F[1] = nu*(1-x[0]*x[0])*x[1] - x[0]\n return F\n\n # A12\n A12 = np.array([[-1, -1]])\n\n # matrix exponential\n xsnap = np.zeros((ntsnap, 2))\n closure = np.zeros((ntsnap, 1))\n xsnap[0,:] = x0\n for i_time in range(1, ntsnap):\n\n # first order euler\n xsnap[i_time, :] = xsnap[i_time - 1, :] + dt*F_vdp(xsnap[i_time - 1, :])\n closure[i_time, :] = xsnap[i_time, 1] # F_vdp(xsnap[i_time - 1])[0]\n\n # mkdir\n mkdir('data')\n mkdir('image')\n\n # plot full\n plt.figure()\n plt.plot(tsnap, xsnap[:,0], 'k-', label='$x_1$')\n plt.plot(tsnap, xsnap[:,1], 'r-', label='$x_2$')\n lgd = plt.legend(bbox_to_anchor=(1, 0.5))\n plt.xlabel('time')\n plt.ylabel('component value of $x$')\n plt.savefig('./image/2d_vdp_full.png', bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.close()\n\n # save state data\n # first need to cut to a LES\n xsnap_les = xsnap[:,0:1]\n # need to transpose to (1,4000) for consistence\n xsnap_save = xsnap_les.transpose()\n np.save('./data/physical_snapshots_resolved_2d_vdp_ntsnap_' + \\\n str(ntsnap) + '_tot_' + str(int(tot)) + '.npy', xsnap_save)\n\n # save closure+state data\n closure_save = closure.transpose()\n np.savez('./data/closure_2d_vdp_ntsnap_' + \\\n str(ntsnap) + '_tot_' + str(int(tot)) + '.npz', usnap_les=xsnap_save, ec_snap=closure_save)\n\n # print\n print xsnap_save.shape\n print closure_save.shape\n\n \n ##############################################################\n # debug phase: check MSE of dy/dt vs analytical expression\n ## target without last term\n target = (closure[1:,:] - closure[:-1,:])/dt\n eff_states = xsnap_les[:-1,:]\n eff_closure = closure[:-1,:]\n\n ## analytically:\n nu = 2.0\n \n analytical_prediction = nu*eff_closure - nu*eff_states*eff_states*eff_closure - eff_states\n \n\n print 'mean squared error on whole data =', np.square(analytical_prediction-target).mean()",
"def view(self):\n import matplotlib.pyplot as plt\n for sp in range(self.nspecies):\n plt.figure(sp+1)\n plt.title('Orbitals for specie='+ str(sp)+' Znuc='+str(self.sp2charge[sp]))\n for j,ff in zip(self.sp_mu2j[sp], self.psi_log[sp]):\n if j>0 :\n plt.plot(self.rr, ff, '--', label=str(j))\n else:\n plt.plot(self.rr, ff, '-', label=str(j))\n #plt.xlim([0.0,3.0])\n plt.legend()\n \n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots the free energy differences evaluated for each pair of adjacent states, for all methods. The layout places approximately 'nb' lambda pairs (groups of per-method bars) per subplot.
|
def plotdFvsLambda2(nb=10):
    x = numpy.arange(len(df_allk))
    if len(x) < nb:
        return
    # Split the adjacent-state (lambda pair) indices into chunks of roughly nb per subplot.
    xs = numpy.array_split(x, len(x)//nb + 1)
    mnb = max([len(i) for i in xs])
    fig = pl.figure(figsize=(8,6))
    width = 1./(len(P.methods)+1)
    elw = 30*width
    colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}
    ndx = 1
    for x in xs:
        lines = tuple()
        ax = pl.subplot(len(xs), 1, ndx)
        # One bar per method, offset by `width` within each lambda-pair group.
        for name in P.methods:
            y = [df_allk[i][name]/P.beta_report for i in x]
            ye = [ddf_allk[i][name]/P.beta_report for i in x]
            line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))
            lines += (line[0],)
        for side in ['left', 'right', 'top', 'bottom']:
            if side == 'left':
                ax.yaxis.set_ticks_position(side)
            else:
                ax.spines[side].set_color('none')
        pl.yticks(fontsize=10)
        ax.xaxis.set_ticks([])
        # Label each group with the adjacent-state pair it represents (e.g. 3-4).
        for i in x+0.5*width*len(P.methods):
            ax.annotate(r'$\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')
        pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))
        ndx += 1
    leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title=r'$\mathrm{\Delta G\/%s\/}\mathit{vs.}\/\mathrm{lambda\/pair}$' % P.units, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')
    pl.close(fig)
    return
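
A similar minimal driver sketch for plotdFvsLambda2, again assuming the module-level names the function relies on (`numpy`, `pl`, `os`, `FP`, a parameter object `P`, and the per-pair dictionaries `df_allk`/`ddf_allk` keyed by method name); all of these, and the numbers below, are hypothetical placeholders rather than values taken from the source.

import os                                  # needed by plotdFvsLambda2 for the output path
import types
import numpy
import matplotlib
matplotlib.use('Agg')                      # render off-screen, no display required
import matplotlib.pylab as pl              # assumed alias used by plotdFvsLambda2
from matplotlib.font_manager import FontProperties as FP  # assumed alias

# Hypothetical parameter object and per-lambda-pair estimates for two methods.
P = types.SimpleNamespace(methods=['TI', 'MBAR'], beta_report=1.0,
                          units='kcal/mol', output_directory='.')
df_allk = [{'TI': 0.50 + 0.01 * i, 'MBAR': 0.50 + 0.012 * i} for i in range(25)]
ddf_allk = [{'TI': 0.05, 'MBAR': 0.04} for _ in range(25)]

plotdFvsLambda2(nb=10)  # writes dF_state.pdf with roughly 10 lambda pairs per subplot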
|
[
"def plot_comparison():\n fig,ax = plt.subplots(figsize=(7,3),ncols=2)\n fig.suptitle('TIM4 difference for distributed stiffness')\n for i in np.arange(200,1200,200):\n UIC60props = UIC60properties()\n tim4 = Timoshenko4(UIC60props,0.5)\n tim4el = Timoshenko4eb(UIC60props,Pad(K = i*10**6),0.5)\n ax[0].plot(-tim4el.N_w1(np.arange(0,1.1,0.1))+tim4.N_w1(np.arange(0,1.1,0.1)),label = '${} [MN/m^2]$'.format(str(i)))#r'$N_{w1}(\\xi)$')\n ax[1].plot(-tim4el.N_w2(np.arange(0,1.1,0.1))+tim4.N_w2(np.arange(0,1.1,0.1)),label = '${} [MN/m^2]$'.format(str(i)))#r'$N_{w2}(\\xi)$')\n ax[0].legend()\n ax[0].set_xlabel(r\"$\\xi = x/L$\")\n ax[1].set_xlabel(r\"$\\xi = x/L$\")\n ax[0].set_ylabel('$N_{1}-N_{1.k_p} [-]$')\n ax[1].set_ylabel('$N_2-N_{2.k_p} [-]$')\n fig.subplots_adjust(wspace=0.28)\n fig.tight_layout()\n return fig,ax",
"def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)",
"def plot_variables(self, n, show=False):\n\t\tfig, ax = plt.subplots(6, 1, sharex = True, figsize = (8, 14))\n\t\tplt.subplots_adjust(hspace = 0)\n\t\tend = len(n.history[\"det(F)\"])\n\t\tepochs = np.arange(end)\n\t\ta, = ax[0].plot(epochs, n.history[\"det(F)\"], label = 'Training data')\n\t\tb, = ax[0].plot(epochs, n.history[\"det(test F)\"], label = 'Test data')\n\t\t# ax[0].axhline(y=5,ls='--',color='k')\n\t\tax[0].legend(frameon = False)\n\t\tax[0].set_ylabel(r'$|{\\bf F}_{\\alpha\\beta}|$')\n\t\tax[0].set_title('Final Fisher info on test data: %.3f'%n.history[\"det(test F)\"][-1])\n\t\tax[1].plot(epochs, n.history[\"Λ\"])\n\t\tax[1].plot(epochs, n.history[\"test Λ\"])\n\t\tax[1].set_xlabel('Number of epochs')\n\t\tax[1].set_ylabel(r'$\\Lambda$')\n\t\tax[1].set_xlim([0, len(epochs)]);\n\t\tax[2].plot(epochs, n.history[\"det(C)\"])\n\t\tax[2].plot(epochs, n.history[\"det(test C)\"])\n\t\tax[2].set_xlabel('Number of epochs')\n\t\tax[2].set_ylabel(r'$|{\\bf C}|$')\n\t\tax[2].set_xlim([0, len(epochs)]);\n\t\t\n\t\t\n\t\t# Derivative of first summary wrt to theta1\t\t\t\t theta1 is 3rd dimension index 0\n\t\tax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,0]\n\t\t\t, color = 'C0', label=r'$\\theta_1$',alpha=0.5)\n\t\t# Test Derivative of first summary wrt to theta1\t\t\t\t theta1 is 3rd dimension index 0\n\t\tax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,0]\n\t\t\t, color = 'C1', label=r'$\\theta_1$',alpha=0.5)\n\t\tax[3].set_ylabel(r'$\\partial\\mu/\\partial\\theta_1$')\n\t\tax[3].set_xlabel('Number of epochs')\n\t\tax[3].set_xlim([0, len(epochs)])\n\t\t# ax[3].legend(frameon=False)\n\n\t\t# Derivative of first summary wrt to theta2\t\t\t\t theta2 is 3rd dimension index 1\n\t\tax[4].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,1]\n\t\t\t, color = 'C0', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n\t\t# Test Derivative of first summary wrt to theta2\t\t\t\t theta2 is 3rd dimension index 1\n\t\tax[4].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,1]\n\t\t\t, color = 'C1', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n\t\tax[4].set_ylabel(r'$\\partial\\mu/\\partial\\theta_2$')\n\t\tax[4].set_xlabel('Number of epochs')\n\t\tax[4].set_xlim([0, len(epochs)])\n\t\t# ax[4].legend(frameon=False)\n\n\t\t# Mean of network output summary 1\n\t\tax[5].plot(epochs, np.array(n.history[\"μ\"])[:,0],alpha=0.5)\n\t\t# Mean of test output network summary 1\n\t\tax[5].plot(epochs, np.array(n.history[\"test μ\"])[:,0],alpha=0.5)\n\t\tax[5].set_ylabel('μ_1')\n\t\tax[5].set_xlabel('Number of epochs')\n\t\tax[5].set_xlim([0, len(epochs)])\n\t\t\n\n\t\tprint ('Maximum Fisher info on train data:',np.max(n.history[\"det(F)\"]))\n\t\tprint ('Final Fisher info on train data:',(n.history[\"det(F)\"][-1]))\n\t\t\n\t\tprint ('Maximum Fisher info on test data:',np.max(n.history[\"det(test F)\"]))\n\t\tprint ('Final Fisher info on test data:',(n.history[\"det(test F)\"][-1]))\n\n\t\tif np.max(n.history[\"det(test F)\"]) == n.history[\"det(test F)\"][-1]:\n\t\t\tprint ('Promising network found, possibly more epochs needed')\n\n\t\tplt.savefig(f'{self.figuredir}variables_vs_epochs_{self.modelversion}.png')\n\t\tif show: plt.show()\n\t\tplt.close()",
"def display_states_derivatives(time_axis, states_derivatives):\n grid_opt = {\"color\":\"lightgray\", \"linestyle\":\"--\"}\n cycle = cycler(color=['r', 'g', 'b'])\n\n fig, axs = plt.subplots(2, 2, figsize=((12, 9)))\n fig.suptitle('Evolution of states', fontsize=16)\n\n legend = ['vx', 'vy', 'vz']\n axs[0, 0].set_prop_cycle(cycle)\n axs[0, 0].plot(time_axis, states_derivatives[:, 0:3])\n axs[0, 0].legend(legend)\n axs[0, 0].set_xlabel('Time (s)')\n axs[0, 0].set_ylabel('Speed (m/s)')\n axs[0, 0].set_title('Evolution of linear velocities')\n axs[0, 0].grid(**grid_opt)\n\n legend = ['ax', 'ay', 'az']\n axs[0, 1].set_prop_cycle(cycle)\n axs[0, 1].plot(time_axis, states_derivatives[:, 6:9])\n axs[0, 1].legend(legend)\n axs[0, 1].set_xlabel('Time (s)')\n axs[0, 1].set_ylabel('Acceleration [m/s2]')\n axs[0, 1].set_title('Evolution of linear accelerations')\n axs[0, 1].grid(**grid_opt)\n\n legend = ['p', 'q', 'r']\n axs[1, 0].set_prop_cycle(cycle)\n axs[1, 0].plot(time_axis, states_derivatives[:, 3:6])\n axs[1, 0].legend(legend)\n axs[1, 0].set_xlabel('Time (s)')\n axs[1, 0].set_ylabel('Angular velocity (rad/s)')\n axs[1, 0].set_title('Evolution of angular velocities')\n axs[1, 0].grid(**grid_opt)\n\n legend = ['ap', 'aq', 'ar']\n axs[1, 1].set_prop_cycle(cycle)\n axs[1, 1].plot(time_axis, states_derivatives[:, 9:12])\n axs[1, 1].legend(legend)\n axs[1, 1].set_xlabel('Time (s)')\n axs[1, 1].set_ylabel('Angular acceleration [rad/s2]')\n axs[1, 1].set_title('Evolution of angular accelerations')\n axs[1, 1].grid(**grid_opt)",
"def plot_hydrogen_balance(self):\n n_axes = self.process_output[\"times\"].shape[0]\n fig = plt.figure(figsize=(6.0, 5.5))\n fig.suptitle('Hydrogen production and utilization over the year', fontsize=fontsize+1, fontweight='normal', color='k')\n axes = fig.subplots(n_axes)\n for index, ax in enumerate(axes):\n x1, y1 = self.process_output[\"times\"][index, :] / 24, +self.process_output[\"H2_produced\"][index, :]\n x2, y2 = self.process_output[\"times\"][index, :] / 24, -self.process_output[\"H2_utilized\"][index, :]\n for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)\n for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)\n ax.plot([0.0], [0.0], linestyle=\"\", marker=\"\", label=\"Period \" + str(index + 1))\n ax.plot(x1, y1, linewidth=0.75, linestyle='-', color='k', label=\"Produced\")\n ax.plot(x2, y2, linewidth=0.75, linestyle='-', color='r', label=\"Utilized\")\n ax.set_ylabel('Mass flow (kg/s)', fontsize=fontsize, color='k', labelpad=fontsize)\n if index + 1 == n_axes:\n ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)\n ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0)\n dy = max(np.max(y1)-np.min(y2), 0.02)\n ax.set_ylim([np.min(y2)-dy/5, np.max(y1)+dy/5])\n fig.tight_layout()\n return fig, axes",
"def test_run_beta_diversity_through_plots(self):\r\n run_beta_diversity_through_plots(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n parallel=False,\r\n status_update_callback=no_status_updates)\r\n\r\n unweighted_unifrac_dm_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_dm.txt')\r\n weighted_unifrac_dm_fp = join(self.test_out, 'weighted_unifrac_dm.txt')\r\n unweighted_unifrac_pc_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_pc.txt')\r\n weighted_unifrac_pc_fp = join(self.test_out, 'weighted_unifrac_pc.txt')\r\n weighted_unifrac_html_fp = join(self.test_out,\r\n 'weighted_unifrac_emperor_pcoa_plot', 'index.html')\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(unweighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)",
"def plot_all_state_estimates(self, title, steps=None, two_sigma=True):\n fig1, (ax1, ax2, ax3) = plt.subplots(3, 1)\n\n plt.sca(ax1)\n self.plot_state_estimate('x_1', steps)\n self.plot_two_sigma('x_1', steps) if two_sigma else None\n plt.title(title)\n plt.legend([\"State Estimate\", \"2 Sigma Bounds\"], loc='upper right')\n plt.ylabel('Easting (m)')\n ax1.xaxis.set_ticklabels([])\n\n plt.axes(ax2)\n self.plot_state_estimate('x_2', steps)\n self.plot_two_sigma('x_2', steps) if two_sigma else None\n plt.ylabel('Northing (m)')\n ax2.xaxis.set_ticklabels([])\n\n plt.axes(ax3)\n self.plot_state_estimate('x_3', steps)\n self.plot_two_sigma('x_3', steps) if two_sigma else None\n plt.ylabel('Altitude (m)')\n plt.xlabel('Time step (k)')",
"def n27_and_sidebands():\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4.5, 4))\n # n=26 through n=29\n folder = os.path.join(\"..\", \"..\", \"2018-09-06\")\n fname = \"1_dye_fscan.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n ax.axhline(0, color='grey')\n data.plot(x='fpoly', y='sig', label=\"MW Off\", c='k', ax=ax)\n # sidebands\n folder = os.path.join(\"..\", \"..\", \"2018-09-09\")\n fname = \"1_freq_dye.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n data['asig'] = data['sig'] - 0.3\n ax.axhline(-0.3, color='grey')\n data.plot(x='fpoly', y='asig', label=\"MW On\", c='k', ax=ax)\n # pretty figure\n ax.legend().remove()\n ax.set_ylabel(r\"$e^-$ Signal\")\n ax.set_yticks([])\n ax.set_xlabel(\"Frequency (GHz from Limit)\")\n ax.set_xticks([-4863, -4511, -4195, -3908])\n ax.text(-4400, -0.15, \"MW On\")\n ax.text(-4400, 0.3, \"MW Off\")\n # save\n fig.tight_layout()\n fig.savefig(\"n27_and_sidebands.pdf\")\n return",
"def main_plots_and_errors(x_lim, h, t_0, tau, steps_list, nsize = 1, delimiter = ' & ',decimal_places= 4,plots_names =None, **params):\n \n t = t_0\n x= np.arange(*x_lim, step =h)[1:]\n\n cond = BoundaryConditions_first_type(x_lim, t_0, analytical_solution, **params)\n\n method_im = ImplicitDifferenceScheme(x,cond, h, tau, **params)\n method_two_step = TwoStepFiniteDifferenceAlgorithm(x,cond, h, tau, **params)\n\n err_list = [[],[]]\n\n steps_list.sort()\n n = len(steps_list)\n\n if plots_names is None:\n plots_names = ['Analit. solution','Implicit scheme', 'Two steps sim. method']\n fig, ax = plt.subplots(n//2 +n%2,2, figsize = (nsize*15, nsize*15 *(n//2+n%2)/2) )\n ax = ax.ravel()\n\n for i in range(steps_list[-1]):\n print(f'step {i+1}')\n method_im.update()\n method_two_step.update()\n t+=tau\n if i+1 in steps_list:\n j = steps_list.index(i+1)\n ax[j].plot(x, analytical_solution(x,t, **params), label=plots_names[0])\n ax[j].plot(x, method_im(), label = plots_names[1])\n ax[j].plot(x, method_two_step(), label = plots_names[2])\n\n ax[j].legend()\n ax[j].set_title(f'steps: {method_im.count}, time: {round(method_im.t, decimal_places//2)}')\n\n err_list[0].append(absolute_error(analytical_solution(x,t, **params), method_im()))\n err_list[1].append(absolute_error(analytical_solution(x,t, **params), method_two_step()))\n \n for i in range(j+1, len(ax)):\n ax[i].remove()\n\n print('Error table')\n print(\"N\", delimiter, delimiter.join([str(item) for item in steps_list]))\n for index, err in enumerate(err_list):\n print(plots_names[index+1], delimiter, delimiter.join([str(round(item,decimal_places)) for item in err]), sep='')\n\n plt.show()",
"def vis_difference(self):\n print(self.init_vec)\n\n init = self.init_output.numpy()\n\n alphas = np.linspace(0, 1, 20)\n for i, alpha in enumerate(alphas):\n\n display.clear_output(wait=True)\n norm = [torch.linalg.norm(torch.tensor(\n self.init_vec + alpha*self.eigen[i]), axis=1).detach().numpy() for i in range(2)]\n\n diff = np.array([self.compute_difference(\n alpha, self.eigen[i]) for i in range(2)])\n\n fig = plt.figure(figsize=(14, 12), tight_layout=True)\n fig.suptitle(\"Latent direction variation\", fontsize=20)\n gs = gridspec.GridSpec(2, 2)\n\n ax_temp = plt.subplot(gs[0, :])\n ax_temp.scatter(\n init[:, 0], init[:, 1])\n ax_temp.set_title(\"Initial Dataset\")\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n for j in range(2):\n ax_temp = plt.subplot(gs[1, j])\n sc = ax_temp.quiver(\n init[:, 0], init[:, 1], diff[j, :, 0], diff[j, :, 1], norm[j])\n sc.set_clim(np.min(norm[j]), np.max(norm[j]))\n plt.colorbar(sc)\n ax_temp.set_title(\n \"Direction: {}, alpha: {}\".format(j+1, alpha))\n ax_temp.set_xlim(-1, 1)\n ax_temp.set_ylim(-1, 1)\n [s.set_visible(False) for s in ax_temp.spines.values()]\n\n plt.savefig(\"frames_dir/fig_{}\".format(i))\n plt.show()",
"def plot_bs(self, show=False, density=True, pcolor=\"r\", mcolor=\"b\", lw=1.0, subtract = 0):\n cm = matplotlib.cm.jet\n\n if (subtract !=0):\n geqbispectra = subtract\n else:\n geqbispectra = np.zeros(np.shape(self.eqbispectra))\n\n if (density):\n \"\"\" also read the local overdensity value and plot line colors according to\n the density value, + = red, - = blue; adjust alpha accordingly\n \"\"\"\n if len(self.ds)<self.Nsubs:\n print (\"no density data\")\n return 0\n\n ads=np.abs(self.ds)\n meands=np.mean(self.ds)\n mads=np.max(ads)\n normds=np.array([ads[i]/mads for i in range(len(ads))])\n self.normds=normds\n\n cNorm = colors.Normalize(min(self.ds), vmax=max(self.ds))\n scalarMap = cmap.ScalarMappable(norm=cNorm, cmap=cm)\n scalarMap.set_array([])\n\n fig, ax = self.plt.subplots()\n\n for sub in range(self.Nsubs):\n #print sub\n if not(density):\n lplot=ax.plot(self.klist, self.fNLeq[sub])\n else:\n colorVal = scalarMap.to_rgba(self.ds[sub])\n lplot = ax.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=colorVal, alpha=normds[sub], linewidth=lw)\n \"\"\"\n if self.ds[sub]>meands:\n self.plt.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=pcolor, alpha=normds[sub], linewidth=lw)\n else:\n self.plt.plot(self.klist[1:-1], self.eqbispectra[sub][1:-1]-geqbispectra[sub][1:-1], color=mcolor, alpha=normds[sub], linewidth=lw)\n \"\"\"\n\n ax.set_xlabel(r\"$k {\\rm (h/Mpc)}$\")\n ax.set_ylabel(r\"${\\rm Q}(k)$\")\n ax.set_xscale('log')\n cbar = fig.colorbar(scalarMap, format='%.0e')\n #self.plt.yscale('log')\n if (show):\n self.plt.show()",
"def plot_explorer_panels(self, param_val, photonnumber, initial_index, final_index, qbt_index, osc_index):\n def fig_ax(index):\n return fig, axes_list_flattened[index]\n\n param_index = np.searchsorted(self.param_vals, param_val)\n param_val = self.param_vals[param_index]\n\n initial_bare = self.sweep.lookup.bare_index(initial_index, param_index)\n final_bare = self.sweep.lookup.bare_index(final_index, param_index)\n energy_ground = self.sweep.lookup.energy_dressed_index(0, param_index)\n energy_initial = self.sweep.lookup.energy_dressed_index(initial_index, param_index) - energy_ground\n energy_final = self.sweep.lookup.energy_dressed_index(final_index, param_index) - energy_ground\n qbt_subsys = self.sweep.hilbertspace[qbt_index]\n\n nrows = 3\n ncols = 2\n fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)\n axes_list_flattened = [elem for sublist in axs for elem in sublist]\n\n # Panel 1 ----------------------------------\n panels.display_bare_spectrum(self.sweep, qbt_subsys, param_val, fig_ax(0))\n\n # Panels 2 and 6----------------------------\n if type(qbt_subsys).__name__ in ['Transmon', 'Fluxonium']: # do not plot wavefunctions if multi-dimensional\n panels.display_bare_wavefunctions(self.sweep, qbt_subsys, param_val, fig_ax(1))\n panels.display_charge_matrixelems(self.sweep, initial_bare, qbt_subsys, param_val, fig_ax(5))\n\n # Panel 3 ----------------------------------\n panels.display_dressed_spectrum(self.sweep, initial_bare, final_bare, energy_initial, energy_final, param_val,\n fig_ax(2))\n\n # Panel 4 ----------------------------------\n panels.display_n_photon_qubit_transitions(self.sweep, photonnumber, initial_bare, param_val, fig_ax(3))\n\n # Panel 5 ----------------------------------\n panels.display_chi_01(self.sweep, qbt_index, osc_index, param_index, fig_ax(4))\n\n fig.tight_layout()\n return fig, axs",
"def test_run_beta_diversity_through_plots_even_sampling(self):\r\n\r\n run_beta_diversity_through_plots(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n sampling_depth=20,\r\n tree_fp=self.test_data['tree'][0],\r\n parallel=False,\r\n status_update_callback=no_status_updates)\r\n\r\n unweighted_unifrac_dm_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_dm.txt')\r\n weighted_unifrac_dm_fp = join(self.test_out, 'weighted_unifrac_dm.txt')\r\n unweighted_unifrac_pc_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_pc.txt')\r\n weighted_unifrac_pc_fp = join(self.test_out, 'weighted_unifrac_pc.txt')\r\n weighted_unifrac_html_fp = join(self.test_out,\r\n 'weighted_unifrac_emperor_pcoa_plot', 'index.html')\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(unweighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)",
"def plot_overscan_diff(overscan, img, TITLE, OUT_DIR):\n fig = plt.figure(figsize=(20, 20))\n gs0 = gridspec.GridSpec(3, 3)\n\n for i, f in enumerate(img):\n x = f.dev_index % 3\n\n gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, wspace=0, subplot_spec=gs0[f.dev_index])\n ax2 = plt.subplot(gs[0, 0])\n for j in range(9, 17):\n plt.plot(overscan[i, j - 1] - overscan[i, 15] +\n 500 * (j - 8), label='seg' + str(j + 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if(x != 0):\n ax2.set_yticklabels([])\n\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax2.set_title(f.dev_name + ' (seg 10-17)')\n\n ax1 = plt.subplot(gs[0, 1])\n for j in range(1, 9):\n plt.plot(overscan[i, j - 1] - overscan[i, 7] +\n 500 * j, label='seg' + str(j - 1))\n plt.legend(fontsize=6, loc='upper center', ncol=4)\n if(x != 2):\n ax1.set_yticklabels([])\n if(x == 2):\n ax1.yaxis.tick_right()\n plt.grid()\n plt.xlim(0, 2100)\n plt.ylim(0, 4500)\n ax1.set_title(f.dev_name + ' (seg 0-7)')\n #\tax1.set_title('S-'+f[7:9]+' (seg 0-7)')\n\n fig.suptitle('Overscan (diff) ' + TITLE, y=0.94, size=20)\n plt.subplots_adjust(wspace=0.05)\n plt.savefig(OUT_DIR + TITLE + '_diff_spatial.png')\n plt.close(fig)",
"def plot_bus_load(self):\n stops = {key: 0 for key, _ in self.route.timetable().items()}\n for passenger in self.passengers:\n trip = self.passenger_trip(passenger)\n stops[trip[0][1]] += 1\n stops[trip[1][1]] -= 1\n prev = None\n for i, stop in enumerate(stops):\n if i > 0:\n stops[stop] += stops[prev]\n prev = stop\n fig, ax = plt.subplots()\n ax.step(range(len(stops)), list(stops.values()), where=\"post\")\n ax.set_xticks(range(len(stops)))\n ax.set_xticklabels(list(stops.keys()))\n return fig, ax",
"def test_run_beta_diversity_through_plots_parallel(self):\r\n run_beta_diversity_through_plots(\r\n self.test_data['biom'][0],\r\n self.test_data['map'][0],\r\n self.test_out,\r\n call_commands_serially,\r\n self.params,\r\n self.qiime_config,\r\n tree_fp=self.test_data['tree'][0],\r\n parallel=True,\r\n status_update_callback=no_status_updates)\r\n\r\n unweighted_unifrac_dm_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_dm.txt')\r\n weighted_unifrac_dm_fp = join(self.test_out, 'weighted_unifrac_dm.txt')\r\n unweighted_unifrac_pc_fp = join(\r\n self.test_out,\r\n 'unweighted_unifrac_pc.txt')\r\n weighted_unifrac_pc_fp = join(self.test_out, 'weighted_unifrac_pc.txt')\r\n weighted_unifrac_html_fp = join(self.test_out,\r\n 'weighted_unifrac_emperor_pcoa_plot', 'index.html')\r\n\r\n # check for expected relations between values in the unweighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(unweighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n # check for expected relations between values in the weighted unifrac\r\n # distance matrix\r\n dm = parse_distmat_to_dict(open(weighted_unifrac_dm_fp))\r\n self.assertTrue(dm['f1']['f2'] < dm['f1']['p1'],\r\n \"Distance between pair of fecal samples is larger than distance\"\r\n \" between fecal and palm sample (unweighted unifrac).\")\r\n self.assertEqual(dm['f1']['f1'], 0)\r\n\r\n # check that final output files have non-zero size\r\n self.assertTrue(getsize(unweighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_pc_fp) > 0)\r\n self.assertTrue(getsize(weighted_unifrac_html_fp) > 0)\r\n\r\n # Check that the log file is created and has size > 0\r\n log_fp = glob(join(self.test_out, 'log*.txt'))[0]\r\n self.assertTrue(getsize(log_fp) > 0)",
"def update_visuals(self):\n # Update compartment_table by iterating through compartment_list\n self.compartment_table.setRowCount(len(self.compartment_list))\n for index, compartment in enumerate(self.compartment_list):\n self.compartment_table.setItem(index, 0, self.get_uneditable_table_widget_item(compartment.name,\n compartment.infection_state))\n self.compartment_table.setItem(index, 1, self.get_uneditable_table_widget_item(compartment.symbol))\n self.compartment_table.setItem(index, 2, QTableWidgetItem(f'{compartment.value:.4f}')) # 3rd column acc 4dp\n\n # Update variable_table by iterating through variable_list\n self.variable_table.setRowCount(len(self.variable_list))\n for index, variable in enumerate(self.variable_list):\n self.variable_table.setItem(index, 0, self.get_uneditable_table_widget_item(variable.equation))\n self.variable_table.setItem(index, 1, self.get_uneditable_table_widget_item(\n str(variable.origin.name) if variable.origin is not None else \"-\")) # Birth is a -\n self.variable_table.setItem(index, 2, self.get_uneditable_table_widget_item(\n str(variable.end.name) if variable.end is not None else \"-\")) # Death is a -\n\n # Create NetworkX graph\n G = nx.DiGraph()\n edges = list()\n labels = dict()\n for variable in self.variable_list: # Set labels = {[Source Node1, End Node1]: Latex Equation1, ...}\n se_list = [variable.origin.name if variable.origin is not None else 'Birth',\n variable.end.name if variable.end is not None else 'Death']\n edges.append(se_list)\n labels[tuple(se_list)] = \"${}$\".format(latex(variable.equation))\n\n G.add_edges_from(edges)\n pos = nx.planar_layout(G) # Planar layout so edges do not collide\n plt.figure() # Start a figure before drawing\n nx.draw(G, pos, edge_color='black', node_size=2000, node_shape='s', node_color='white', with_labels=True,\n style='bold')\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.savefig('graph.png') # Save as graph.png so QPixmap can be created from it below\n plt.close() # Close plt to save memory and prevent issues downline\n\n scene = QGraphicsScene()\n scene.addPixmap(QPixmap('graph.png'))\n self.diagram_view.setScene(scene) # DiagramView is set to be the graph.png\n self.diagram_view.show()",
"def plt_energy_convergence(self, save_figs=True):\n plt.figure(figsize=(10, 5))\n for opt in self.optimizers:\n self.result_df[opt].apply(lambda x: abs(x - self.ref)).plot(logy=True, label=opt)\n\n plt.title('Optimizer Energy Convergence', size=24)\n plt.xlabel('Evaluation Count', size=18)\n plt.ylabel('Energy Difference', size=18)\n plt.legend(fontsize='x-large')\n sns.despine()\n if save_figs:\n if os.path.exists(fr'{self.file_name}_energy_convergence.png'):\n plt.savefig(fr'{self.file_name}_energy_convergence(1).png');\n else:\n plt.savefig(fr'{self.file_name}_energy_convergence.png');\n else:\n plt.show()",
"def overview(self, minState=5):\n n = 600\n \n ### first plot: the RTOFFSETs and STATES\n plt.figure(10)\n plt.clf()\n plt.subplots_adjust(hspace=0.05, top=0.95, left=0.05,\n right=0.99, wspace=0.00, bottom=0.1)\n ax1 = plt.subplot(n+11)\n try:\n print self.insmode+' | pri:'+\\\n self.getKeyword('OCS PS ID')+' | sec:'+\\\n self.getKeyword('OCS SS ID')\n \n plt.title(self.filename+' | '+self.insmode+' | pri:'+\n self.getKeyword('OCS PS ID')+' | sec:'+\n self.getKeyword('OCS SS ID'))\n except:\n pass\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('FUOFFSET')*1e3,\n color=(1.0, 0.5, 0.0), label=self.DLtrack+' (FUOFFSET)',\n linewidth=3, alpha=0.5)\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+12, sharex=ax1) # == DDL movements\n \n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field(self.DDLtrack),\n color=(0.0, 0.5, 1.0), linewidth=3, alpha=0.5,\n label=self.DDLtrack)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field('PSP'),\n color=(0.0, 0.5, 1.0), linewidth=1, alpha=0.9,\n label='PSP', linestyle='dashed')\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+13, sharex=ax1) # == states\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'),\n color=(1.0, 0.5, 0.0), label='OPDC')\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'),\n color=(0.0, 0.5, 1.0), label='DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('STATES')\n yl=plt.ylim()\n plt.ylim(yl[0]-1, yl[1]+1)\n plt.xlim(0)\n ### fluxes\n plt.subplot(n+14, sharex=ax1)\n try:\n fsua_dark = self.fsu_calib[('FSUA', 'DARK')][0,0]\n fsub_dark = self.fsu_calib[('FSUB', 'DARK')][0,0]\n fsua_alldark = self.fsu_calib[('FSUA', 'DARK')].sum(axis=1)[0]\n fsub_alldark = self.fsu_calib[('FSUB', 'DARK')].sum(axis=1)[0]\n except:\n print 'WARNING: there are no FSUs calibrations in the header'\n fsua_dark = 0.0\n fsub_dark = 0.0\n fsua_alldark = 0.0\n fsub_alldark = 0.0\n\n M0 = 17.5\n fluxa = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n fsua_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU1 DIT'))\n print 'FLUX FSUA (avg, rms):', round(fluxa.mean(), 0), 'ADU/s',\\\n round(100*fluxa.std()/fluxa.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxa.mean()),2)\n fluxb = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n fsub_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU2 DIT'))\n print 'FLUX FSUB (avg, rms):', round(fluxb.mean(), 0), 'ADU/s',\\\n round(100*fluxb.std()/fluxb.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxb.mean()),2)\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\\\n fluxa/1000, color='b', alpha=0.5, label='FSUA')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\\\n fluxb/1000, color='r', alpha=0.5, label='FSUB')\n\n plt.ylim(1)\n plt.legend(prop={'size':9})\n plt.ylabel('flux - DARK (kADU)')\n plt.xlim(0)\n plt.subplot(n+15, sharex=ax1)\n try:\n # -- old data version\n 
plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field('OPDSNR'),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field('OPDSNR'),\n color='r', alpha=0.5, label='FSUB SNR')\n except:\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field(self.OPDSNR),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field(self.OPDSNR),\n color='r', alpha=0.5, label='FSUB SNR')\n plt.legend(prop={'size':9})\n \n A = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,3])\n snrABCD_a = ((A-C)**2+(B-D)**2)\n snrABCD_a /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n # snrABCD_a, color='b', alpha=0.5, linestyle='dashed')\n \n A = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,3])\n \n snrABCD_b = ((A-C)**2+(B-D)**2)\n snrABCD_b /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n # snrABCD_b, color='r', alpha=0.5, linestyle='dashed') \n \n # -- SNR levels:\n #plt.hlines([self.getKeyword('INS OPDC OPEN'),\n # self.getKeyword('INS OPDC CLOSE'),\n # self.getKeyword('INS OPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(1.0, 0.5, 0.0))\n #plt.hlines([self.getKeyword('INS DOPDC OPEN'),\n # self.getKeyword('INS DOPDC CLOSE'),\n # self.getKeyword('INS DOPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(0.0, 0.5, 1.0))\n # -- plot thresholds\n plt.ylabel('SNR')\n plt.xlim(0)\n \n if self.getKeyword('OCS DET IMGNAME')=='PACMAN_OBJ_ASTRO_':\n # == dual FTK\n plt.subplot(n+16, sharex=ax1)\n plt.ylabel('PRIMET ($\\mu$m)')\n #met = interp1d(np.float_(self.raw['METROLOGY_DATA'].\\\n # data.field('TIME')),\\\n # self.raw['METROLOGY_DATA'].data.field('DELTAL'),\\\n # kind 
= 'linear', bounds_error=False, fill_value=0.0)\n met = lambda x: np.interp(x,\n np.float_(self.raw['METROLOGY_DATA'].data.field('TIME')),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n metro = met(self.raw['DOPDC'].data.field('TIME'))*1e6\n n_ = min(len(self.raw['DOPDC'].data.field('TIME')),\n len(self.raw['OPDC'].data.field('TIME')))\n\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n metro, color=(0.5,0.5,0.), label='A-B')\n\n w1 = np.where((self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'OPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'OPDC FTK stat: 0%'\n\n w1 = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DOPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'DOPDC FTK stat: 0%'\n\n w = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DUAL FTK stat:', round(100*len(w[0])/float(n_),1), '%'\n except:\n print 'DUAL FTK stat: 0%'\n\n plt.xlim(0)\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], '.g', linewidth=2,\n alpha=0.5, label='dual FTK')\n #plt.legend()\n if len(w[0])>10 and False:\n coef = np.polyfit(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], 2)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n np.polyval(coef, self.raw['DOPDC'].\n data.field('TIME')),\n color='g')\n plt.ylabel('metrology')\n\n print 'PRIMET drift (polyfit) :', 1e6*coef[1], 'um/s'\n slope, rms, synth = NoisySlope(self.raw['DOPDC'].\n data.field('TIME')[w],\n metro[w], 3e6)\n plt.figure(10)\n yl = plt.ylim()\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n synth, color='r')\n plt.ylim(yl)\n print 'PRIMET drift (NoisySlope):',\\\n slope*1e6,'+/-', rms*1e6, 'um/s'\n else:\n # == scanning\n plt.subplot(n+16, sharex=ax1)\n fringesOPDC = \\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA3')[:,0]\n \n fringesDOPDC =\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA3')[:,0]\n \n plt.plot(self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesOPDC/fringesOPDC.std()),\n color=(1.0, 0.5, 0.0), alpha=0.6,\n label=self.primary_fsu+'/OPDC')\n plt.plot(self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesDOPDC/fringesDOPDC.std()),\n color=(0.0, 0.5, 1.0), alpha=0.6,\n label=self.secondary_fsu+'/DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('A-C')\n plt.xlabel('time stamp ($\\mu$s)')\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots the ave_dhdl array as a function of the lambda value. If both TI and TI-CUBIC are among the requested methods, it plots the TI integration area together with the TI-CUBIC interpolation curve; if only one of the two is requested, it plots the integration area of that method alone.
|
def plotTI():
min_dl = dlam[dlam != 0].min()
S = int(0.4/min_dl)
fig = pl.figure(figsize = (8,6))
ax = fig.add_subplot(1,1,1)
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for k, spine in ax.spines.items():
spine.set_zorder(12.2)
xs, ndx, dx = [0], 0, 0.001
colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']
min_y, max_y = 0, 0
lines = tuple()
## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper
lv_names2 = []
for j in range(n_components):
y = ave_dhdl[:,j]
if not (y == 0).all():
lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())
for j in range(n_components):
y = ave_dhdl[:,j]
if not (y == 0).all():
# Get the coordinates.
lj = lchange[:,j]
x = lv[:,j][lj]
y = y[lj]/P.beta_report
if 'TI' in P.methods:
# Plot the TI integration area.
ss = 'TI'
for i in range(len(x)-1):
min_y = min(y.min(), min_y)
max_y = max(y.max(), max_y)
#pl.plot(x,y)
if i%2==0:
pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)
else:
pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)
xlegend = [-100*wnum for wnum in range(len(lv_names2))]
pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper
if 'TI-CUBIC' in P.methods and not cubspl[j]==0:
# Plot the TI-CUBIC interpolation curve.
ss += ' and TI-CUBIC'
xnew = numpy.arange(0, 1+dx, dx)
ynew = cubspl[j].interpolate(y, xnew)
min_y = min(ynew.min(), min_y)
max_y = max(ynew.max(), max_y)
pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)
else:
# Plot the TI-CUBIC integration area.
ss = 'TI-CUBIC'
for i in range(len(x)-1):
xnew = numpy.arange(x[i], x[i+1]+dx, dx)
ynew = cubspl[j].interpolate(y, xnew)
ynew[0], ynew[-1] = y[i], y[i+1]
min_y = min(ynew.min(), min_y)
max_y = max(ynew.max(), max_y)
if i%2==0:
pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)
else:
pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)
# Store the abscissa values and update the subplot index.
xs += (x+ndx).tolist()[1:]
ndx += 1
# Make sure the tick labels are not overcrowded.
xs = numpy.array(xs)
dl_mat = numpy.array([xs-i for i in xs])
ri = range(len(xs))
def getInd(r=ri, z=[0]):
primo = r[0]
min_dl=ndx*0.02*2**(primo>10)
if dl_mat[primo].max()<min_dl:
return z
for i in r:
for j in range(len(xs)):
if dl_mat[i,j]>min_dl:
z.append(j)
return getInd(ri[j:], z)
xt = [i if (i in getInd()) else '' for i in range(K)]
pl.xticks(xs[1:], xt[1:], fontsize=10)
pl.yticks(fontsize=10)
#ax = pl.gca()
#for label in ax.get_xticklabels():
# label.set_bbox(dict(fc='w', ec='None', alpha=0.5))
# Remove the abscissa ticks and set up the axes limits.
for tick in ax.get_xticklines():
tick.set_visible(False)
pl.xlim(0, ndx)
min_y *= 1.01
max_y *= 1.01
pl.ylim(min_y, max_y)
for i,j in zip(xs[1:], xt[1:]):
pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')
if ndx>1:
lenticks = len(ax.get_ymajorticklabels()) - 1
if min_y<0: lenticks -= 1
if lenticks < 5:
from matplotlib.ticker import AutoMinorLocator as AML
ax.yaxis.set_minor_locator(AML())
pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)
pl.ylabel(r'$\mathrm{\langle{\frac{ \partial U } { \partial \lambda }}\rangle_{\lambda}\/%s}$' % P.units, fontsize=20, color='#151B54')
pl.annotate('$\mathit{\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')
if not P.software.title()=='Sire':
lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)
for l in lege.legendHandles:
l.set_linewidth(10)
pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))
pl.close(fig)
return
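Editorial note, for context only (not part of the plotTI source above): the filled area drawn for each component is the usual thermodynamic-integration estimate of the free-energy difference, i.e. the integral of the ensemble-averaged derivative of the potential with respect to lambda. A minimal statement of the relation, with the trapezoid rule written out as the discrete approximation the 'TI' branch appears to shade:

\Delta G \;=\; \int_{0}^{1} \Big\langle \frac{\partial U}{\partial \lambda} \Big\rangle_{\lambda}\, d\lambda
\;\approx\; \sum_{k=0}^{K-2} \frac{\lambda_{k+1}-\lambda_{k}}{2}
\left( \Big\langle \frac{\partial U}{\partial \lambda} \Big\rangle_{\lambda_{k}}
     + \Big\langle \frac{\partial U}{\partial \lambda} \Big\rangle_{\lambda_{k+1}} \right)

Here K is taken to be the number of lambda states, matching the global K used in the code. The 'TI-CUBIC' branch shades the same integral but evaluates it on a dense grid produced by the cubspl[j].interpolate call, which is why its area can differ slightly from the trapezoid one.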
|
[
"def plot(self, kind, det_thrsh=.999, band_conf=.95, det_conf=None,\n methods=None, title=True, filetype='png', alpha=.3, shade=True,\n scale=1., hide_data=False, legend=True, xlim=None, ylim=None,\n rollwindow=2e-26, rollcolor=None, band=True, bestonly=False,\n suffix='', path=g.paths['plots'], bbox='tight', legendfont=18):\n\n methods = methods or self.search_methods\n\n self.log.info('Plotting.')\n # obtain data\n y, kindname = self.pickseries(kind)\n if det_thrsh:\n # obtain fit & noise threshold\n noise, slope, rmse, ytop, ybot, y1side, x_short, rollmean, rollstd\\\n = self.quantify(kind, det_thrsh, det_conf=det_conf,\n band_conf=band_conf, methods=methods,\n rollwindow=rollwindow)\n # find \"best\" method\n maxslope = np.max([slope[m] for m in methods])\n\n # process\n fig, ax = plt.subplots(1)\n for m in methods:\n # plot\n if not hide_data:\n ax.plot(self.hinj, y[m], g.plotcolor[m]+'+', label=m)\n\n if det_thrsh and slope[m] == maxslope: # BUG HERE!\n # set axes limits\n ax.set_xlim(xlim or (0., scale * max(self.hinj)))\n if kind == 'h':\n ylim = ylim or (0., np.max(self.hinj))\n plt.gca().set_ylim(ylim[0], ylim[1])\n else:\n ax.set_ylim(ylim or (0., scale * np.around(y[m].max(), 1)))\n\n #extra features\n switch = not bestonly or slope[m] == maxslope\n\n if det_thrsh and switch:\n # plot noise line\n noise_line = [noise[m]] * self.ninst\n ax.plot(self.hinj, noise_line, color=g.plotcolor[m])\n\n if det_conf and switch:\n det_line = slope[m] * self.hinj + (y1side[m][1] - slope[m] *\n y1side[m][0])\n ax.plot(self.hinj, det_line, color=g.plotcolor[m],\n linestyle='--')\n\n if band_conf is not None and band and switch:\n # plot band lines\n bestfit_line = slope[m] * self.hinj\n ax.plot(self.hinj, bestfit_line, color=g.plotcolor[m],\n alpha=alpha)\n if band_conf:\n topband_line = slope[m] * self.hinj + \\\n (ytop[m][1] - slope[m] * ytop[m][0])\n botband_line = slope[m] * self.hinj +\\\n (ybot[m][1] - slope[m] * ybot[m][0])\n ax.plot(self.hinj, topband_line, color=g.plotcolor[m],\n alpha=alpha)\n ax.plot(self.hinj, botband_line, color=g.plotcolor[m],\n alpha=alpha)\n\n if shade:\n # shade confidence band\n ax.fill_between(self.hinj, botband_line, topband_line,\n color=g.plotcolor[m], alpha=alpha/10,\n where=self.hinj > 0)\n # note the where argument is necessary to close polygon\n\n if det_conf and rollcolor and switch:\n ax.plot(x_short[m], rollmean[m], rollcolor,\n linewidth=2)\n ax.plot(x_short[m], rollmean[m] + rollstd[m],\n rollcolor)\n ax.plot(x_short[m], rollmean[m] - rollstd[m],\n rollcolor)\n\n if shade:\n ax.fill_between(x_short[m], rollmean[m]-rollstd[m],\n rollmean[m] + rollstd[m],\n color=rollcolor, alpha=.3)\n\n # add labels indicating noise threshold and band confidence\n if det_thrsh:\n ax.text(.02, .7, 'Detection threshold: ' + str(det_thrsh),\n fontsize=20, transform=ax.transAxes)\n if band_conf:\n ax.text(.02, .65, 'Band confidence: ' + str(band_conf),\n fontsize=20, transform=ax.transAxes)\n # style\n ax.set_xlabel(r'$h_{\\rm inj}$')\n ax.set_ylabel(kindname)\n if legend:\n ax.legend(numpoints=1, loc=2, prop={'size': legendfont})\n if title:\n ax.set_title('%s injections on %s %s data for %s'\n % (self.injkind, self.det, self.run, self.psr))\n # check destination directory exists\n try:\n os.makedirs(path)\n self.log.debug('Plot directory created.')\n except OSError:\n self.log.debug('Plot directory already exists.')\n # save\n filename = 'injsrch_' + self.det + self.run + '_' + self.injkind +\\\n '_' + self.psr + '_' + kind\n p = path + filename + suffix + '.' 
+ filetype\n fig.savefig(p, bbox_inches=bbox)\n plt.close(fig)\n print 'Figure saved: %r.' % p",
"def investigate4DRepeatability():\n parentdir = '/home/rallured/Dropbox/Interferometer/SolarBFlat/Repeatability/'\n avgs = [1,2,4,8,16,32]\n\n #Temporal with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTiltTemporal*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptemptilt = d[-1]-d[-2]\n figtemptilt = d[-1]\n\n #Dynamic with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTilt_*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = [met.readFlatScript(fi.split('.')[0])[0] for fi in fn]\n #Make progressive averaging plot\n plt.figure('DynamicTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdyntilt = d[-1]-d[-2]\n figdyntilt = d[-1]\n \n #Temporal with fringes nulled\n fn = glob.glob(parentdir+'Nulled/17*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalNulledFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptempnull = d[-1]-d[-2]\n figtempnull = d[-1]\n \n #Dynamic with fringes nulled\n d = pyfits.getdata('/home/rallured/Dropbox/Interferometer/'\n 'SolarBFlat/Repeatability/'\n 'Nulled/170103_Processed.fits')\n rep = np.array([d[i,0]-d[i,1] for i in range(32)])\n #Make progressive averaging plot\n plt.figure('DynamicNulledFigure')\n for i in [0,1,3,7,15,31]:\n f,p = fourier.meanPSD(d[i,0],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(i+1))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdynnull = d[-1][0]-d[-1][1]\n figdynnull = d[-1][0]\n\n #Make comparative repeatability plots with 32 averages\n plt.figure('CompareRepeatability')\n f,p = fourier.meanPSD(repdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(repdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(reptemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(reptempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Repeatability - 32 
Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make comparative figure plots with 32 averages\n plt.figure('CompareFigure')\n f,p = fourier.meanPSD(figdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(figdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(figtemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(figtempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Figure - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make parroting repeatability plots\n fig = plt.figure('Parroting')\n fig.add_subplot(2,2,1)\n plt.imshow(repdyntilt)\n plt.title('Dynamic Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,2)\n plt.imshow(reptemptilt)\n plt.title('Temporal Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,3)\n res = legendre2d(repdyntilt,xo=3,yo=3)[0]\n plt.imshow(repdyntilt-res)\n plt.title('Dynamic Repeatability Filtered')\n plt.colorbar()\n fig.add_subplot(2,2,4)\n res = legendre2d(reptemptilt,xo=3,yo=3)[0]\n plt.imshow(reptemptilt-res)\n plt.title('Temporal Repeatability Filtered')\n plt.colorbar()",
"def anharm_plot2():\n set_tag(qdt, \"EjdivEc\", log=False)\n set_tag(qdt, \"Ej\", log=False)\n pl=Plotter(fig_width=9.0, fig_height=6.0)\n #qdt.epsinf=qdt.epsinf/3.72\n #qdt.Np=10\n #qdt.Ec=qdt.fq*0.1*h\n print qdt.max_coupling, qdt.coupling_approx\n #flux_o_flux0=qdt.call_func(\"flux_over_flux0\", voltage=yoko)\n #Ej=qdt.call_func(\"Ej\", flux_over_flux0=flux_o_flux0)\n #EjdivEc=Ej/qdt.Ec\n anharm=qdt.call_func(\"anharm\", EjdivEc=EjdivEc)\n anharmp=qdt.call_func(\"lamb_shifted_anharm\", EjdivEc=EjdivEc)\n fq=qdt.call_func(\"fq\", Ej=EjdivEc*qdt.Ec)\n ls_fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n ls_fq2=qdt.call_func(\"lamb_shifted_fq2\", EjdivEc=EjdivEc)\n #pl, pf=line(fq, anharm/h, linewidth=0.5, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\")\n\n pl, pf=line(EjdivEc, anharmp/h/1e9, linewidth=1.0, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\", plotter=pl)\n line(EjdivEc, anharm/h/1e9, linewidth=1.0, color=\"purple\", label=r\"anharm\", plotter=pl)\n\n line(EjdivEc, (ls_fq-fq)/1e9, plotter=pl, color=\"blue\", linewidth=1.0, label=r\"$\\Delta_{1,0}$\")\n E0, E1, E2=qdt.call_func(\"transmon_energy_levels\", EjdivEc=EjdivEc, n_energy=3)\n fq2=(E2-E1)/h\n line(EjdivEc, (ls_fq2-fq2)/1e9, plotter=pl, color=\"red\", linewidth=1.0, label=r\"$\\Delta_{2,1}$\")\n pl.set_ylim(-2, 1.5)\n #pl.set_xlim(0.0, 70)\n pl.xlabel=r\"$E_j/E_c$\"\n pl.ylabel=r\"$\\Delta (GHz)$\"\n #pl.legend(loc='lower right')\n #fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n #line(EjdivEc, fq, plotter=pl, color=\"green\", linewidth=0.5)\n\n #line(EjdivEc, E1p, plotter=pl, color=\"green\", linewidth=0.5)\n #line(EjdivEc, E2p, plotter=pl, color=\"purple\", linewidth=0.5)\n return pl",
"def _plotHHIDetection(peaks, HHI_ls, idx, curr_path):\n\n HHI_df = pd.DataFrame(HHI_ls, columns=[\"HHI\"], index=idx)\n\n peaks[\"avgFilter\"] = np.insert(peaks[\"avgFilter\"], 0, 0)\n peaks[\"stdFilter\"] = np.insert(peaks[\"stdFilter\"], 0, 0)\n HHI_df[\"avgFilter\"] = peaks[\"avgFilter\"][:-1]\n HHI_df[\"stdFilter\"] = peaks[\"stdFilter\"][:-1]\n HHI_df[\"HHI\"].plot(label=\"HHI\", color=colors[0])\n\n HHI_df[\"avgFilter\"].plot(label=\"Moving mean\", figsize=(6.4, 2), color=colors[1], alpha=0.5)\n\n (HHI_df[\"avgFilter\"] + 3 * HHI_df[\"stdFilter\"]).plot(label=\"Upper/lower bound\", color=\"grey\", alpha=0.5)\n (HHI_df[\"avgFilter\"] - 3 * HHI_df[\"stdFilter\"]).plot(color=\"grey\", alpha=0.5)\n plt.legend([\"HHI\", \"Moving mean\", \"upper/lower bound\"])\n\n plt.ylabel(\"HHI index\", fontsize=16)\n plt.ylim([0.1, 0.6])\n # plt.show()\n plt.savefig(curr_path + \"/HHI.png\", bbox_inches=\"tight\", dpi=600)\n plt.close()\n\n signal_df = pd.DataFrame(peaks[\"signals\"], columns=[\"signal\"], index=idx)\n signal_df[\"signal\"].plot(color=\"red\", figsize=(6.4, 2))\n plt.ylabel(\"Signal\", fontsize=16)\n plt.yticks([-1, 0, 1])\n plt.savefig(curr_path + \"/signals.png\", bbox_inches=\"tight\", dpi=600)\n plt.close()",
"def plot_tild_integrands(self):\n fig, ax = plt.subplots()\n lambdas = self.get_lambdas()\n thermal_average, standard_error = self.get_tild_integrands()\n ax.plot(lambdas, thermal_average, marker=\"o\")\n ax.fill_between(\n lambdas,\n thermal_average - standard_error,\n thermal_average + standard_error,\n alpha=0.3,\n )\n ax.set_xlabel(\"Lambda\")\n ax.set_ylabel(\"dF/dLambda\")\n return fig, ax",
"def evidence_tuning_plots(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\",\n x_name=\"Mean Predicted\",\n y_name=\"Empirical Probability\"):\n\n def lineplot(x, y, trials, methods, **kwargs):\n \"\"\"method_lineplot.\n\n Args:\n y:\n methods:\n kwargs:\n \"\"\"\n uniq_methods = set(methods.values)\n method_order = sorted(uniq_methods)\n\n method_new_names = [f\"$\\lambda={i:0.4f}$\" for i in method_order]\n method_df = []\n for method_idx, (method, method_new_name) in enumerate(zip(method_order,\n method_new_names)):\n lines_y = y[methods == method]\n lines_x = x[methods == method]\n for index, (xx, yy,trial) in enumerate(zip(lines_x, lines_y, trials)):\n\n to_append = [{x_name : x,\n y_name: y,\n \"Method\": method_new_name,\n \"Trial\" : trial}\n for i, (x,y) in enumerate(zip(xx,yy))]\n method_df.extend(to_append)\n method_df = pd.DataFrame(method_df)\n x = np.linspace(0,1,100)\n plt.plot(x, x, linestyle='--', color=\"black\")\n sns.lineplot(x=x_name, y=y_name, hue=\"Method\",\n alpha=0.8,\n hue_order=method_new_names, data=method_df,)\n # estimator=None, units = \"Trial\")\n\n df = df.copy()\n # Query methods that have evidence_new_reg_2.0\n df = df[[\"evidence\" in i for i in\n df['method_name']]].reset_index()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n g = sns.FacetGrid(df, col=\"Data\", height=6, sharex = False, sharey = False)\n g.map(lineplot, x_input, y_input, \"trial_number\",\n methods=df[\"Method\"]).add_legend()",
"def makeaplot(events,\n sensitivities,\n hrf_estimates,\n roi_pair,\n fn=True):\n import matplotlib.pyplot as plt\n\n # take the mean and transpose the sensitivities\n sensitivities_stacked = mv.vstack(sensitivities)\n\n if bilateral:\n sensitivities_stacked.sa['bilat_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.bilat_ROIs)\n mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)\n else:\n sensitivities_stacked.sa['all_ROIs_str'] = map(lambda p: '_'.join(p),\n sensitivities_stacked.sa.all_ROIs)\n mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)\n\n mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())\n\n # some parameters\n # get the conditions\n block_design = sorted(np.unique(events['trial_type']))\n reorder = [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11]\n block_design = [block_design[i] for i in reorder]\n # end indices to chunk timeseries into runs\n run_startidx = np.array([0, 157, 313, 469])\n run_endidx = np.array([156, 312, 468, 624])\n\n runs = np.unique(mean_sens_transposed.sa.chunks)\n\n for j in range(len(hrf_estimates.fa.bilat_ROIs_str)):\n comparison = hrf_estimates.fa.bilat_ROIs[j][0]\n if (roi_pair[0] in comparison) and (roi_pair[1] in comparison):\n roi_pair_idx = j\n roi_betas_ds = hrf_estimates[:, roi_pair_idx]\n roi_sens_ds = mean_sens_transposed[:, roi_pair_idx]\n\n for run in runs:\n fig, ax = plt.subplots(1, 1, figsize=[18, 10])\n colors = ['#7b241c', '#e74c3c', '#154360', '#3498db', '#145a32', '#27ae60',\n '#9a7d0a', '#f4d03f', '#5b2c6f', '#a569bd', '#616a6b', '#ccd1d1']\n plt.suptitle('Timecourse of sensitivities, {} versus {}, run {}'.format(roi_pair[0],\n roi_pair[1],\n run + 1),\n fontsize='large')\n plt.xlim([0, max(mean_sens_transposed.sa.time_coords)])\n plt.ylim([-5, 7])\n plt.xlabel('Time in sec')\n plt.legend(loc=1)\n plt.grid(True)\n # for each stimulus, plot a color band on top of the plot\n for stimulus in block_design:\n onsets = events[events['trial_type'] == stimulus]['onset'].values\n durations = events[events['trial_type'] == stimulus]['duration'].values\n stimulation_end = np.sum([onsets, durations], axis=0)\n r_height = 1\n color = colors[0]\n y = 6\n\n # get the beta corresponding to the stimulus to later use in label\n beta = roi_betas_ds.samples[hrf_estimates.sa.condition == stimulus.replace(\" \", \"\"), 0]\n\n for i in range(len(onsets)):\n r_width = durations[i]\n x = stimulation_end[i]\n rectangle = plt.Rectangle((x, y),\n r_width,\n r_height,\n fc=color,\n alpha=0.5,\n label='_'*i + stimulus.replace(\" \", \"\") + '(' + str('%.2f' % beta) + ')')\n plt.gca().add_patch(rectangle)\n plt.legend(loc=1)\n del colors[0]\n\n times = roi_sens_ds.sa.time_coords[run_startidx[run]:run_endidx[run]]\n\n ax.plot(times, roi_sens_ds.samples[run_startidx[run]:run_endidx[run]], '-', color='black', lw=1.0)\n glm_model = hrf_estimates.a.model.results_[0.0].predicted[run_startidx[run]:run_endidx[run], roi_pair_idx]\n ax.plot(times, glm_model, '-', color='#7b241c', lw=1.0)\n model_fit = hrf_estimates.a.model.results_[0.0].R2[roi_pair_idx]\n plt.title('R squared: %.2f' % model_fit)\n if fn:\n plt.savefig(results_dir + 'timecourse_localizer_glm_sens_{}_vs_{}_run-{}.svg'.format(roi_pair[0], roi_pair[1], run + 1))",
"def agg_diagnostic_plots(self,a,b,save):\r\n \r\n \r\n #sample_rate =self.filter_class.filter_params[\"sample_rate\"]\r\n \r\n #f=plt.figure(figsize=(12,8))\r\n #for j in range(int(a.shape[1]/2)):\r\n # plt.plot(a[:,(2*j)],a[:,(2*j)+1],lw=3) \r\n # plt.xlim([0,self.filter_class.model_params[\"width\"]])\r\n # plt.ylim([0,self.filter_class.model_params[\"height\"]])\r\n # plt.xlabel(\"Corridor Width\")\r\n # plt.ylabel(\"Corridor Height\")\r\n # plt.title(f\"Agent True Positions\")\r\n\r\n #g = plt.figure(figsize=(12,8))\r\n #for j in range(int(a.shape[1]/2)):\r\n # plt.plot(b[::sample_rate,2*j],b[::sample_rate,(2*j)+1],lw=3) \r\n # plt.xlim([0,self.filter_class.model_params[\"width\"]])\r\n # plt.ylim([0,self.filter_class.model_params[\"height\"]])\r\n # plt.xlabel(\"Corridor Width\")\r\n # plt.ylabel(\"Corridor Height\")\r\n # plt.title(f\"Aggregate KF Predictions\")\r\n \r\n \r\n c,c_index,agent_means,time_means = self.L2s(a,b)\r\n \r\n # h = plt.figure(figsize=(12,8))\r\n #time_means[np.isnan(time_means)]\r\n # plt.plot(c_index,time_means,lw=5,color=\"k\",label=\"Mean Agent L2\")\r\n # for i in range(c.shape[1]):\r\n # plt.plot(c_index,c[:,i],linestyle=\"-.\",lw=3)\r\n # \r\n # plt.axhline(y=0,color=\"k\",ls=\"--\",alpha=0.5)\r\n # plt.xlabel(\"Time (steps)\")\r\n # plt.ylabel(\"L2 Error\")\r\n # plt.legend()\r\n # \"\"\"find agent with highest L2 and plot it.\r\n # mainly done to check something odd isnt happening\"\"\"\r\n # \r\n # index = np.where(agent_means == np.nanmax(agent_means))[0][0]\r\n # print(index)\r\n # a1 = a[:,(2*index):(2*index)+2]\r\n # b1 = b[:,(2*index):(2*index)+2]\r\n # \r\n # i = plt.figure(figsize=(12,8))\r\n # plt.plot(a1[:,0],a1[:,1],label= \"True Path\",lw=3)\r\n # plt.plot(b1[::self.filter_class.sample_rate,0],\r\n # b1[::self.filter_class.sample_rate,1],label = \"KF Prediction\",lw=3)\r\n # plt.legend()\r\n # plt.xlim([0,self.filter_class.model_params[\"width\"]])\r\n # plt.ylim([0,self.filter_class.model_params[\"height\"]])\r\n # plt.xlabel(\"Corridor Width\")\r\n # plt.ylabel(\"Corridor Height\")\r\n #plt.title(\"worst agent\")\r\n\r\n j = plt.figure(figsize=(12,8))\r\n plt.hist(agent_means,density=False,\r\n bins = self.filter_class.model_params[\"pop_total\"],edgecolor=\"k\")\r\n plt.xlabel(\"Agent L2\")\r\n plt.ylabel(f\" {self.filter_class.sample_size} Aggregated Agent Counts\")\r\n # kdeplot(agent_means,color=\"red\",cut=0,lw=4)\r\n \r\n if save:\r\n #f.savefig(self.save_dir+f\"Aggregate_obs.pdf\")\r\n #g.savefig(self.save_dir+f\"Aggregate_kf.pdf\")\r\n #h.savefig(self.save_dir+f\"Aggregate_l2.pdf\")\r\n #i.savefig(self.save_dir+f\"Aggregate_worst.pdf\")\r\n j.savefig(self.save_dir+f\"Aggregate_agent_hist.pdf\")\r\n \r\n return c,time_means",
"def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return",
"def plot_hdus(optlist, hduids, hdulist, ax):\n if optlist.row:\n map_axis = 0 # first axis (y) converts to scalar\n elif optlist.col:\n map_axis = 1 # second axis (x) converts to scalar\n elif optlist.gradient:\n logging.debug('not implemented yet')\n else:\n exit(1)\n # Process each HDU in the list \"hduids\"\n for hduid in hduids:\n hdu = hdulist[hduid]\n try:\n name = hdu.name\n except IndexError as ierr:\n logging.debug('IndexError: %s', ierr)\n logging.debug('using name=%s', hduid)\n name = \"{}\".format(hduid)\n if optlist.bias:\n iu.subtract_bias(optlist.bias, optlist.btype, hdu)\n (datasec, soscan, poscan) = iu.get_data_oscan_slices(hdu)\n slices = [] # define regions to plot\n for reg in optlist.row or optlist.col:\n logging.debug('processing %s', reg)\n if re.match(r\"data\", reg):\n slice_spec = datasec\n elif re.match(r\"over\", reg):\n slice_spec = soscan\n elif re.match(r\"pover\", reg):\n slice_spec = poscan\n else:\n slice_spec = iu.parse_region(reg)\n if slice_spec is not (None, None):\n slices.append(slice_spec)\n else:\n logging.error('skipping region %s', reg)\n for slice_spec in slices:\n logging.debug('calling line_plot() %s[%s]', name, reg)\n line_plot(slice_spec, hdu.data, optlist.ltype, optlist.steps,\n optlist.offset, map_axis, name, ax)\n ncalls()",
"def lambda_Param_Scan_visualization(lambds,method, mse_tr0):\n plt.semilogx(lambds, mse_tr0, marker=\".\", color='b', label='train error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"mse\")\n plt.title(\"Penalty term parameter_scan for \"+method)\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(method+\"lambda\")",
"def plotpeak(steps=[0,1,2], n=20, ipb=1, iph=1):\n # -- binning figures\n plt.figure(1); ax1=plt.gca()\n plt.figure(2); ax2=plt.gca()\n\n # -- average figures\n plt.figure(3); ax3=plt.gca()\n# plt.figure(4); ax4=plt.gca()\n\n a1 = []\n a2 = []\n for i in xrange(len(steps)):\n wgt1, phi1, wgt2, phi2, avg1, avg2 \\\n = arcscanout(istep=steps[i], ipb=ipb, iph=iph)\n a1.append(avg1)\n a2.append(avg2)\n\n # -- detector 1\n p0s, wbar = binning(n=n, phi=phi1, wgt=wgt1)\n p0s = p0s[::-1]\n wbar = wbar[::-1]\n ax1.plot(p0s, wbar, '-o', label='%i'%steps[i])\n\n # -- detector 2\n p0s, wbar = binning(n=n, phi=phi2, wgt=wgt2)\n p0s = p0s[::-1]\n wbar = wbar[::-1]\n ax2.plot(p0s, wbar, '-o', label='%i'%steps[i])\n\n\n ax3.plot(steps, a1, '-o', mfc='None', mec='red')\n ax3.plot(steps, a2, '-x', mec='blue')\n\n ax1.set_xlabel(r'$\\theta^{B}$', dict(fontsize=25))\n ax2.set_xlabel(r'$\\theta^{B}$', dict(fontsize=25))\n ax3.set_xlabel(r'steps', dict(fontsize=30))\n ax1.set_ylabel('Intensity')\n ax2.set_ylabel('Intensity')\n ax3.set_ylabel('Intensity')\n\n# ax1.legend(loc='best', fancybox=True).get_frame().set_alpha(0.5)\n# ax2.legend(loc='best', fancybox=True).get_frame().set_alpha(0.5)\n# ax3.legend(loc='best', fancybox=True).get_frame().set_alpha(0.5)\n\n plt.show()\n\n # Below is for effective modulus and stress factor",
"def plot_hill_func(self,sim_run=None,trace=0,synapse=0,average=False):\n\n if sim_run is None:\n sim_run = self.default_runs[0]\n\n cav_hits = sim_run.data[\"Ca_t\"][:,trace,synapse]\n\n p_v_func = hill(np.arange(200)/100.,S=1,ec50=sim_run.params[\"ca_ec50\"],n=sim_run.params[\"ca_coop\"])\n plt.plot(np.arange(200)/100.,p_v_func)\n for i in range(len(cav_hits)):\n plt.plot((cav_hits[i],cav_hits[i]),(0,1))\n plt.ylabel('Probbility of Vesicle Release')\n plt.xlabel('Calcium Concentration (arb. units)')\n plt.title('Location of [Ca] response on Hill Function for sequential APs')\n plt.show()",
"def view_datashade(self):\n # Select only sufficient data\n if self.x in self.y:\n self.y.remove(self.x)\n if self.y == []:\n return self.gif\n\n df = self.dataframe[[self.x] + self.y].copy()\n plot_opts = {\n 'Scatter': {'color': self.color_key, 'marker': self.marker_keys, 'size':10},\n 'Curve': {'color': self.color_key}\n }\n lines_overlay = df.hvplot.scatter(**self.plot_options).options(plot_opts)\n\n def hover_curve(x_range=[df.index.min(), df.index.max()]): # , y_range):\n # Compute\n dataframe = df.copy()\n if x_range is not None:\n dataframe = dataframe[(dataframe[self.x] > x_range[0]) & (dataframe[self.x] < x_range[1])]\n data_length = len(dataframe) * len(dataframe.columns)\n step = 1 if data_length < self.max_step else data_length // self.max_step\n \n plot_df = dataframe[::step].hvplot.line(**self.plot_options) * \\\n dataframe[::step*60].hvplot.scatter(**self.plot_options) \n plot_opts = {\n 'Scatter': {'color': 'k', 'marker': self.marker_keys, 'size':10},\n 'Curve': {'color': self.color_key}\n }\n if len(self.y) != 1:\n plot_opts['Scatter']['color'] = self.color_key\n return plot_df.options(plot_opts)\n\n # Define a RangeXY stream linked to the image\n rangex = hv.streams.RangeX(source=lines_overlay)\n data_shade_plot = hv.DynamicMap(hover_curve, streams=[rangex])\n if len(self.y) == 1:\n data_shade_plot *= datashade(lines_overlay)\n else:\n data_shade_plot *= datashade(lines_overlay, aggregator=ds.count_cat('Variable'))\n return pn.panel(data_shade_plot)",
"def anharm_plot():\n set_tag(qdt, \"EjdivEc\", log=False)\n set_tag(qdt, \"Ej\", log=False)\n\n #qdt.epsinf=qdt.epsinf/3.72\n #qdt.Np=10\n #qdt.Ec=qdt.fq*0.1*h\n print qdt.max_coupling, qdt.coupling_approx\n anharm=qdt.call_func(\"anharm\", EjdivEc=EjdivEc)\n anharmp=qdt.call_func(\"lamb_shifted_anharm\", EjdivEc=EjdivEc)\n fq=qdt.call_func(\"fq\", Ej=EjdivEc*qdt.Ec)\n ls_fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n ls_fq2=qdt.call_func(\"lamb_shifted_fq2\", EjdivEc=EjdivEc)\n\n pl, pf=line(fq/qdt.f0, (anharmp/h-anharm/h)/(2.0*qdt.max_coupling), linewidth=0.5, color=\"black\", label=r\"$\\Delta_{2,1}-\\Delta_{1,0}$\")\n line(fq/qdt.f0, (ls_fq-fq)/(2.0*qdt.max_coupling), plotter=pl, color=\"blue\", linewidth=0.5, label=r\"$\\Delta_{1,0}$\")\n E0, E1, E2=qdt.call_func(\"transmon_energy_levels\", EjdivEc=EjdivEc, n_energy=3)\n fq2=(E2-E1)/h\n line(fq/qdt.f0, (ls_fq2-fq2)/(2.0*qdt.max_coupling), plotter=pl, color=\"red\", linewidth=0.5, label=r\"$\\Delta_{2,1}$\")\n pl.set_ylim(-1.0, 0.6)\n pl.set_xlim(0.7, 1.3)\n pl.xlabel=r\"$f_{10}/f_{IDT}$\"\n pl.ylabel=r\"$\\Delta/\\Gamma_{10}^{MAX}$\"\n pl.legend(loc='lower left')\n #fq=qdt.call_func(\"lamb_shifted_fq\", EjdivEc=EjdivEc)\n #line(EjdivEc, fq, plotter=pl, color=\"green\", linewidth=0.5)\n\n #line(EjdivEc, E1p, plotter=pl, color=\"green\", linewidth=0.5)\n #line(EjdivEc, E2p, plotter=pl, color=\"purple\", linewidth=0.5)\n return pl",
"def density_scatterplot(GCAL_area_pwds, L_area_pwds, Kaschube_JSON, central_hc_area, show_axes=False,\n lw=3, s=60, half_width=0.16, regression=True, avgfn = np.median):\n fig = plt.figure(figsize=(8,6))\n ax = plt.subplot(111)\n max_pwds, min_pwds = [],[]\n\n plt.axhline(y=math.pi, linestyle='dotted')\n d = json.load(open(Kaschube_JSON,'r'))\n\n fcolor = (0,0,0,0)\n (color_f, color_s, color_g) = ('#27833a','#7fcbf1', '#f9ab27')\n plt.scatter(d['F']['x'], d['F']['y'], marker='D', edgecolor=color_f, facecolor=fcolor, s=s, lw=lw)\n plt.scatter(d['S']['x'], d['S']['y'], marker='D', edgecolor=color_s, facecolor=fcolor, s=s, lw=lw)\n plt.scatter(d['G']['x'], d['G']['y'], marker='D', edgecolor=color_g, facecolor=fcolor, s=s, lw=lw)\n\n F_medx, F_medy = np.median(d['F']['x']), np.median(d['F']['y'])\n S_medx, S_medy = np.median(d['S']['x']), np.median(d['S']['y'])\n G_medx, G_medy = np.median(d['G']['x']), np.median(d['G']['y'])\n\n plt.hlines(F_medy, F_medx-half_width, F_medx+half_width,colors=color_f, linestyles='solid', lw=lw)\n plt.hlines(S_medy, S_medx-half_width, S_medx+half_width,colors=color_s, linestyles='solid', lw=lw)\n plt.hlines(G_medy, G_medx-half_width, G_medx+half_width,colors=color_g, linestyles='solid', lw=lw)\n\n for (area_pwds, color, marker) in [(GCAL_area_pwds,'r','o'), (L_area_pwds,'b','o')]:\n hc_area_ordered = sorted(area_pwds)\n pwds = [pwd for (_, pwd) in hc_area_ordered]\n hc_areas = [hc for (hc, pwd) in hc_area_ordered]\n\n scaled_hc_areas = np.array([(central_hc_area/ np.mean(hc_areas))*hc_area for hc_area in hc_areas])\n plt.scatter(scaled_hc_areas, np.array(pwds), marker=marker,\n edgecolor=color, facecolor=(0,0,0,0), s=s,lw=lw)\n\n plt.hlines(avgfn(pwds), np.median(scaled_hc_areas)-half_width,\n np.median(scaled_hc_areas)+half_width,\n colors=color, linestyles='solid', lw=lw)\n\n if regression:\n from scipy import stats\n slope, intercept, r_value, p_value, std_err = stats.linregress(scaled_hc_areas,pwds)\n samples = np.linspace(0,1.1,100)\n plt.plot(samples, slope*samples + intercept, 'r--')\n print 'R value: %f' % r_value\n\n if not show_axes:\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n [spine.set_visible(False) for spine in ax.spines.values()]\n\n plt.ylim((0, 12))#15\n plt.xlim((0, 1.1))\n return fig",
"def plot(self, observable, plot_id, index=0, path=None, **kwargs):\n self.plot_id = plot_id\n self.figure = None\n self.axis = None\n # TODO : THIS LINE IS SO PROBLEMATIC...\n # self.plots = {}\n if observable not in self.observables:\n print(f\"Observable not found. Available ones are : {self.observables}\")\n return\n\n if plot_id not in self.plots:\n self.plots[plot_id] = plt.subplots(figsize=(16, 12), sharex='all')\n self.figure = self.plots[plot_id][0]\n self.axis = self.plots[plot_id][1]\n\n df = self.index\n protein = df.iloc[index][\"Protein\"]\n ionic_strength = df.iloc[index][\"I\"]\n eps = df.iloc[index][\"Eps\"]\n ls = df.iloc[index][\"Scale\"]\n\n self.protein = protein\n # if os.path.exists(os.path.join(os.path.join(definitions.hps_data_dir, 'sequences'), f'{self.protein}.seq')):\n # with open(os.path.join(os.path.join(definitions.hps_data_dir, 'sequences'), f'{self.protein}.seq')) as f:\n # self.sequence = f.readlines()[0]\n # else:\n # with open(os.path.join(os.path.join(definitions.hps_data_dir, 'sequences'), f'CPEB4.seq')) as f:\n # self.sequence = f.readlines()[0]\n\n def_label = f'{protein}, I = {ionic_strength:.0f}, ε = {eps:.0f} HPS = {ls:.1f}'\n self.label = kwargs.get(\"label\", def_label)\n self.style = kwargs[\"style\"] if \"style\" in kwargs else '-'\n self.color = kwargs[\"color\"] if \"color\" in kwargs else None\n\n # fout = f'{ls:.1f}ls-{ionic_strength:.0f}I-{eps:.0f}e'\n # d = os.path.join(self.oliba_data_dir, observable, protein, fout)\n # if os.path.exists(d) and self.force_recalc is False:\n # print(\"Requested data already available\")\n # self.obs_data = np.genfromtxt(os.path.join(d, 'data.txt'))\n\n if path:\n self.o_wd = path\n else:\n self.o_wd = df.iloc[index][\"FullPath\"]\n # TODO BETTER WAY FOR THIS ???\n # TODO : Maybe pass metaobject to plot ?\n equil_frames = kwargs.get('equil_frames', 300)\n super().__init__(oliba_wd=self.o_wd, equil_frames=equil_frames)\n # TODO : TOO LATE TO DO THIS ?\n self.temperatures = self.get_temperatures()\n\n if observable == 'rg':\n data = self.plot_rg(**kwargs)\n self.gyrations[protein] = data\n if observable == 'distance_map' or observable == 'contact_map':\n self.figure2, self.axis2 = plt.subplots(figsize=(16, 12), sharex='all')\n data = self.plot_distance_map(**kwargs)[0]\n self.contact_maps[protein] = data\n self.axis2.legend(fontsize=self.label_fsize)\n self.axis2.xaxis.set_tick_params(labelsize=self.ticks_fsize)\n self.axis2.yaxis.set_tick_params(labelsize=self.ticks_fsize)\n if observable == 'dij':\n data = self.plot_dijs(plot_flory_fit=True, plot_ideal_fit=False, **kwargs)[1]\n self.dijs[protein] = data\n if observable == 'flory':\n data = self.plot_flory(**kwargs)\n if observable == 'inter_r':\n data = self.plot_distance_between_chains(**kwargs)\n if observable == 'charge':\n data = self.plot_q_distr(**kwargs)\n if observable == 'rg_distr':\n data = self.plot_rg_distr(**kwargs)\n if observable == 'phase_diagram':\n first = kwargs.get('first',0)\n last = kwargs.get('last',None)\n data = self.plot_phase_diagram(first=first, last=last)\n if observable == 'clusters':\n data = self.plot_clusters(**kwargs)\n if observable == 'rho':\n temperature = kwargs[\"temperature\"] if \"temperature\" in kwargs else 0\n data = self.plot_density_profile(T=temperature)\n if self.label != def_label:\n self.axis.legend(fontsize=self.label_fsize)\n self.axis.xaxis.set_tick_params(labelsize=self.ticks_fsize)\n self.axis.yaxis.set_tick_params(labelsize=self.ticks_fsize)\n # if self.obs_data is None:\n # 
pathlib.Path(d).mkdir(parents=True, exist_ok=True)\n # np.savetxt(os.path.join(d, 'data.txt'), data)\n self.figure.savefig(os.path.join(self.this, f'temp/plot_{plot_id}.png'))\n return data",
"def dtw_plot_appendix(output = 'output/img/series/dtw'):\n # regions\n regs = _src.regions()\n regsL = [(v) for v in regs.values()]\n \n # iterate\n for r1 in range(len(regs)):\n for r2 in range(r1+1, len(regs)):\n dtw_plot(regsL[r1]['NUTS3'], regsL[r2]['NUTS3'],\n output = output, font_size = 12)",
"def plot_EDR_dVR(table):\n fig = plt.figure(figsize=(24, 10), dpi=300)\n # For plotting the size: y = 0 x = 50, \n coefa = (50/(table.init_alt.values.min() - table.init_alt.values.max()))\n coefb = 75 - (coefa * table.init_alt.values.min())\n # -- 1\n ax1 = fig.add_subplot(121)\n scatter = plt.scatter(x = table.dist_1.values, \n y = table.turbulence.values, \n marker = 'o', \n s = size_pt(table.init_alt.values,table.init_alt.values.min(), table.init_alt.values.max())*20,\n c = np.log10(table.area.values),\n cmap = 'viridis', vmin = 0, vmax = 3, alpha = 0.65,\n edgecolor='dimgrey')\n ax1.axhline(0, color = 'k', linestyle = 'dotted')\n\n legend_elements = [Line2D([0], [0], color='dimgrey',lw=0,linestyle = None,marker = 'o',\n markersize = np.sqrt(size_pt(5000,table.init_alt.values.min(), table.init_alt.values.max())*20),\n label='5 km'),\n Line2D([0], [0], color='dimgrey',lw=0,linestyle = None,marker = 'o',\n markersize = np.sqrt(size_pt(9000,table.init_alt.values.min(), table.init_alt.values.max())*20),\n label='9 km'),\n Line2D([0], [0], color='dimgrey',lw=0,linestyle = None,marker = 'o',\n markersize = np.sqrt(size_pt(13000,table.init_alt.values.min(), table.init_alt.values.max())*20),\n label='13 km')]\n legend = plt.legend(handles = legend_elements, ncol = 1,loc = 'upper right',\n labelspacing = 2,fontsize = 12, title='Altitude',\n framealpha = 0, borderpad = 0.7)\n legend.set_title('Altitude',prop={'size':14})\n\n cbar = plt.colorbar(label='$log_{10}$ Flash Area ($km^2$)', pad = 0.01)\n cbar.set_label('$log_{10}$ Flash Area ($km^2$)', fontsize = 15)\n cbar.ax.tick_params(labelsize = 13)\n\n plt.xticks(fontsize = 14)\n plt.yticks(fontsize = 14)\n plt.xlabel('Distance from the flash initiation point to the interception point on the RHI scan (m)', fontsize = 14)\n plt.ylabel('EDR $(m^2 s^{-3}) ^{1/3}$', fontsize = 14)\n\n # -- 2\n ax2 = fig.add_subplot(122)\n plt.scatter(x = table.dist_1.values[np.where(table.area.values>0)], \n y = table.dVRde.values[np.where(table.area.values>0)], \n marker = 'o', \n s = size_pt(table.init_alt.values,table.init_alt.values.min(), table.init_alt.values.max())*20,\n c = np.log10(table.area.values[np.where(table.area.values>0)]),\n cmap='Blues_r',vmin=0, vmax = 3 ,alpha = 0.55, edgecolor='dimgrey')\n\n plt.scatter(x = table.dist_1.values[np.where(table.area.values>0)], \n y = table.dVRdr.values[np.where(table.area.values>0)], \n marker = 'o', \n s = size_pt(table.init_alt.values,table.init_alt.values.min(), table.init_alt.values.max())*20,\n c = np.log10(table.area.values[np.where(table.area.values>0)]), \n cmap = 'Reds_r', alpha=0.55, vmin = 0, vmax =3, edgecolor='dimgrey')\n\n ax2.axhline(0, color = 'k', linestyle = 'dotted')\n\n\n legend_elements1 = [Line2D([0], [0], color='dimgrey',lw=0,linestyle = None,marker = 'o',\n markersize = np.sqrt(size_pt(5000,table.init_alt.values.min(), table.init_alt.values.max())*20),\n label='5 km'),\n Line2D([0], [0], color='dimgrey',lw=0,linestyle = None,marker = 'o',\n markersize = np.sqrt(size_pt(9000,table.init_alt.values.min(), table.init_alt.values.max())*20),\n label='9 km'),\n Line2D([0], [0], color='dimgrey',lw=0,linestyle = None,marker = 'o',\n markersize = np.sqrt(size_pt(13000,table.init_alt.values.min(), table.init_alt.values.max())*20),\n label='13 km')]\n legend1 = plt.legend(handles=legend_elements1, ncol = 1, loc = 'upper right',\n labelspacing=2,fontsize = 12, title='Altitude',\n framealpha = 0, borderpad=0.7)\n legend1.set_title('Altitude',prop={'size':14})\n plt.gca().add_artist(legend1)\n\n 
legend_elements2 = [Line2D([0], [0], color='blue',lw=0,linestyle = None,marker = 'o',\n markersize = 14, alpha=0.55,\n label = r'$\\frac{1}{r} \\frac{\\partial V_R}{\\partial \\hat{\\Phi}}$'),\n Line2D([0], [0], color='red',lw=0,linestyle = None,marker = 'o',\n markersize = 14,alpha=0.55,\n label = r'$\\frac{\\partial V_R}{\\partial \\hat{r}}$')]\n legend2 = plt.legend(handles=legend_elements2, ncol = 1, loc = 'lower right',\n labelspacing=0.7,fontsize = 20, \n framealpha = 0, borderpad=0.7)\n\n cmap = plt.get_cmap('Greys_r',100)\n norm = mpl.colors.Normalize(vmin=0,vmax=3)\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n sm.set_array([])\n cb = plt.colorbar(sm, label='$log_{10}$ Flash Area ($km^2$)',pad=0.01)\n cb.set_label('$log_{10}$ Flash Area ($km^2$)', size=15)\n cb.ax.tick_params(labelsize=13)\n\n plt.xlabel('Distance from the flash initiation point to the interception point on the RHI scan (m)', fontsize = 14)\n plt.ylabel('Velocity derivative in space ($s^{-1}$)', fontsize = 14)\n plt.xticks(fontsize = 14)\n plt.yticks(fontsize = 14)\n plt.ylim(-0.2, 0.2)\n \n td1 = fig.text(0.1274, 0.8898, \"a\", \n fontsize = 18,fontweight = 'bold')\n\n td2 = fig.text(0.55, 0.8898, \"b\", \n fontsize = 18,fontweight = 'bold')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Searches for a winning sequence in the columns of the board.
|
def check_columns(self, win: list) -> bool:
    # Scan each column of the board for the winning sequence `win`.
    for col in range(self.size):
        column = [self.tags[row][col] for row in range(self.size)]
        # Slide a window of the sequence length down the column
        # (len(win) is assumed to equal self.win_condition).
        for j in range(len(column) - len(win) + 1):
            if win == column[j:j + len(win)]:
                return True
    return False
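For illustration only (not part of the original class): a minimal standalone sketch of the same sliding-window column check. The helper name has_column_win, the row-major grid `tags`, and the sample 3x3 board are hypothetical, and the window length is taken from len(win) under the same assumption as above.

def has_column_win(tags, win):
    # tags: square grid stored as a list of rows; win: sequence to look for.
    size = len(tags)
    for col in range(size):
        column = [tags[row][col] for row in range(size)]
        for j in range(len(column) - len(win) + 1):
            if column[j:j + len(win)] == win:
                return True
    return False

# Example: 'X' wins down the middle column of a 3x3 grid.
grid = [['X', 'X', 'O'],
        ['O', 'X', 'O'],
        ['O', 'X', ' ']]
assert has_column_win(grid, ['X', 'X', 'X'])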
|
[
"def check_columns():\n global game_still_going\n # Check if any of the rows have all the same value.\n column1 = board[0] == board[3] == board[6] != '_'\n column2 = board[1] == board[4] == board[7] != '_'\n column3 = board[2] == board[5] == board[8] != '_'\n # If any column does have a match, then game still going to False.\n if column1 or column2 or column3:\n game_still_going = False\n # Return winner 'X' or 'O'.\n if column1:\n return board[0]\n if column2:\n return board[1]\n if column3:\n return board[2]",
"def win_column(playerid):\n\n if board[0][0] is playerid and board[1][0] is playerid and board[2][0] is playerid:\n return (True, \"Column 1\")\n\n if board[0][1] is playerid and board[1][1] is playerid and board[2][1] is playerid:\n return (True, \"Column 2\")\n\n if board[0][2] is playerid and board[1][2] is playerid and board[2][2] is playerid:\n return (True, \"Column 3\")\n\n return False",
"def check_col_win(board, symb):\r\n for i in range(3):\r\n column = []\r\n # Creating column:\r\n for j in range(3):\r\n column.append(board.data[j][i])\r\n # Checking:\r\n if to_bool(symb) in column and (to_bool(opposite(symb)) not in column) and \\\r\n (None not in column):\r\n return True\r\n return False",
"def check_win_column(board, max_rows, max_cols, num_connect, col, player):\r\n adjacent = 0\r\n col_top = 0\r\n while col_top < max_cols:\r\n piece = get_piece(board, col_top, col)\r\n if piece == player:\r\n adjacent += 1\r\n else:\r\n adjacent = 0\r\n if adjacent >= num_connect:\r\n return True\r\n col_top += 1\r\n return False",
"def is_winner_column(table, column):\n column = tuple((line[column] for line in table))\n if column == WINNER_P1 or column == WINNER_P2:\n return True\n return False",
"def checkColumn(cur_player,pos):\n i=0\n for row in range(3):\n if positions[row][pos]==cur_player:\n i+=1\n if i==3:\n return True\n else:\n return False",
"def column_similarity (self, row, col):\n my_number = self.board[row][col]\n \n for i in range (9):\n if (i,col) == (row,col):\n continue\n elif self.board[i][col] == my_number:\n return [i, col, False] \n else:\n continue",
"def find_column_index(self, columns):\n for i in range(len(columns)):\n if self.match(columns[i]):\n return i\n return None",
"def horizontalSeq(row, col):\n count = 0\n for colIndex in range(col, BOARD_COLS):\n if board[row][colIndex] == board[row][col]:\n count += 1\n else:\n break\n if count >= length:\n if col >= 1 and col + length < BOARD_ROWS:\n if board[row][col-1] == 0 and board[row][col+length] == 0:\n return 3\n elif board[row][col-1] == 0 or board[row][col+length] == 0:\n return 2\n else:\n return 1\n return 1\n else:\n return 0",
"def check_if_there_is_win(self):\n # The function goes along all the sequences in the game\n for sequence in self.all_possible_sequences_in_game:\n winning_sequence_was_found = True\n for i in range(1, len(sequence)):\n # Checks if anyone of the slots is empty.\n # If one was founded, it changes the flag to False.\n if self.board[sequence[i][0]][sequence[i][1]] == self.EMPTY_BLOCK:\n winning_sequence_was_found = False\n break\n # Checks if all slots has equal value.\n # If they don't, it changes the flag to False.\n if self.board[sequence[i][0]][sequence[i][1]] != \\\n self.board[sequence[i - 1][0]][sequence[i - 1][1]]:\n winning_sequence_was_found = False\n break\n # If the flag is still True\n # it means all slots aren't empty and equal the function returns\n # the winner and the sequence of slots that 'won' the game.\n if winning_sequence_was_found:\n return self.board[sequence[0][0]][sequence[0][1]], sequence\n\n return False",
"def check_win(board):\n\n is_winner = False\n\n vertical_list_first_column = []\n vertical_list_second_column = []\n vertical_list_third_column = []\n column_list = [vertical_list_first_column, vertical_list_second_column, vertical_list_third_column]\n\n cross_from_left = [board[0][0], board[1][1], board[2][2]]\n cross_from_right = [board[0][2], board[1][1], board[2][0]]\n \n #Check win horizontally \n for row in board:\n vertical_list_first_column.append(row[0])\n vertical_list_second_column.append(row[1])\n vertical_list_third_column.append(row[2])\n \n if len(set(row)) == 1 and list(set(row))[0] == PLAYER1:\n is_winner = True\n return is_winner, PLAYER1\n\n elif len(set(row)) == 1 and set(row) == PLAYER2:\n is_winner = True\n return is_winner, PLAYER2\n\n #Check win vertical\n for col in column_list:\n if len(set(col)) == 1 and list(set(col))[0] == PLAYER1:\n is_winner = True\n return is_winner, PLAYER1\n\n elif len(set(col)) == 1 and set(col) == PLAYER2:\n is_winner = True\n return is_winner, PLAYER2\n\n #Check win from left cross\n if len(set(cross_from_left)) == 1 and list(set(cross_from_left))[0] == PLAYER1:\n is_winner = True \n return is_winner, PLAYER1\n elif len(set(cross_from_left)) == 1 and list(set(cross_from_left))[0] == PLAYER2:\n is_winner = True\n return is_winner, PLAYER2\n\n #Check win from right cross\n if len(set(cross_from_right)) == 1 and list(set(cross_from_right))[0] == PLAYER1:\n is_winner = True\n return is_winner, PLAYER1\n elif len(set(cross_from_right)) == 1 and list(set(cross_from_right))[0] == PLAYER2:\n is_winner = True\n return is_winner, PLAYER2\n\n return is_winner, None",
"def test_go_for_win_column(self):\n state = [0,0,0,C,0,0,C,0,0]\n self.assertEquals(botMove(state),0)\n\n state = [C,0,0,0,0,0,C,0,0]\n self.assertEquals(botMove(state),3)\n\n state = [C,0,0,C,0,0,0,0,0]\n self.assertEquals(botMove(state),6)\n\n state = [0,0,0,0,C,0,0,C,0]\n self.assertEquals(botMove(state),1)\n\n state = [0,C,0,0,0,0,0,C,0]\n self.assertEquals(botMove(state),4)\n\n state = [0,C,0,0,C,0,0,0,0]\n self.assertEquals(botMove(state),7)\n\n state = [0,0,0,0,0,C,0,0,C]\n self.assertEquals(botMove(state),2)\n\n state = [0,0,C,0,0,0,0,0,C]\n self.assertEquals(botMove(state),5)\n\n state = [0,0,C,0,0,C,0,0,0]\n self.assertEquals(botMove(state),8)",
"def _check_winner_in_sequence(self, sequence):\n current_streak = 0\n current_player = None\n for token in sequence:\n if token is not None and token == current_player:\n current_streak += 1\n if current_streak >= 4:\n return current_player\n else:\n current_streak = 1\n current_player = token",
"def winning_move(board):\n for row in WINNING_POSITIONS:\n if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:\n return True\n\n return False",
"def searchcols(self,fctn,cols,*args):\n goodkeys=[]\n for key in self.allrowkeys:\n temp=[]\n for c in cols:\n temp.append(self.getentry(key,c))\n for i in range(len(args)):\n temp.append(args[i])\n\n if fctn(*tuple(temp)):\n goodkeys.append(key)\n return(goodkeys)",
"def look_through_rows(board, column, player):\n if board.shape[1] > column:\n count = board.shape[0] - 1\n count2 = 1\n while count >= 0 and count2 == 1:\n if board[count,column] == 0:\n board[count,column] = player\n count2 = count2 - 1\n else:\n count = count - 1\n return board\n else:\n print('Improper Column Given')",
"def winner(self):\n for i in range(self.height):\n for j in range(self.width):\n for getter_function in (self.get_row, self.get_column, self.get_diagonal_lowleft_to_upright, self.get_diagonal_upleft_to_lowright):\n try:\n line, positions = getter_function(i,j)\n except IndexError:\n continue\n if abs(line.sum()) == 5:\n return line[0], positions\n return None, []",
"def same_col(i, j):\n return (i - j) % 9 == 0",
"def _scan_row(self, row, start_col, end_col):\n for col in range(start_col, end_col):\n if (self._text_maze[row, col] != self._wall_char\n or self._covered[row, col]):\n return col\n return end_col"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks for a winning sequence in every diagonal that is at least as long as the winning condition.
|
def check_diagonals(self, win: list) -> bool:
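    # Four scans cover every diagonal long enough to matter: down-right diagonals
    # starting on the first column (x = i, y = 0) and on the first row (x = 0, y = i),
    # and up-right diagonals starting on the first column (x = size-1-i, y = 0) and
    # on the last row (x = size-1, y = i). Since i <= size - win_condition, each
    # scanned diagonal holds at least win_condition cells.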
for i in range(self.size - self.win_condition + 1):
# [x x ]
# [ x x ]
# [ x x]
# [ x]
diagonal = []
x = i
y = 0
for j in range(self.size - i):
diagonal.append(self.tags[x][y])
x += 1
y += 1
for j in range(len(diagonal) - len(win) + 1):
if win == diagonal[j:j + self.win_condition]:
return True
# [x ]
# [x x ]
# [ x x ]
# [ x x]
diagonal = []
x = 0
y = i
for j in range(self.size - i):
diagonal.append(self.tags[x][y])
x += 1
y += 1
for j in range(len(diagonal) - len(win) + 1):
if win == diagonal[j:j + self.win_condition]:
return True
# [ x x]
# [ x x ]
# [x x ]
# [x ]
diagonal = []
x = self.size - 1 - i
y = 0
for j in range(self.size - i):
diagonal.append(self.tags[x][y])
x -= 1
y += 1
for j in range(len(diagonal) - len(win) + 1):
if win == diagonal[j:j + self.win_condition]:
return True
# [ x]
# [ x x]
# [ x x ]
# [x x ]
diagonal = []
x = self.size - 1
y = 0 + i
for j in range(self.size - i):
diagonal.append(self.tags[x][y])
x -= 1
y += 1
for j in range(len(diagonal) - len(win) + 1):
if win == diagonal[j:j + self.win_condition]:
                return True
    return False
|
[
"def __checkDiagonalsRL(self):\n for i in range(self.board.cols - self.winCondition, self.board.cols - 1):\n counter = [0, 0]\n for j in range(i + 1):\n position = (j * self.board.cols) + i - j\n counter = self.check(position, counter)\n if counter[1] == self.winCondition:\n return True\n\n for m in range(self.board.rows - self.winCondition + 1):\n counter = [0, 0]\n for n in range(self.board.rows - m):\n position = ((m + n + 1) * self.board.cols) - n - 1\n counter = self.check(position, counter)\n if counter[1] == self.winCondition:\n return True\n\n return False",
"def __checkDiagonalsLR(self):\n for i in range(1, self.board.cols - self.winCondition + 1):\n counter = [0, 0]\n for j in range(self.board.cols - i):\n position = (j * self.board.cols) + i + j\n counter = self.check(position, counter)\n if counter[1] == self.winCondition:\n return True\n\n for m in range(self.board.rows - self.winCondition + 1):\n counter = [0, 0]\n for n in range(self.board.rows - m):\n position = ((m + n) * self.board.cols) + n\n counter = self.check(position, counter)\n if counter[1] == self.winCondition:\n return True\n\n return False",
"def check_wins(self):\n trace = np.sum(np.diag(self.board))\n antitrace = np.sum(np.diag(np.flipud(self.board)))\n all_sums = np.concatenate((np.sum(self.board, 0), np.sum(self.board, 1), [trace], [antitrace]))\n if self.crosses * self.win_len in all_sums:\n return 1\n elif self.naughts * self.win_len in all_sums:\n return -1\n elif np.prod(self.board) != 0:\n return -2\n else:\n return 0",
"def is_winning(self, curr_state):\n rows = [[0,1,2], [3,4,5], [6,7,8]]\n columns = [[0,3,6], [1,4,7], [2,5,8]]\n diagonal = [[0,4,8], [2,4,6]]\n total_checks = rows + columns + diagonal\n for row in total_checks:\n sum = 0\n count = 0\n for pos in row:\n if np.isnan(curr_state[pos]):\n break\n else:\n sum = sum + curr_state[pos]\n count = count + 1\n if sum == 15 and count == 3:\n return True\n return False",
"def diag_win(board):\n\tif board[1][1] != EMPTY and (board[1][1] == board[0][2] == board[2][0] or board[1][1] == board[0][0] == board[2][2]):\n\t\treturn True\n\treturn False",
"def game_win(success_num, list, x, y):\r\n length1, length2, total = 0, 0, 0 # judge if win horizontally\r\n for i in range(1, success_num):\r\n if (x - i) >= 0 and (x - i, y) in list:\r\n length1 += 1\r\n else:\r\n break\r\n for i in range(1, success_num):\r\n if (x + i) <= 14 and (x + i, y) in list:\r\n length2 += 1\r\n else:\r\n break\r\n total = length1 + length2 + 1\r\n if (total >= success_num):\r\n return True\r\n\r\n length1, length2, total = 0, 0, 0 # judge if win vertically\r\n for i in range(1, success_num):\r\n if (y - i) >= 0 and (x, y - i) in list:\r\n length1 += 1\r\n else:\r\n break\r\n for i in range(1, success_num):\r\n if (y + i) <= 14 and (x, y + i) in list:\r\n length2 += 1\r\n else:\r\n break\r\n total = length1 + length2 + 1\r\n if (total >= success_num):\r\n return True\r\n\r\n length1, length2, total = 0, 0, 0 # judge if win diagonally in Second and fourth quadrants\r\n for i in range(1, success_num):\r\n if (x - i) >= 0 and (y - i) >= 0 and (x - i, y - i) in list:\r\n length1 += 1\r\n else:\r\n break\r\n for i in range(1, success_num):\r\n if (x + i) <= 14 and (y + i) <= 14 and (x + i, y + i) in list:\r\n length2 += 1\r\n else:\r\n break\r\n total = length1 + length2 + 1\r\n if (total >= success_num):\r\n return True\r\n\r\n length1, length2, total = 0, 0, 0 # judge if win diagonally in First and Third quadrants\r\n for i in range(1, success_num):\r\n if (x - i) >= 0 and (y + i) <= 14 and (x - i, y + i) in list:\r\n length1 += 1\r\n else:\r\n break\r\n for i in range(1, success_num):\r\n if (x + i) <= 14 and (y - i) >= 0 and (x + i, y - i) in list:\r\n length2 += 1\r\n else:\r\n break\r\n total = length1 + length2 + 1\r\n if (total >= success_num):\r\n return True\r\n\r\n return False",
"def check_if_there_is_win(self):\n # The function goes along all the sequences in the game\n for sequence in self.all_possible_sequences_in_game:\n winning_sequence_was_found = True\n for i in range(1, len(sequence)):\n # Checks if anyone of the slots is empty.\n # If one was founded, it changes the flag to False.\n if self.board[sequence[i][0]][sequence[i][1]] == self.EMPTY_BLOCK:\n winning_sequence_was_found = False\n break\n # Checks if all slots has equal value.\n # If they don't, it changes the flag to False.\n if self.board[sequence[i][0]][sequence[i][1]] != \\\n self.board[sequence[i - 1][0]][sequence[i - 1][1]]:\n winning_sequence_was_found = False\n break\n # If the flag is still True\n # it means all slots aren't empty and equal the function returns\n # the winner and the sequence of slots that 'won' the game.\n if winning_sequence_was_found:\n return self.board[sequence[0][0]][sequence[0][1]], sequence\n\n return False",
"def is_down_diagonal_win(self, checker):\n for row in range(self.height-3):\n for col in range(self.width-3):\n if self.slots[row][col] == checker and \\\n self.slots[row+1][col+1] == checker and \\\n self.slots[row+2][col+2] == checker and \\\n self.slots[row+3][col+3] == checker:\n return True\n return False",
"def diagonal_win():\n\n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True",
"def is_up_diagonal_win(self, checker):\n for row in range(3, self.height):\n for col in range(self.width-3):\n if self.slots[row][col] == checker and \\\n self.slots[row-1][col+1] == checker and \\\n self.slots[row-2][col+2] == checker and \\\n self.slots[row-3][col+3] == checker:\n return True\n return False",
"def check_winner(self):\n if self.history:\n last_move = self.history[-1]\n last_player = self.get_last_player()\n\n connected_token = last_player*self.n_in_row\n # check for row\n if connected_token in [sum(self.state[last_move[0]][j:j+self.n_in_row]) for j in range(0, self.width-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for column\n if connected_token in [sum(self.state.T[last_move[1]][i:i+self.n_in_row]) for i in range(0, self.height-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for diagonal with slope 1\n diagonal = np.diag(self.state, last_move[1]-last_move[0])\n if connected_token in [sum(diagonal[i:i+self.n_in_row]) for i in range(0, len(diagonal)-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for diagonal with slope -1\n diagonal = np.diag(self.state[:,::-1], self.width-1-last_move[1]-last_move[0])\n if connected_token in [sum(diagonal[i:i+self.n_in_row]) for i in range(0, len(diagonal)-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for draw game\n if len(np.argwhere(self.state==0)) == 0:\n self.winner = [True, 0]\n return self.winner\n return self.winner",
"def is_down_diagonal_win(self, checker):\r\n for row in range(self.height - self.win_condition + 1):\r\n for col in range(self.width - self.win_condition + 1):\r\n num_checkers = 0\r\n for i in range(self.win_condition):\r\n if self.grid[row + i][col + i] == checker:\r\n num_checkers += 1\r\n\r\n if num_checkers == self.win_condition:\r\n return True\r\n\r\n # if we get here, there's no horizontal win\r\n return False",
"def check_winner(board):\n\n # For rows\n for i in range(rows_num):\n row = board[i,:]\n winner = check_list(row)\n\n if winner is not None:\n print('Winner is player {}'.format(winner))\n return True\n\n # For columns\n for i in range(cols_num):\n column = board[:,i]\n winner = check_list(column)\n\n if winner is not None:\n print('Winner is player {}'.format(winner))\n return True\n\n # For diagonals\n # get diags from left side\n diags = [board[::-1, :].diagonal(i) for i in range(-board.shape[0] + 1, board.shape[1])]\n # get diags from right side\n diags.extend(board.diagonal(i) for i in range(board.shape[1] - 1, -board.shape[0], -1))\n\n for i in range(len(diags)):\n if len(diags[i]) >= 4:\n column = diags[i]\n winner = check_list(column)\n\n if winner is not None:\n print('Winner is player {}'.format(winner))\n return True",
"def check_board_completed(self, row, col):\n rowsum = 0\n colsum = 0\n main_diag_sum = 0 # sum of diagonal from top-left to bottom-right\n alt_diag_sum = 0 # sum of diagonal from top-right to bottom-left\n for i in [0, 1, 2]:\n rowsum += self.check_cell(row, i)\n colsum += self.check_cell(i, col)\n main_diag_sum += self.check_cell(i, i)\n alt_diag_sum += self.check_cell(i, 2 - i)\n\n if rowsum == Board.X_WIN_COND or colsum == Board.X_WIN_COND or main_diag_sum == Board.X_WIN_COND or alt_diag_sum == Board.X_WIN_COND:\n self.board_completed = True\n self.winner = Board.X\n elif rowsum == Board.O_WIN_COND or colsum == Board.O_WIN_COND or main_diag_sum == Board.O_WIN_COND or alt_diag_sum == Board.O_WIN_COND:\n self.board_completed = True\n self.winner = Board.O\n\n # check for tie\n completed_boards = 0\n for row in [0, 1, 2]:\n for col in [0, 1, 2]:\n if self.check_cell(row, col) in [Board.X, Board.O, Board.CAT]:\n completed_boards += 1\n\n if completed_boards == 9:\n self.board_completed = True",
"def is_winning(self, curr_state):\n \n # Check sum of any column is 15\n is_column_win = (curr_state.sum(axis=0) == 15).sum() > 0\n \n # Check sum of any row is 15\n is_row_win = (curr_state.sum(axis=1) == 15).sum() > 0\n \n # Check sum of any diagonal is 15\n is_diagonal_win = curr_state.trace() == 15 or np.flip(curr_state,1).trace()\n \n return is_column_win or is_row_win or is_diagonal_win",
"def diagonal_check(row, col, board, player, streak):\n total = 0\n\n # check for diagonals with negative slope\n consecutive_count = 0\n j = col\n for i in range(row, c4.BOARD_WIDTH):\n if j > c4.BOARD_HEIGHT - 1:\n break\n elif board[i][j] == player:\n consecutive_count += 1\n else:\n break\n j += 1 # increment column when row is incremented\n\n if consecutive_count >= streak:\n total += 1\n\n consecutive_count = 0\n j = col\n for i in range(row, c4.BOARD_WIDTH):\n if j < 0:\n break\n elif board[i][j] == player:\n consecutive_count += 1\n else:\n break\n j -= 1 # increment column when row is incremented\n if consecutive_count >= streak:\n total += 1\n return total",
"def diag_win(self, c):\n diag = []\n offdiag = []\n for i in range(self.size):\n diag += self.board[i*self.size+i]\n offdiag += self.board[((i+1)*self.size)-1-i]\n if diag == [c]*self.size or offdiag == [c]*self.size:\n return True\n return False",
"def is_up_diagonal_win(self, checker):\r\n for row in range(self.height - self.win_condition + 1):\r\n for col in range(self.width - self.win_condition + 1):\r\n num_checkers = 0\r\n for i in range(self.win_condition):\r\n if self.grid[self.height - row - 1 - i][col+i] == checker:\r\n num_checkers += 1\r\n\r\n if num_checkers == self.win_condition:\r\n return True\r\n\r\n # if we get here, there's no horizontal win\r\n return False",
"def check_diags(self, lastPlayer):\n if self.cells[5] == \" \":\n return False\n # on s'intéresse à celles qui se croisent en 4\n if (self.cells[1] == lastPlayer\n and self.cells[5] == lastPlayer\n and self.cells[9] == lastPlayer):\n return True\n elif (self.cells[3] == lastPlayer\n and self.cells[5] == lastPlayer\n and self.cells[7] == lastPlayer):\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks whether the board is completely filled with marks, which in practice means checking whether the tags array has no empty cells.
|
def full_board(self) -> bool:
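    # Count how many columns of the tags grid still contain an empty (None) cell;
    # the board is full only when that count is zero.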
counter = 0
for column in self.tags:
if None in column:
counter += 1
return counter == 0
|
[
"def is_full(board):\r\n return False",
"def is_full(board):\n return False",
"def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True",
"def is_full(self):\n print(\"Implem: checking that plate is full could done be better\")\n \n for well in self.wells:\n if isinstance(well, Container):\n continue\n else: \n return False\n return True",
"def __isBoardFull(self):\n if len(self.userPiecesService.getList()) + len(self.aiPiecesService.getList()) == 225:\n return True\n return False",
"def check_board_if_full(self):\n for row in range(self.height):\n for col in range(self.width):\n if self.board[row][col] == Game_Board.EMPTY_BLOCK:\n return False\n return True",
"def full():\r\n\r\n count = 0\r\n for slot in board:\r\n if slot not in '012345678':\r\n count += 1\r\n return count == 9",
"def game_board_full(game_board):\n \n if EMPTY in game_board[:]:\n return False\n else: \n return True",
"def is_full(self):\n return len(self.walls) == 4",
"def isBoardFull(self):\n\n if \" \" in self.current_state:\n return False\n else:\n return True",
"def is_full(self): #checks to see if stack is full by comparing it to the capacity\n if self.num_items == self.capacity:\n return True\n else:\n return False",
"def is_board_full(board):\r\n\r\n board = eval(board)\r\n\r\n for row in board:\r\n for col in range(3):\r\n if row[col] in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\r\n return False\r\n\r\n return True",
"def isGridFull(self):\r\n for i, j in itertools.product(range(1, self.size + 1), range(1, self.size + 1)):\r\n if self.getCell(i, j) == 0:\r\n return False\r\n return True",
"def is_full(self):\n if self.stack1_size + self.stack2_size == len(self._data):\n return True\n return False",
"def _top_row_is_full(self) -> bool:\r\n for cell in self.field[0]:\r\n if cell.status == EMPTY:\r\n return False\r\n return True",
"def test_bin_seelection(self):\n p = packer.PackerBNF(pack_algo=guillotine.GuillotineBafSas, \n sort_algo=packer.SORT_NONE, rotation=False)\n p.add_bin(50, 50, count=100)\n p.add_bin(100, 100)\n p.add_bin(300, 300, count=100)\n\n p.add_rect(40, 40)\n p.add_rect(90, 90)\n p.add_rect(5, 5)\n p.add_rect(20, 20)\n p.add_rect(10, 10)\n p.pack()\n \n self.assertTrue((0, 0, 0, 40, 40, None) in p.rect_list())\n self.assertTrue((1, 0, 0, 90, 90, None) in p.rect_list())\n self.assertTrue((1, 0, 90, 5, 5, None) in p.rect_list())\n self.assertTrue((2, 0, 0, 20, 20, None) in p.rect_list())\n self.assertTrue((2, 0, 20, 10, 10, None) in p.rect_list())\n\n self.assertEqual(len(p), 3)\n self.assertEqual(p[0].width, 50)\n self.assertEqual(p[1].width, 100)\n self.assertEqual(p[2].width, 50)",
"def queue_is_crowded(self):\n a = [self.model.grid.is_cell_empty(a) for a in self.queue_list] #check if cells are empty in queue list\n if sum(a)>4: #if less than 4 cells are non-empty return false\n return False\n else:\n return True",
"def label_is_filled(self):\n for label in self.__labels:\n if label is not None:\n if label.is_tagged() is True:\n if label.get_best().get_content() != \"\":\n return True\n return False",
"def isFullBlackBox(self):\r\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks for empty spaces in the tags list. If an empty space is found, its coordinates are packed into a tuple and appended to a new list.
|
def check_for_moves(self) -> list:
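    # Collect the (x, y) coordinates of every cell that is still empty (None).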
avail_moves = []
for x in range(self.size):
for y in range(self.size):
if self.tags[x][y] is None:
avail_moves.append((x, y))
return avail_moves
|
[
"def empty_space(array):\n global l\n global empty_coordinates_list\n #This is not needed anymore.\n # def x(x):\n # if x == None:\n # x = int(randint(0,30))\n # return x\n\n # def y(y):\n # if y == None:\n # y = int(randint(0,30))\n # return y\n # x(x)\n # y(y)\n coordinate = [array[0], array[1]]\n if l[array[0]][array[1]] == \" \":\n print(\"Already visited empty place\") # print \"already here\"\n\n elif l[array[0]][array[1]] == '#':\n empty_coordinates_list.append(coordinate)\n y = coordinate[0]\n x = coordinate[1]\n l[y][x] = \" \"\n return coordinate",
"def tagify(x):\n\t\tm = tag_match.match(x)\n\t\tif m:\n\t\t\tg = m.groups()\n\n\t\t\tword = (g[0] if g[0] is not None else \"NA\")\n\t\t\ttag = (g[1] if g[1] is not None else \"NA\")\n\t\t\treturn (word,tag)\n\t\t\t#if g[1] is None: return (g[0], \"NA\")\n\t\t\t#else: return g\n\t\telse: return []",
"def free_positions(self):\r\n result_list = []\r\n for i in range(3):\r\n for j in range(3):\r\n if self.data[i][j] is None:\r\n result_list.append(tuple([i, j]))\r\n return result_list",
"def removeBlankPoints(self):\n i = 0\n while i < len(self.items):\n if self.items[i].mark.strip() == \"\":\n self.removeItem(i)\n else:\n i+=1",
"def removeOverlapsAndBadEntries(tupleTags):\n \n charsUsedOverall = set()\n overlapTagNums = []\n \n #find tag num which overlaps with previous tags\n for tagNum in range(len(tupleTags)):\n tag = tupleTags[tagNum]\n start = tag[0]\n end = tag[1]\n charsUsedOneTag = set()\n \n if start == -1:\n overlapTagNums.append(tagNum)\n continue\n \n for char in range(start, end):\n charsUsedOneTag.add(char)\n intersect = charsUsedOverall.intersection(charsUsedOneTag)\n if len(intersect) == 0:\n charsUsedOverall = charsUsedOverall.union(charsUsedOneTag)\n else:\n overlapTagNums.append(tagNum)\n \n #remove overlapping tags\n for tagNum in reversed(overlapTagNums):\n tupleTags.pop(tagNum)\n return tupleTags",
"def get_empty_spaces(self):\n empty_spaces = []\n for y, row in enumerate(self.__board):\n for x, piece in enumerate(row):\n if piece == BoardPiece.EMPTY:\n empty_spaces.append((x, y))\n return empty_spaces",
"def find_empty_positions(grid: List[List[str]]) -> Optional[Tuple[int, int]]:\n n = len(grid)\n\n for row in range(0, n):\n for col in range(0, n):\n if grid[row][col] == '.':\n return (row, col)\n return (-42, -42)",
"def space_nodes(nodes, spacing_m=0.0):\n spaced = []\n dangling = []\n last_node = None\n for node in nodes:\n cur_node = [node['longitude'], node['latitude']]\n if last_node is not None:\n meters = Geo.haversine(last_node, cur_node)\n if meters < spacing_m:\n dangling.append(node)\n continue\n else:\n spaced.append(node)\n else:\n spaced.append(node)\n last_node = cur_node\n return spaced, dangling",
"def clean_nodes_no_names(tag, data):\n\tif not isinstance(tag, tuple):\n\t\tfor each in data:\n\t\t\tif each['k'] != [] and each['v'] != []:\n\t\t\t\tif tag in each['k'] and 'name' not in each['k']:\n\t\t\t\t\teach['removed'] = 'true'\n\t\t\t\t\ttagValueData = dict(zip(each['k'], each['v']))\n\t\t\t\t\tif tagValueData.get('amenity') == 'atm':\n\t\t\t\t\t\teach['removed'] = 'false'\n\t\t\tyield each\n\telse:\n\t\tfor each in data:\n\t\t\tif each['k'] != [] and each['v'] != []:\n\t\t\t\tif tag[0] in each['k'] and tag[1] in each['v'] and 'name' not in each['k']:\n\t\t\t\t\teach['removed'] = 'true'\n\t\t\tyield each",
"def sanitise_tags(tags):\n\n # hack out all kinds of whitespace, then split on ,\n # if you run into more illegal characters (simplenote does not want to sync them)\n # add them to the regular expression above.\n illegals_removed = tags_illegal_chars.sub('', tags)\n if len(illegals_removed) == 0:\n # special case for empty string ''\n # split turns that into [''], which is not valid\n return []\n\n else:\n return illegals_removed.split(',')",
"def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)",
"def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty",
"def get_blank_spaces(self):\n \treturn [(x,y) for y in range(ylimit) for x in range(xlimit) if self.board[x][y] == 0]",
"def get_empty_cells(grid):\n empty = []\n for j,row in enumerate(grid):\n for i,val in enumerate(row):\n if not val:\n empty.append((j,i))\n return empty",
"def word_tagger(phrase_tup):\n annotated_phrase = []\n for token in phrase_tup:\n spacy_word = nlp(token)\n try:\n pos_tag = spacy_word[0].pos_\n except IndexError:\n # print(phrase)\n # print(f'This is the token that threw an error >{token}<')\n pos_tag = token\n annotated_phrase.append((token, pos_tag))\n return annotated_phrase",
"def _filter_empty(lst):\n return [cell for cell in lst if cell is not Sudoku.EMPTY_CELL]",
"def mps_null_spaces(mpslist):\n AL, C, AR = mpslist\n d, chi, _ = AL.shape\n NLshp = (d, chi, (d-1)*chi)\n ALdag = fuse_left(AL).T.conj()\n NLm = null_space(ALdag)\n NL = NLm.reshape(NLshp)\n\n ARmat = fuse_right(AR)\n NRm_dag = null_space(ARmat)\n NRm = NRm_dag.conj()\n NR = NRm.reshape((d, chi, (d-1)*chi))\n NR = NR.transpose((0, 2, 1))\n return (NL, NR)",
"def test_format_bad_tags(self):\n tags = self.c._format_tags(None)\n self.assertEqual(0, len(tags))",
"def remove_bad_tags(self, tags_list):\n new_tags = []\n for item in tags_list:\n word = item[0]\n\n if self.enchant_check(word):\n new_tags.append(item)\n else:\n pass\n #print(\"word is not found:\", word)\n\n return new_tags"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Ties the class's functionality together and performs the AI's move. It recursively calls the minimax algorithm and, after finding the best move, adds the bot's tag to the tags list.
|
def bot_handle_move(self) -> None:
    # The bot is the maximizing player, so start from the worst possible score.
    best_value = -INFINITY
    available_moves = self.check_for_moves()  # see minimax algorithm theory for details
    # The search depth scales with the board size; the 1.4 factor is a compromise
    # between execution time and accuracy of the chosen move.
    depth = int(1.4 * self.size - self.win_condition)
    best_move = None
    for move in available_moves:
        self.tags[move[0]][move[1]] = 'o'
        move_value = self.minimax(depth, -INFINITY, INFINITY, False)
        self.tags[move[0]][move[1]] = None
        if move_value > best_value:
            best_value = move_value
            best_move = move
    # Fall back to the first free cell if every evaluated move scored -INFINITY.
    if best_move is None and available_moves:
        best_move = available_moves[0]
    self.tags[best_move[0]][best_move[1]] = 'o'
|
[
"def move(self):\n value,move_Location,search_nodes=self._Min_Max_decision();\n\n #print(value)\n #print(move_Location)\n print(\"Minimax algorithm has generated: \"+str(search_nodes)+\" search nodes for deciding a move\")\n print(\"Computer has decided move:\" + str(move_Location))\n value,move_Location,search_nodes=self._Min_Max_alpha_beta_decision();\n print(\"Minimax algorithm with alpha beta pruning has generated: \"+str(search_nodes)+\" search nodes for deciding a move\")\n print(\"Computer has decided move:\" + str(move_Location))\n\n self.game.place_move(move_Location[0],move_Location[1],self.playerNumber)",
"def minimax(self, game, depth):\n # The Functions are a modification of the version of MINIMAX-DECISION of the AIMA text\n # The idea is similar.\n\n def max_value(game, depth):\n\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n if depth == 0:\n return self.score(game, self)\n\n V = float('-inf')\n \n for move in game.get_legal_moves(): \n score = min_value(game.forecast_move(move), depth - 1)\n V = max(score,V)\n return V\n\n def min_value(game, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n if depth == 0:\n return self.score(game, self)\n\n V = float('inf')\n\n for move in game.get_legal_moves():\n score = max_value(game.forecast_move(move), depth - 1)\n V = min(score,V)\n\n return V\n\n #Code Main \n \n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n if len(game.get_legal_moves())==0:\n return (-1,-1)\n \n\n main_score = float('-inf')\n best_move = (-1,-1)\n \n \n for move in game.get_legal_moves(): \n score = min_value(game.forecast_move(move), depth-1)\n if score >= main_score:\n best_move = move\n main_score = score\n\n return best_move",
"def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)",
"def alphabeta_search(state):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n d = depthset[0] #this is the cutoff test depth value. if we exceed this value, stop\r\n cutoff_test=None\r\n #sort_fn stores sorting heuristics used to prioritize moves that are estimated to be better first\r\n sort_fn = [vitalpoint, eyeHeur]\r\n #survivalheur returns a 1 if the input state is a goal state\r\n eval_fn = survivalheur\r\n player = state.to_move() #the player whose turn it is to go next\r\n prune = 0 #stores the number of pruned nodes\r\n pruned = {} #this will store the depth of the prune\r\n totaldepth = [0] #stores the deepest level the recursive function reached\r\n visited = {} #stores the number of visited nodes\r\n heuristicInd = 0 #index for which sort heuristic to use\r\n \r\n '''\r\n max_value: GoGame, Int, Int, Int, Int\r\n -'state' is a GoGame object representing the current state of the game board\r\n -'alpha' is the alpha value used for alpha-beta pruning\r\n -'beta' is the beta value used for alpha-beta pruning\r\n -'depth' is an integer that keeps track of the current depth of the alpha beta tree\r\n -heuristicInd is used to let the program know which heuristic to use. \r\n - After one heuristic returns no results, the other will take over\r\n max_value is called when, on the minmax tree, we want to find the move with the lowest\r\n score. The successive moves are generated with the function state.successor.\r\n This recursively calls min_value if we have not yet reached a terminal state\r\n '''\r\n def max_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n \r\n #sort by vital spots first, returns a list of actions\r\n tempher = heuristicInd\r\n \r\n #sorting heuristic is chosen and sorted here\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state)\r\n \r\n #use new sorted list to generate successor functions and call min_value (recursively)\r\n for a in sortedactions:\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n \r\n onbranch += 1\r\n v = max(v, min_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd)) \r\n if v >= beta: #pruning happens here\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n return v\r\n alpha = max(alpha, v)\r\n \r\n return v\r\n \r\n '''\r\n min_value: GoGame, Int, Int, Int, Int\r\n -state is a GoGame object representing the current state of the game board\r\n -alpha is the alpha value used for alpha-beta pruning\r\n -beta is the beta value used for alpha-beta pruning\r\n -depth is an integer that keeps track of the current depth of the alpha beta tree\r\n -heuristicInd is used to let the program know which heuristic to use. \r\n - After one heuristic returns no results, the other will take over\r\n min_value is called when, on the minmax tree, we want to find the move with the lowest\r\n score. 
The successive moves are generated with the function state.successor.\r\n This recursively calls max_value if we have not yet reached a terminal state\r\n '''\r\n def min_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #min wants increasing\r\n tempher = heuristicInd\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state, 1)\r\n \r\n #use new sorted list to generate successor functions and call min_value (recursively)\r\n for a in sortedactions: \r\n onbranch += 1\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n v = min(v, max_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd))\r\n if v <= alpha: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n return v\r\n beta = min(beta, v)\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or state.terminal_test()))\r\n eval_fn = eval_fn or (lambda state: state.utility(player))\r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n heuristicInd = 0\r\n sorts = sort_fn[heuristicInd]\r\n sortedact, heuristicInd = sorts(state)\r\n #argmax is a fn in the utility.py and is used to call min_value on all of the\r\n # available moves in state. It would then return the best scoring move\r\n abmove = argmax(sortedact,\r\n lambda a: min_value(state.result(a),\r\n -infinity, infinity, 0, heuristicInd))\r\n \r\n #print statements to output information on the alpha beta pruning\r\n print 'problem,', problemno[0], ', total tree depth,', totaldepth[0]\r\n for i in range(1, len(visited)):\r\n if (len(pruned) -1 < i):\r\n pruned[i] = 0\r\n print i, \",\", len(visited[i]), \",\", pruned[i]\r\n \r\n return abmove",
"def ai_move():\n global BOARD, CURRENT_PLAYER\n best_score = MAX_VALUE\n move = []\n alpha = MIN_VALUE\n beta = MAX_VALUE\n \n for i in range(3):\n for j in range(3):\n if BOARD[i][j] == ' ':\n BOARD[i][j] = AI\n score = minimax(10, True, alpha, beta)\n BOARD[i][j] = ' '\n \n if score < best_score:\n best_score = score\n move = [i,j]\n beta = min(beta, best_score)\n \n print('AI move: {0} {1}'.format(move[0], move[1]))\n BOARD[move[0]][move[1]] = AI\n CURRENT_PLAYER = True",
"def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)",
"def alphabeta_search(state):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n #sort state.actions in increasing or decreasing based on max or min (alpha or beta)\r\n #use heuristics fn to get a value for each move (move is in format (x,y) where x and y are ints\r\n \r\n d = depthset[0] #this is the cutoff test depth value. if we exceed this value, stop\r\n cutoff_test=None\r\n sort_fn = [vitalpoint, eyeHeur]\r\n eval_fn = survivalheur \r\n #randnumheuristics \r\n player = state.to_move()\r\n prune = 0\r\n pruned = {} #this will store the depth of the prune\r\n totaldepth = [0]\r\n visited = {}\r\n heuristicInd = 0\r\n \r\n def max_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #max wants decreasing\r\n #sorted(state.actions(), key = eval_sort, reverse = True)\r\n \r\n #sort by favorites first, returns a list of actions\r\n # for sorts in sort_fn:\r\n tempher = heuristicInd\r\n\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n ''''''\r\n for a in sortedactions:\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n \r\n onbranch += 1\r\n v = max(v, min_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd)) #+ vitscore.count(a)\r\n if v >= beta: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n alpha = max(alpha, v)\r\n \r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n \r\n return v\r\n\r\n def min_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #min wants increasing\r\n #sorted(state.actions(), key = eval_sort)\r\n #Shayne\r\n tempher = heuristicInd\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state, 1)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n for a in sortedactions: #state.actions():\r\n onbranch += 1\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n v = min(v, max_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd))\r\n if v <= alpha: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n beta = min(beta, v)\r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or state.terminal_test()))\r\n eval_fn = eval_fn 
or (lambda state: state.utility(player))\r\n #by default, utility score is used\r\n \r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n #print state.actions()\r\n heuristicInd = 0\r\n sorts = sort_fn[heuristicInd]\r\n sortedact, heuristicInd = sorts(state)\r\n abmove = argmax(sortedact,\r\n lambda a: min_value(state.result(a),\r\n -infinity, infinity, 0, heuristicInd))\r\n\r\n print 'problem,', problemno[0], ', total tree depth,', totaldepth[0]\r\n for i in range(1, len(visited)):\r\n if len(pruned) < i:\r\n pruned[i] = 0\r\n print i, \",\", len(visited[i]), \",\", pruned[i]\r\n \r\n return abmove",
"def autonextmove(self):\n nextmoves, score = self.getscore(self.searchdepth)\n # look for nextmoves that are best for us (nextplayer)\n #\n # The filter function accepts a function (like a lambda) and a list\n # as arguments. Filter runs the function with each member of of the\n # list and returns the list of all members for which the function\n # result or return was True.\n #\n # This lambda function is written in two parts. The first is the\n # argument list: move and np. np is always initialized with\n # the value of nextplayer. move is the next move from the list of\n # possible next moves. The lambda returns True in the second part\n # if the move's score is the same value as the next player, meaning\n # it is a GOOD move for that player.\n #\n # If there are no favorable moves, we build a list of neutral moves\n # or even unfavorable moves, using the same method.\n bestnextmoves = list(filter( lambda move, np = self.nextplayer:\n move.score == np,\n nextmoves))\n if not len(bestnextmoves):\n # otherwise, just take neutral ones\n bestnextmoves = list(filter(lambda move: move.score == 0, nextmoves))\n if not len(bestnextmoves):\n # only options favor the enemy: probably should give up\n bestnextmoves = list(filter( lambda move, np = -1 * self.nextplayer:\n move.score == np,\n nextmoves))\n # choose randomly from the available\n return random.choice(bestnextmoves) if len(bestnextmoves) else None",
"def run(self):\n\n # keep track of counter\n counter = 0\n\n while self.queue:\n\n # print depth of tree every 10000 steps\n if counter % 10000 == 0:\n print(len(self.queue[0]))\n\n # get first moves set from queue\n moves_set = self.get_moves_set()\n\n # move all moves from set\n self.try_moves(moves_set)\n\n # continue branch (add to queue) if layout is not in archive\n if self.not_in_archive():\n self.add_to_queue(moves_set)\n \n # check for win\n if self.won_game():\n\n # return winning set of moves\n return moves_set\n \n # reverse moves to original layout\n self.reverse_moves(moves_set)\n \n # add to counter\n counter += 1",
"def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])",
"def _aiStep(self, ):\n moves = []\n moves.append(self._cat_ai(self._cat, self._cat,\n self._dogs, self._goal,\n self._field_size))\n for dog in self._dogs:\n moves.append(self._dog_ai(dog, self._cat,\n self._dogs, self._goal,\n self._field_size))\n return moves",
"def alphabeta_search(state, game):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n state = game\r\n d = 20 #this is the cutoff test depth value. if we exceed this value, stop\r\n cutoff_test=None\r\n eval_fn = None \r\n player = game.to_move(state)\r\n \r\n def max_value(state, alpha, beta, depth):\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n for a in game.actions(state):\r\n print \"max call:\"\r\n print state.moves\r\n print game.moves\r\n print a\r\n v = max(v, min_value(game.result(state, a),\r\n alpha, beta, depth+1))\r\n if v >= beta:\r\n return v\r\n alpha = max(alpha, v)\r\n return v\r\n\r\n def min_value(state, alpha, beta, depth):\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n for a in game.actions(state):\r\n v = min(v, max_value(game.result(state, a),\r\n alpha, beta, depth+1))\r\n if v <= alpha:\r\n return v\r\n beta = min(beta, v)\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or game.terminal_test(state)))\r\n eval_fn = eval_fn or (lambda state: game.utility(state, player))\r\n #by default, utility score is used\r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n return argmax(game.actions(state),\r\n lambda a: min_value(game.result(state, a),\r\n -infinity, infinity, 0))",
"def minimax(self, state, agent, parents_positions):\n if termialTest(state):\n return utility(state)\n return self.computeMinimaxScore(state, agent, parents_positions)",
"def recommend_move(self, depth_override = None):\n if not depth_override:\n depth_override = self.ai_depth\n\n self.tree_generator(depth_override)\n return self.minimax(self.current_game_state, 0)",
"def player_loop(self):\n\n # Generate game tree object\n first_msg = self.receiver()\n # Initialize your minimax model\n model = self.initialize_model(initial_data=first_msg)\n\n while True:\n msg = self.receiver()\n\n # Create the root node of the game tree\n node = Node(message=msg, player=0)\n\n # Possible next moves: \"stay\", \"left\", \"right\", \"up\", \"down\"\n best_move = self.search_best_next_move(\n model=model, initial_tree_node=node)\n\n # Execute next action\n self.sender({\"action\": best_move, \"search_time\": None})",
"def move_actors(self, actor_class, paces):\n for yi, outer in enumerate(self.grid):\n for xi, inner in enumerate(outer):\n #print \"xi: {}, yi: {}\".format(xi, yi)\n\n i = 0\n actors_in_tile_to_process = True\n # This processess all actors to move by their initial position\n # on a tile per turn.\n while actors_in_tile_to_process:\n # To prevent index calls when a tile is empty.\n tile = self.grid[yi][xi]\n if len(tile) == 0:\n # Essentially same as break:\n actors_in_tile_to_process = False\n continue\n\n actor = tile[i]\n\n if actor.__class__ == actor_class:\n if not actor.moved_or_created_in_turn:\n # Remember position before actor did any paces.\n xi_init = xi\n yi_init = yi\n for pace in range(paces):\n # This is a human movement implementation,\n # need to handle zombie movement properly!!!\n direction = random.choice(self.directions)\n xi, yi = self._move_to_adjacent_tile(direction, actor, xi, yi)\n print self.grid\n print \"\"\n\n # Mark actor so that in case he moved to a tile\n # that we still need to process we don't move them\n # subsequently again.\n actor.moved_or_created_in_turn = True\n # Continue processing next actor based on initial\n # position.\n xi = xi_init\n yi = yi_init\n\n tile = self.grid[yi][xi]\n if i < len(tile) - 1:\n i += 1\n else:\n actors_in_tile_to_process = False\n\n actors = 0\n # Reset marks for all actors of the given actor_class.\n for yi, outer in enumerate(self.grid):\n for xi, inner in enumerate(outer):\n for actor in inner:\n if actor.__class__ == actor_class:\n # This thus also includes newly created zombies if the\n # actor is a zombie\n actors += 1\n actor.moved_or_created_in_turn = False\n\n return actors",
"def train(self):\r\n stack_nodes = [self.root]\r\n # !!! Write code to train decision tree. If the node is pure, set the majority_class attribute.\r\n # Use .pop(0) to pop the top of the stack\r\n for data in stack_nodes:\r\n self.split_attr, self.split_value =compute_best_split()\r\n if self.split_attr <= self.split_value:\r\n self.root =get_data_for_left()\r\n else\r\n self.root =get_data_for_right()\r\n\r\n stack_nodes.pop(0)\r\n pass",
"def move_robot(self):\n global gl\n global mb\n global imported_class\n self.run_info = self.ids.run_info\n if self.pressed_algo:\n self.run_info.text='[color=#0010FF]Route completed.[/color]'\n try:\n if self.user_class:\n if imported_class._distance < gl.best_distance:\n follow_route = imported_class.route\n print(\"Executing: {} route\".format(self.user_class.upper()))\n else:\n follow_route = gl.best_distance.routes\n print(\"Executing: {} route\".format(gl.best_distance.name))\n else:\n print(\"Executing: {} route\".format(gl.best_distance.name))\n follow_route = gl.best_distance.routes\n except Exception as e:\n pass\n finally:\n mb = MoveBase(follow_route)\n else:\n self.run_info.text='[color=#FF0000]Run algorithms first.[/color]'",
"def step(self):\n if self.model.schedule.steps < self.model.residential_steps:\n residential_move = True\n else:\n residential_move = False\n\n\n if residential_move:\n # only step the agents if the number considered is not exhausted\n if self.model.total_considered < self.model.residential_moves_per_step:\n # move residential\n U_res = self.get_res_satisfaction(self.pos)\n self.model.res_satisfaction.append(U_res)\n\n # print(\"U_res\",U_res)\n if U_res < self.T:\n\n # todo: implement different move schemes, for now only random\n # find all empty places\n # rank them\n # take one with boltzmann probability.\n self.evaluate_move(U_res, school=False)\n\n else:\n self.model.res_happy += 1\n\n self.model.total_considered += 1\n #print(\"considered\",self.model.total_considered)\n\n\n else:\n if self.model.total_considered < self.model.school_moves_per_step:\n # school moves\n # satisfaction in current school\n U = self.get_school_satisfaction(self.school, self.dist_to_school)\n self.model.satisfaction.append(U)\n\n # If unhappy, compared to threshold move:\n if U < self.T:\n #print('unhappy')\n self.evaluate_move(U, school=True)\n\n else:\n self.model.happy += 1\n if self.model.total_considered>0:\n self.model.percent_happy = np.ma(self.model.happy/self.model.total_considered)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WorkQueue element site restriction check (same as workRestrictions)
|
def testPassesSiteRestriction(self):
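    # The assertions below check that a site passes only when it is whitelisted,
    # not blacklisted, and hosts every block listed in Inputs, ParentData (when
    # ParentFlag is set) and PileupData, walking through those combinations one at a time.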
# test element ala MonteCarlo
ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"], SiteBlacklist=["T1_US_FNAL"])
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
# test element with input dataset
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
# test element with input and parent dataset
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []}
ele['ParentFlag'] = True
ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": []}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]}
ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": ["T1_IT_CNAF", "T2_CH_CERN", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
# test element with input, parent and pileup dataset
ele['PileupData'] = {"/MY/DATASET/NAME": []}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY"))
ele['PileupData'] = {"/MY/DATASET/NAME": ["T2_US_Nebraska", "T1_IT_CNAF"]}
self.assertFalse(ele.passesSiteRestriction("T1_IT_CNAF"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T1_IT_CNAF", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertFalse(ele.passesSiteRestriction("T2_DE_DESY"))
|
[
"def testPassesSiteRestrictionLocationFlags(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n\n # test element with input dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input and parent dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input, parent and pileup dataset\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n ele['NoPileupUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T2_US_Nebraska\", \"T1_IT_CNAF\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T2_US_Nebraska\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n # only the pileup flag enabled now\n ele['NoInputUpdate'] = False\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))",
"def testPossibleSitesLocationFlags(self):\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and no location, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_IT_CNAF\", \"T2_CH_CERN\"]}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but pu flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [\"T1_IT_CNAF\"])\n # test element with InputDataset and one match, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and ParentData and no location, but both flags on\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but pileup flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [])\n\n # test element with InputDataset, PileupData and ParentData with no location, but pileup flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T2_DE_DESY\"]}\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertEqual(possibleSites(ele), [])",
"def check_allowed_site(self, site_name):\n\n if self.include_sites is not None:\n for pattern in self.include_sites:\n if pattern.match(site_name):\n break\n else:\n # no pattern matched\n LOG.debug('Site %s is not in include list.', site_name)\n return False\n\n if self.exclude_sites is not None:\n for pattern in self.exclude_sites:\n if pattern.match(site_name):\n LOG.debug('Site %s is in exclude list.', site_name)\n return False\n\n return True",
"def siteWhitelist(self):\n return self.data.constraints.sites.whitelist",
"def is_worker_allowed(self, worker_id):\n return worker_id in self.allowed_workers",
"def boundary_condition(self):\n pass",
"def condition_singleton(csp, var) :\n return len(csp.get_domain(var))==1",
"def has_special_restriction(self, user, perm):\n return False",
"def restriction(self):\n return self.__restriction",
"def limit_by_origin(self, request):\n # We're allowing all if explicitly configured\n if settings.TMS_WEBHOOK_ALLOWED_CIDR_NETS == ['*']:\n return True\n\n allowed_cidr_nets = [IPNetwork(net) for net in settings.TMS_WEBHOOK_ALLOWED_CIDR_NETS]\n ip = get_client_ip(request)\n\n for net in allowed_cidr_nets:\n if ip in net:\n return True\n raise DisallowedHost(\"Unexpected origin for a webhook request\")",
"def notInQueue(itemTuple):\n\t\ttry:\n\t\t\tinqueueItem=hyperlinkQueueDao.getByTargetIdContentType(itemTuple[4],itemTuple[0],itemTuple[3])\n\t\t\tif inqueueItem:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\t\texcept Exception,e:\n\t\t\treturn False",
"def check(self):\n self.__check_request_limit()",
"def test_add_website_error(self, enabled_websites_mock):\n self.subscription.plan.allowance.return_value = 1\n with self.assertRaises(SubscriptionWebsiteLimitReached):\n self.subscription.add_website('url')\n self.assertEqual(len(self.subscription.enabled_websites()), 1)",
"def has_entry_feature(self):\n return any(v in self.video_url for v in settings.ALLOWED_OMEMBED_SITES) or self.thumbnail",
"def test_with_limited_localsite(self):\n form = MyConfigForm(integration=self.integration,\n request=self.request,\n limit_to_local_site=self.local_site_1)\n\n self.assertEqual(form.limited_to_local_site, self.local_site_1)\n self.assertNotIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group])\n self.assertEqual(\n form.fields['my_conditions'].choice_kwargs.get('local_site'),\n self.local_site_1)",
"def condition_singleton(csp, var) :\n if len(csp.get_domain(var))==1:\n return True\n return False",
"def scm_ip_restrictions(self) -> Optional[Sequence['outputs.LinuxWebAppSlotSiteConfigScmIpRestriction']]:\n return pulumi.get(self, \"scm_ip_restrictions\")",
"def search_restr_site (seq:str):\n print('The restriction site is at index %s' % seq.find('site'))\n return",
"def scm_ip_restrictions(self) -> Optional[Sequence['outputs.LinuxWebAppSiteConfigScmIpRestriction']]:\n return pulumi.get(self, \"scm_ip_restrictions\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Workqueue element site restriction check (same as workRestrictions)
|
def testPassesSiteRestrictionLocationFlags(self):
# test element ala MonteCarlo
ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"], SiteBlacklist=["T1_US_FNAL"])
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
# test element with input dataset
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []}
ele['NoInputUpdate'] = True
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
# test element with input and parent dataset
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []}
ele['ParentFlag'] = True
ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": []}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_DE_DESY"]}
ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": ["T1_IT_CNAF", "T2_CH_CERN", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
# test element with input, parent and pileup dataset
ele['PileupData'] = {"/MY/DATASET/NAME": []}
ele['NoPileupUpdate'] = True
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
ele['PileupData'] = {"/MY/DATASET/NAME": ["T2_US_Nebraska", "T1_IT_CNAF"]}
self.assertFalse(ele.passesSiteRestriction("T2_US_Nebraska"))
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T1_IT_CNAF", "T2_DE_DESY"]}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
# only the pileup flag enabled now
ele['NoInputUpdate'] = False
ele['PileupData'] = {"/MY/DATASET/NAME": []}
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
self.assertTrue(ele.passesSiteRestriction("T1_IT_CNAF"))
self.assertTrue(ele.passesSiteRestriction("T2_DE_DESY"))
|
[
"def testPassesSiteRestriction(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n\n # test element with input dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input and parent dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input, parent and pileup dataset\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T2_US_Nebraska\", \"T1_IT_CNAF\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_DE_DESY\"))",
"def testPossibleSitesLocationFlags(self):\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and no location, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_IT_CNAF\", \"T2_CH_CERN\"]}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but pu flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [\"T1_IT_CNAF\"])\n # test element with InputDataset and one match, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and ParentData and no location, but both flags on\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but pileup flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [])\n\n # test element with InputDataset, PileupData and ParentData with no location, but pileup flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T2_DE_DESY\"]}\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertEqual(possibleSites(ele), [])",
"def check_allowed_site(self, site_name):\n\n if self.include_sites is not None:\n for pattern in self.include_sites:\n if pattern.match(site_name):\n break\n else:\n # no pattern matched\n LOG.debug('Site %s is not in include list.', site_name)\n return False\n\n if self.exclude_sites is not None:\n for pattern in self.exclude_sites:\n if pattern.match(site_name):\n LOG.debug('Site %s is in exclude list.', site_name)\n return False\n\n return True",
"def siteWhitelist(self):\n return self.data.constraints.sites.whitelist",
"def is_worker_allowed(self, worker_id):\n return worker_id in self.allowed_workers",
"def boundary_condition(self):\n pass",
"def condition_singleton(csp, var) :\n return len(csp.get_domain(var))==1",
"def has_special_restriction(self, user, perm):\n return False",
"def restriction(self):\n return self.__restriction",
"def limit_by_origin(self, request):\n # We're allowing all if explicitly configured\n if settings.TMS_WEBHOOK_ALLOWED_CIDR_NETS == ['*']:\n return True\n\n allowed_cidr_nets = [IPNetwork(net) for net in settings.TMS_WEBHOOK_ALLOWED_CIDR_NETS]\n ip = get_client_ip(request)\n\n for net in allowed_cidr_nets:\n if ip in net:\n return True\n raise DisallowedHost(\"Unexpected origin for a webhook request\")",
"def notInQueue(itemTuple):\n\t\ttry:\n\t\t\tinqueueItem=hyperlinkQueueDao.getByTargetIdContentType(itemTuple[4],itemTuple[0],itemTuple[3])\n\t\t\tif inqueueItem:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\t\texcept Exception,e:\n\t\t\treturn False",
"def check(self):\n self.__check_request_limit()",
"def test_add_website_error(self, enabled_websites_mock):\n self.subscription.plan.allowance.return_value = 1\n with self.assertRaises(SubscriptionWebsiteLimitReached):\n self.subscription.add_website('url')\n self.assertEqual(len(self.subscription.enabled_websites()), 1)",
"def has_entry_feature(self):\n return any(v in self.video_url for v in settings.ALLOWED_OMEMBED_SITES) or self.thumbnail",
"def test_with_limited_localsite(self):\n form = MyConfigForm(integration=self.integration,\n request=self.request,\n limit_to_local_site=self.local_site_1)\n\n self.assertEqual(form.limited_to_local_site, self.local_site_1)\n self.assertNotIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group])\n self.assertEqual(\n form.fields['my_conditions'].choice_kwargs.get('local_site'),\n self.local_site_1)",
"def condition_singleton(csp, var) :\n if len(csp.get_domain(var))==1:\n return True\n return False",
"def scm_ip_restrictions(self) -> Optional[Sequence['outputs.LinuxWebAppSlotSiteConfigScmIpRestriction']]:\n return pulumi.get(self, \"scm_ip_restrictions\")",
"def search_restr_site (seq:str):\n print('The restriction site is at index %s' % seq.find('site'))\n return",
"def scm_ip_restrictions(self) -> Optional[Sequence['outputs.LinuxWebAppSiteConfigScmIpRestriction']]:\n return pulumi.get(self, \"scm_ip_restrictions\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Workqueue element data location check, using the input and PU data location flags
|
def testPossibleSitesLocationFlags(self):
ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset and no location, but input flag on
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []}
ele['NoInputUpdate'] = True
self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset and one match, but input flag on
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_IT_CNAF", "T2_CH_CERN"]}
self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset and one match, but pu flag on
ele['NoInputUpdate'] = False
ele['NoPileupUpdate'] = True
self.assertEqual(possibleSites(ele), ["T1_IT_CNAF"])
# test element with InputDataset and one match, but both flags on
ele['NoInputUpdate'] = True
self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset and ParentData and no location, but both flags on
ele['ParentFlag'] = True
ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": []}
self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset and ParentData and no location, but input flag on
ele['NoPileupUpdate'] = False
self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset and ParentData and no location, but pileup flag on
ele['NoInputUpdate'] = False
ele['NoPileupUpdate'] = True
self.assertEqual(possibleSites(ele), [])
# test element with InputDataset, PileupData and ParentData with no location, but pileup flag on
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": ["T1_US_FNAL", "T2_CH_CERN", "T2_DE_DESY"]}
ele['ParentData'] = {"/MY/BLOCK2/NAME#002590494c06": ["T2_DE_DESY"]}
ele['PileupData'] = {"/MY/DATASET/NAME": []}
self.assertEqual(possibleSites(ele), ["T2_DE_DESY"])
# test element with InputDataset, PileupData and ParentData with no location, but both flags on
ele['NoInputUpdate'] = True
self.assertItemsEqual(possibleSites(ele), ["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset, PileupData and ParentData with no location, but input flag on
ele['NoPileupUpdate'] = False
self.assertEqual(possibleSites(ele), [])
|
[
"def testPassesSiteRestrictionLocationFlags(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n\n # test element with input dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input and parent dataset\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T1_IT_CNAF\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n\n # test element with input, parent and pileup dataset\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n ele['NoPileupUpdate'] = True\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n ele['PileupData'] = {\"/MY/DATASET/NAME\": [\"T2_US_Nebraska\", \"T1_IT_CNAF\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T2_US_Nebraska\"))\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T1_IT_CNAF\", \"T2_DE_DESY\"]}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))\n # only the pileup flag enabled now\n ele['NoInputUpdate'] = False\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_CH_CERN\"))\n self.assertTrue(ele.passesSiteRestriction(\"T1_IT_CNAF\"))\n self.assertTrue(ele.passesSiteRestriction(\"T2_DE_DESY\"))",
"def perform_is_skip_and_return_value_patch_available(self, data, addr):\n\t\treturn False",
"def perform_is_skip_and_return_zero_patch_available(self, data, addr):\n\t\treturn False",
"def _data_in_prefetch_buffers(self, offset):\n k = [i for i in self._prefetch_data.keys() if i <= offset]\n if len(k) == 0:\n return None\n index = max(k)\n buf_offset = offset - index\n if buf_offset >= len(self._prefetch_data[index]):\n # it's not here\n return None\n return index",
"def check_box(volume,point,is_queued_map,is_visited_map):\n list_not_visited=[]\n list_not_queued = []\n list_are_near = []\n\n if point[0]==1227 and point[1]==735 and point[2]==27:\n pass\n\n\n for x in xrange(-1, 2):\n\n # Edgecase for x\n if point[0] + x < 0 or point[0] + x > volume.shape[0] - 1:\n continue\n\n for y in xrange(-1, 2):\n\n # Edgecase for y\n if point[1] + y < 0 or point[1] + y > volume.shape[1] - 1:\n continue\n\n for z in xrange(-1, 2):\n\n # Edgecase for z\n if point[2] + z < 0 or point[2] + z > volume.shape[2] - 1:\n continue\n\n # Dont look at the middle point\n if x == 0 and y == 0 and z == 0:\n continue\n\n # TODO case if loop, all are queued but not visited\n if volume[point[0] + x, point[1] + y, point[2] + z] == 1:\n\n\n list_are_near.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n if is_queued_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_queued.extend([[point[0] + x, point[1] + y, point[2] + z]])\n if is_visited_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_visited.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n is_visited_map[point[0],point[1],point[2]]=1\n return list_not_queued,list_not_visited,is_visited_map,list_are_near",
"def _check_data_point(cube, metadata):\n point_index = []\n\n for dim_length in cube.shape:\n point_index.append(int(random.random() * dim_length))\n\n point_index = tuple(point_index)\n\n try:\n point_cube = cube[point_index]\n _data_point = point_cube.data\n except Exception:\n msg = 'Unable to extract data point {} from file: {}'.format(\n point_index, metadata['basename'])\n raise FileValidationError(msg)\n else:\n return True",
"def containsOffset(self, offset: long) -> bool:\n ...",
"def _check_data_args(self, query, reference):\r\n assert query is not None, \"Query data is 'None'.\"\r\n assert reference is not None, \"Reference data is 'None'.\"",
"def loadBlockQueue(input_queue, county_fips, config, start_time):\n try:\n temp_time = time.localtime()\n county_counter = 0\n for c in county_fips:\n input_queue.put((c))\n county_counter += 1\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 6 OF 13 - COMPLETED LOADING INPUT \n QUEUE WITH COUNTY DATA\n \"\"\"\n my_message = ' '.join(my_message.split())\n print(nbmf.logMessage(my_message,temp_time, time.localtime(), \n time.mktime(time.localtime()) - time.mktime(start_time)))\n return True, county_counter\n\n except:\n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 6 OF 13 - FAILED TO LOADING QUEUE WITH\n COUNTY DATA\n \"\"\"\n my_message = ' '.join(my_message.split()) + '\\n' + traceback.format_exc()\n print(nbmf.logMessage(my_message,temp_time, time.localtime(), \n time.mktime(time.localtime()) - time.mktime(start_time)))\n return False, None",
"def perform_is_valid_offset(self, addr: int) -> bool:\n\t\tdata = self.read(addr, 1)\n\t\treturn (data is not None) and (len(data) == 1)",
"def check_ship_pos(data, x, y, size, horiz):\n if horiz:\n for i in range(y, y+size):\n if not check_prop_place(data, x, i):\n return False\n return True\n else:\n for i in range(x, x+size):\n if not check_prop_place(data, i, y):\n return False\n return True",
"def isUndefinedData(program: ghidra.program.model.listing.Program, addr: ghidra.program.model.address.Address) -> bool:\n ...",
"def is_local(queue):\n _setup()\n return queue in [dest(0) for dest in cupsd.getDests()]",
"async def _check_latch_data(self, key, data):\n process = False\n latching_entry = self.latch_map.get(key)\n if latching_entry[Constants.LATCH_STATE] == Constants.LATCH_ARMED:\n # Has the latching criteria been met\n if latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_EQ:\n if data == latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_GT:\n if data > latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_GTE:\n if data >= latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_LT:\n if data < latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == \\\n Constants.LATCH_LTE:\n if data <= latching_entry[Constants.LATCH_DATA_TARGET]:\n process = True\n if process:\n latching_entry[Constants.LATCHED_DATA] = data\n await self._process_latching(key, latching_entry)",
"def analyse_pump_event(self, data , freq = 4000, pressure_threshold = 1000, gradient_threshold = -400*1000):\r\n from numpy import gradient, nanmedian, nan, isnan ,argmin,argwhere\r\n from logging import error\r\n info(f'analyse_pump_event data shape: {data.shape}')\r\n try:\r\n target = data[:,0]\r\n grad = gradient(target)\r\n grad_min = nanmin(grad)\r\n idx_min = argmin(grad)\r\n if grad_min < (gradient_threshold/freq) and nanmedian(target) > pressure_threshold:\r\n flag = True\r\n else:\r\n flag = False\r\n except:\r\n info(f'except data shape: {data.shape}')\r\n target = data[:,0]\r\n info(target)\r\n error(traceback.format_exc())\r\n flag, idx_min, grad_min = False, 0, 0\r\n\r\n\r\n\r\n return flag, idx_min, grad_min",
"def __validate_node_data(self, data):\n\n # skipping check of 'grapheap_node_id' optimisation key\n if all(key in data for key in self.optimisation_keys[1:]):\n return True\n\n else:\n missing_keys = [\n x for x in self.optimisation_keys[1:] if x not in data]\n raise ValueError(\"Grapheap Error: \" + str(missing_keys) +\n \" optimisation keys missing in data\")",
"def check_pileupread( pileupread ):\n check = True\n if pileupread.alignment.is_duplicate:\n check = False\n elif pileupread.is_del:\n check = False\n elif pileupread.is_refskip:\n check = False\n elif not pileupread.query_position:\n check = False\n elif pileupread.alignment.mapq < int(cfg['Driver']['mapq']):\n check = False\n elif pileupread.alignment.query_qualities[pileupread.query_position] < int(cfg['Driver']['base_phred_quality']):\n check = False\n\n return( check )",
"def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False",
"def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test run get_most_volatile() with stock prices from a file.
|
def test_run(filename='prices.csv'):
prices = pd.read_csv(filename, parse_dates=['date'])
print("Most volatile stock: {}".format(get_most_volatile(prices)))
|
[
"def get_most_expensive_cars(table):\n cur, con = database.connect_to_database()\n query = \"SELECT t.* FROM \" + table + \" t WHERE t.price = \\\n (select max(subt.price) from \" + table + \" subt);\"\n return pandas.read_sql_query(query, con)",
"def download_all():\r\n f = open('stock_symbols.txt', 'r')\r\n fout = open('../data/stocks_read.txt', 'w')\r\n count_max = 500\r\n count = 0\r\n for stock_symbol in f:\r\n stock_symbol = stock_symbol.strip()\r\n try:\r\n stock_download(stock_symbol)\r\n fout.write(stock_symbol + '\\n')\r\n except:\r\n print(\"was not able to read file \", stock_symbol)\r\n count = count + 1\r\n if count >= count_max:\r\n break\r\n f.close()\r\n fout.close",
"def test_low_stockprice_high_interest(self):\n stock_prices = np.array([[5, 4, 4, 2],\n [5, 3, 3, 3],\n [5, 4, 2, 2],\n [5, 3, 3, 1]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)",
"def test_get_result_top_files(self):\n pass",
"def process_orders(file, output_file, total_rows = TOTAL_ROWS, start_from = 0, files = 0):\n secondsPerYear = float(365*24*60*60)\n rows = 0\n r = .02676\n f = open(file, \"r\")\n\n\n eof = False\n\n py_dir = os.path.join(\"..\", \"..\", \"python_ref\")\n oil_dir = os.path.join(\"..\", \"..\", \"oil\")\n cd_dir = \"C:/Users/Xinna/c_data_files\" \n\n if not os.path.isdir(py_dir):\n os.mkdir(py_dir)\n if not os.path.isdir(oil_dir):\n os.mkdir(oil_dir)\n if not os.path.isdir(cd_dir):\n os.mkdir(cd_dir)\n\n while not eof:\n print(\"working on file %d\" %files)\n file_num = str(files).zfill(5)\n\n ref_file = os.path.join(py_dir,\"%s_python_ref.txt\" %(file_num))\n rf = open(ref_file, \"w\");\n option_id_list = os.path.join(oil_dir, \"%s_id_list.txt\" %(file_num))\n oil = open(option_id_list, \"w\")\n output_file = os.path.join(cd_dir, \"%s_c_data.txt\" %(file_num))\n output = open(output_file, 'w')\n\n #necessary for the benchmark files\n output.write(\"%d\\n\" %(total_rows))\n\n while (total_rows > rows):\n\n line = f.readline()\n if not line:\n eof = True\n print(\"oh no\")\n break\n\n [t_date, stock_id, stock_symbol, expiry, strike, call_put, style, symbol, \n price_bid, price_ask, date_bid, date_ask, size_bid, \n size_ask, exchange_bid, exchange_ask, volume, iv, price_opt,delta, \n gamma, theta, vega, rho, pre_iv, implied_yield, dump_time, calc_date] = line.split(\",\")\n \n # we should figure out what to do with stock / option id \n expiry = expiry + \" 17:00:00\" #to add the expiration time\n expiration_dt = datetime.datetime.strptime(expiry, \"%Y-%m-%d %H:%M:%S\")\n calc_dt = datetime.datetime.strptime(calc_date.strip(), \"%Y-%m-%d %H:%M:%S\")\n\n expiration_time = expiration_dt - calc_dt\n expiration_yrs = expiration_time.seconds / (secondsPerYear)\n h = sha256()\n g = symbol.encode('utf-8')\n h.update(g)\n option_id = h.hexdigest()\n option_id = int(option_id, 16)\n\n option_id = option_id & 0xfffffffe #convert to 32 bit\n\n if call_put == 'C':\n call_put_mask = 0x00000000;\n else:\n call_put_mask = 0x00000001;\n\n option_id = option_id | call_put_mask;\n\n x = order.Order(float(price_opt), float(strike), r, float(iv), expiration_yrs, call_put, option_id)\n pkt = x.pkt()\n \n try:\n if call_put == 'C':\n answer = blackscholes.callPrice(float(price_opt), float(strike), r, float(iv), expiration_yrs)\n else:\n call_price = blackscholes.callPrice(float(price_opt), float(strike), r, float(iv), expiration_yrs)\n answer = blackscholes.put_price(float(price_opt), call_price, float(strike), r, expiration_yrs)\n except:\n answer = 0\n\n ref_str = \"%u, %f, %i\\r\\n\" %(option_id, answer, 0)\n rf.write(ref_str)\n\n oil_str = \"%u, %s, %s, %s, %s, %s\\r\\n\" %(option_id, stock_symbol, expiry, strike, call_put, t_date)\n oil.write(oil_str)\n\n s = x.s\n output.write(s)\n rows += 1\n\n files += 1\n oil.close()\n rf.close() \n output.close()\n rows = 0\n\n f.close()\n return 0",
"def test_stock_price_should_give_the_latest_price(self):\n self.goog.update(datetime(2015, 5, 28), price=10)\n self.goog.update(datetime(2015, 5, 29), price=8.4)\n self.assertAlmostEqual(8.4, self.goog.price, delta=0.0001)",
"def get_high_100(ticker, parsed_response): \n #request_url = f\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={ticker}&apikey={api_key}\"\n #response = requests.get(request_url)\n #parsed_response = json.loads(response.text)\n tsd = parsed_response[\"Time Series (Daily)\"]\n dates = list(tsd.keys()) # sort\n\n high_prices = []\n\n for date in dates:\n high_price = tsd[date][\"2. high\"]\n high_prices.append(float(high_price))\n\n #max of all the high prices over the last 100 days\n recent_high = max(high_prices)\n\n return recent_high",
"def analyze_volume_timepoint():\n dp = data_provider.DataProvider('./data/intra_day/', False)\n day_int_val = 20180502\n symbol_list = dp.get_symbol_list_for_a_day(day_int_val)\n dp.load_one_day_data(day_int_val)\n dp.generate_eligible_list()\n live_trade_folder = os.path.join('./live_trade/', str(day_int_val) + '/')\n \n volume_list, timepoint_list, trade_cash_list = [], [], []\n\n # we assume that for large symbols, at least $20K should be traded within one minute.\n large_symbol_threshold = 20000 * 60 * 6.5\n\n num_small_symbol = 0\n\n for symbol in symbol_list:\n one_symbol_data = dp.get_one_symbol_data(symbol)\n total_volume, total_trade_cash_flow = 0, 0\n for one_time_data in one_symbol_data.data:\n total_volume += one_time_data.volume\n total_trade_cash_flow += one_time_data.volume * one_time_data.open\n volume_list.append(total_volume)\n\n # load from live trade folder as well\n live_crawl_file = os.path.join(live_trade_folder, symbol + '.pb')\n if os.path.isfile(live_crawl_file):\n fid = open(live_crawl_file)\n content = fid.read()\n fid.close()\n one_stock_data = stock_pb2.ServingCrawledData()\n one_stock_data.ParseFromString(content)\n total_volume_live = one_stock_data.data[-1].total_volume\n print('Symbol: {0}'.format(symbol))\n print('Total volume from historical price: {0}, from live crawl: {1}'.format(total_volume, total_volume_live))\n\n if total_trade_cash_flow > 1e8:\n print('Symbol: {0}, total trade cash flow: ${1}M'.format(symbol, total_trade_cash_flow/1e6))\n else:\n trade_cash_list.append(total_trade_cash_flow)\n\n if total_trade_cash_flow < large_symbol_threshold:\n num_small_symbol += 1\n timepoint_list.append(len(one_symbol_data.data))\n\n print('Total number of small symbols: {0}'.format(num_small_symbol))\n plt.plot(timepoint_list, volume_list, '.')\n plt.xlim(0, max(timepoint_list))\n plt.xlabel('Number of time points')\n plt.ylim(0, max(volume_list))\n plt.ylabel('Daily traded volume')\n plt.grid()\n plt.show()\n plt.clf()\n #plt.plot(trade_cash_list)\n plt.hist(trade_cash_list, bins = 50)\n plt.grid()\n plt.show()",
"def read_freq(bfile, plinkexe, freq_threshold=0.1, maxmem=1700, threads=1):\n high = 1 - freq_threshold\n low = freq_threshold\n if not os.path.isfile('%s.frq.gz' % bfile):\n nname = os.path.split(bfile)[-1]\n frq = ('%s --bfile %s --freq gz --keep-allele-order --out %s --memory '\n '%d --threads %d')\n line = frq % (plinkexe, bfile, nname, maxmem, threads)\n o, e = executeLine(line)\n frq = pd.read_table('%s.frq.gz' % nname, delim_whitespace=True)\n else:\n frq = pd.read_table('%s.frq.gz' % bfile, delim_whitespace=True)\n # filter MAFs greater than 1 - freq_threshold and smaller than freq_threshold\n return frq[(frq.MAF < high) & (frq.MAF > low)]",
"def compute_highs(context):\n if context.is_debug:\n start_time = time()\n \n for market in context.prices.items:\n context.twenty_day_high[market] = context\\\n .prices[market]\\\n .high[-context.twenty_day_breakout-1:-1]\\\n .max()\n context.fifty_five_day_high[market] = context\\\n .prices[market]\\\n .high[-context.fifty_five_day_breakout-1:-1]\\\n .max()\n \n if context.is_test:\n assert(len(context.twenty_day_high) > 0)\n assert(len(context.fifty_five_day_high) > 0)\n \n if context.is_debug:\n time_taken = (time() - start_time) * 1000\n log.debug('Executed in %f ms.' % time_taken)\n assert(time_taken < 1024)",
"def test_lowest_price_many_listings(self):\n listings = steam_market.get_lowest_price(soup=get_soup_from_path(TEST_FILE_MANY_RESULTS))\n self.assertEqual('0,03€', listings)",
"def stock_market(no_profiles: int) -> tuple:\n all_companies = []\n Stocks = namedtuple(\"Stocks\", 'name symbol open high close company_weight')\n MkValue_ = random.uniform(1000, 50000, 100)\n wts_ = random.uniform(0, 1, 100)\n wts_ = wts_/sum(wts_)\n\n for _ in range(100):\n name = fake.company()\n open_ = round(MkValue_[_]*wts_[_],2)\n close = round(open_ * random.uniform(0.7, 1.15), 2)\n high = round(open_ * random.uniform(0.85, 1.15), 2)\n if high < open_:\n high = open_\n if high < close:\n high = close\n\n all_companies.append(\n Stocks(name=name, symbol=symbol(name), open=open_, high=round(high, 2), close=round(close, 2), company_weight=round(wts_[_], 4)))\n\n stock_index = round(\n sum(x.open * x.company_weight for x in all_companies), 4)\n highest_for_day = round(\n sum(x.high * x.company_weight for x in all_companies), 2)\n lowest_close_for_day = round(\n sum(x.close * x.company_weight for x in all_companies), 2)\n\n # print(f\"\\n------------------------------------Top 100 listed companies on Fake Stock Exchange------------------------------------\")\n # [print(x) for x in sorted(all_companies, key=lambda x:x.symbol)]\n # print(f\"\\n--------------Main details on {date.today()}--------------\")\n # print(f\"\\nStart of the day: {stock_index}\")\n # print(f\"Highest for the day: {highest_for_day}\")\n # print(f\"Lowest close for the day: {lowest_close_for_day}\")\n return sorted(all_companies, key=lambda x: x.symbol), stock_index, highest_for_day, lowest_close_for_day",
"def getStockPrices(ticker, frequency=\"monthly\", update=False):\n name = ticker + \"_\" + frequency # Name of data in cache\n prices = None\n # If there is no cached version of the pickle, or update flag is on, download price data and cache it\n if update or not util.pickleExists(name):\n try:\n prices = Quandl.get(STOCK_DATA_SOURCE + ticker, collapse=frequency, authtoken=\"xx_T2u2fsQ_MjyZjTb6E\")\n util.savePickle(prices, name)\n # Catch various connection errors\n except:\n return -1\n # Otherwise, use most recent cache entry\n else:\n prices = util.getMostRecentPickle(name)\n\n # Return closing prices\n return prices.get(\"Close\")",
"def get_most_and_least_expensive_high_review_product(df):\n try:\n df3 = merge_metadata(df)\n product_filter = df3['overall'] >= 4.0\n high_reviewed_products = df3[product_filter]\n # print high_reviewed_products[:10]\n # The data contained NaN so we use the nanmax/min funtions to get max/min\n most_exp = round(np.nanmax(high_reviewed_products['price'])[0], 2)\n least_exp = round(np.nanmin(high_reviewed_products['price'])[0], 2)\n\n most_exp_prod = df3.loc[df3['price'] == most_exp, 'asin'].iloc[0]\n least_exp_prod = df3.loc[df3['price'] == least_exp, 'asin'].iloc[0]\n write_text_tofile(\"Most Expensive Product: \" + str(most_exp_prod) + \", Price: \" + str(most_exp))\n write_text_tofile(\"Least Expensive Product: \" + str(least_exp_prod) + \", Price: \" + str(least_exp))\n return {most_exp_prod: most_exp, least_exp_prod: least_exp}\n except Exception as e:\n print \"Error getting most and least expensive high review product\"\n print str(e)\n pass",
"def sorted_fruit_quantity(f):\n # skip the header of the file\n move_cursor(f)\n # put all the quantities into a list\n # expected output: [5, 10, 3, 15]\n # read the file line by line\n output = []\n for line in f:\n line_list = line.split() # [\"Apple\",\"5\"]\n output.append(int(line_list[1]))\n # sort the list in descending order\n # expected output: [15, 10, 5, 3]\n output.sort(reverse=True)\n # only select the highest two quantities in the list and return them\n # expected output: [15, 10]\n # slicing\n # Hint: ending pos is the index of the first element that I don't want to include\n # in the final result\n return output[0:2]",
"def main():\n # store config deets\n tp = config['TRADE_OPTIONS']['TP']\n sl = config['TRADE_OPTIONS']['SL']\n enable_tsl = config['TRADE_OPTIONS']['ENABLE_TSL']\n tsl = config['TRADE_OPTIONS']['TSL']\n ttp = config['TRADE_OPTIONS']['TTP']\n pairing = config['TRADE_OPTIONS']['PAIRING']\n qty = config['TRADE_OPTIONS']['QUANTITY']\n frequency = config['TRADE_OPTIONS']['RUN_EVERY']\n test_mode = config['TRADE_OPTIONS']['TEST']\n\n all_coins = get_all_coins()\n coin_seen_dict = generate_coin_seen_dict(all_coins)\n\n while True:\n try:\n\n # check if the order file exists and load the current orders\n # basically the sell block and update TP and SL logic\n if os.path.isfile('order.json'):\n order = load_order('order.json')\n\n for coin in list(order):\n\n # store some necesarry trade info for a sell\n stored_price = float(order[coin]['price'])\n coin_tp = order[coin]['tp']\n coin_sl = order[coin]['sl']\n volume = order[coin]['volume']\n symbol = coin.split(pairing)[0]\n\n\n last_price = get_price(symbol, pairing)\n\n # update stop loss and take profit values if threshold is reached\n if float(last_price) > stored_price + (stored_price*coin_tp /100) and enable_tsl:\n # increase as absolute value for TP\n new_tp = float(last_price) + (float(last_price)*ttp /100)\n # convert back into % difference from when the coin was bought\n new_tp = float( (new_tp - stored_price) / stored_price*100)\n\n # same deal as above, only applied to trailing SL\n new_sl = float(last_price) - (float(last_price)*tsl /100)\n new_sl = float((new_sl - stored_price) / stored_price*100)\n\n # new values to be added to the json file\n order[coin]['tp'] = new_tp\n order[coin]['sl'] = new_sl\n store_order('order.json', order)\n\n print(f'updated tp: {round(new_tp, 3)} and sl: {round(new_sl, 3)}')\n\n # close trade if tsl is reached or trail option is not enabled\n elif float(last_price) < stored_price - (stored_price*sl /100) or float(last_price) > stored_price + (stored_price*tp /100) and not enable_tsl:\n\n try:\n\n # sell for real if test mode is set to false\n if not test_mode:\n sell = create_order(coin, coin['volume'], 'SELL')\n\n\n print(f\"sold {coin} at {(float(last_price) - stored_price) / float(stored_price)*100}\")\n\n # remove order from json file\n order.pop(coin)\n store_order('order.json', order)\n\n except Exception as e:\n print(e)\n\n # store sold trades data\n else:\n if os.path.isfile('sold.json'):\n sold_coins = load_order('sold.json')\n\n else:\n sold_coins = {}\n\n if not test_mode:\n sold_coins[coin] = sell\n store_order('sold.json', sold_coins)\n else:\n sold_coins[coin] = {\n 'symbol':coin,\n 'price':last_price,\n 'volume':volume,\n 'time':datetime.timestamp(datetime.now()),\n 'profit': float(last_price) - stored_price,\n 'relative_profit': round((float(last_price) - stored_price) / stored_price*100, 3)\n }\n\n store_order('sold.json', sold_coins)\n\n else:\n order = {}\n\n # check if new coins are listed\n new_coins = get_new_coins(coin_seen_dict)\n\n # the buy block and logic pass\n if len(new_coins) > 0:\n\n print(f'New coins detected: {new_coins}')\n\n for coin in new_coins:\n\n # buy if the coin hasn't already been bought\n if coin['symbol'] not in order and pairing in coin['symbol']:\n symbol_only = coin['symbol'].split(pairing)[0]\n print(f\"Preparing to buy {coin['symbol']}\")\n\n price = get_price(symbol_only, pairing)\n volume = convert_volume(coin['symbol'], qty, price)\n\n try:\n # Run a test trade if true\n if config['TRADE_OPTIONS']['TEST']:\n order[coin['symbol']] = {\n 
'symbol':symbol_only+pairing,\n 'price':price,\n 'volume':volume,\n 'time':datetime.timestamp(datetime.now()),\n 'tp': tp,\n 'sl': sl\n }\n\n print('PLACING TEST ORDER')\n\n # place a live order if False\n else:\n order[coin['symbol']] = create_order(symbol_only+pairing, volume, 'BUY')\n order[coin['symbol']]['tp'] = tp\n order[coin['symbol']]['sl'] = sl\n\n except Exception as e:\n print(e)\n\n else:\n print(f\"Order created with {volume} on {coin['symbol']}\")\n\n store_order('order.json', order)\n else:\n print(f\"New coin detected, but {coin['symbol']} is currently in portfolio, or {pairing} does not match\")\n\n else:\n pass\n\n except Exception as e:\n print(e)",
"def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)",
"def test_get_result_top_file(self):\n pass",
"def read_and_count(counter, filename, max_lines):\n # list and counter for baskets\n baskets = []\n n_baskets = 0\n # open file and read lines\n with open(filename) as fp:\n # loop through every line in file\n for line in fp:\n # break if reached % of file needed\n if n_baskets >= max_lines:\n break\n # remove trailing char and split by space to get list of numbers\n line = line.rstrip().split(\" \")\n # create basket from line\n basket = MiningLibrary.basket_to_int(line)\n # add basket to list\n baskets.append(basket)\n # update counter occurence for count of single items\n counter = MiningLibrary.count_single(counter, basket)\n # increment counter\n n_baskets += 1\n # return the baskets and updated counter\n return (counter, n_baskets, baskets)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True if expires is not equal to orig_expires.
|
def updated(self):
return self.expires != self.orig_expires
|
[
"def test_expires(self):\n # We aren't bother going to test the actual time in expires, that\n # way lies pain with broken tests later.\n up = self.get(self.good_data)\n hdrs = dict(up.get_headers(1))\n lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])\n exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])\n assert (exp - lm).seconds == 3600",
"def is_expired(self):\n if not jwt_oauth2_settings.PUBLIC_KEY_EXPIRE_DAYS:\n return False\n else:\n delta = timezone.now() - self.pub_key_last_updated\n return delta.days >= jwt_oauth2_settings.PUBLIC_KEY_EXPIRE_DAYS",
"def __isAbsExpired(self):\n return self.abs_exp_date<time.time()",
"def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n try:\n if_none_match = request_headers[\"if-none-match\"]\n etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False",
"def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True",
"def is_token_expiring(self) -> bool:\n token_total_lifetime: timedelta = datetime.utcnow() - self.oauth.access_token_received_datetime\n token_updated_expires_in: int = self.oauth.access_token_expires_in_seconds - token_total_lifetime.seconds\n return False if token_updated_expires_in > self.refresh_token_safe_delta else True",
"def _about_to_expire(self, secret: Secret) -> bool:\n return secret.is_expired(datetime.now(UTC) + self.expiry_margin)",
"def has_expired(self):\n return datetime.datetime.now() > self._cert.not_valid_after",
"def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()",
"def is_expired(self):\n return self.x509.has_expired()",
"def IsAbandoned(self):\r\n return self.expiration is not None and self.expiration <= time.time()",
"def is_expired(self):\n try:\n _ = self == self # This calls __eq__ and fails if the slot has expired\n return False\n except ReferenceError:\n return True",
"def is_expired(self):\n return int(time.time()) - self.time > self.interval",
"def _has_expired(self):\n try:\n expires = datetime.fromtimestamp(\n os.stat(self.lockfile).st_mtime\n )\n except OSError as e:\n if e in self.NOT_EXIST_ERRORS:\n return False\n raise\n return datetime.now() > expires",
"def _has_expired(self):\r\n expired = False\r\n if hasattr(self, 'Expiration'):\r\n now = datetime.datetime.utcnow()\r\n expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')\r\n expired = (now >= expiration)\r\n else:\r\n raise ValueError(\"ERROR: Request for expired property, but no Expiration in HIT!\")\r\n return expired",
"def _is_file_expired(self, filename):\n if not os.path.exists(filename):\n return True\n mtime = os.stat(filename).st_mtime\n expire_time = mtime + self._cache_ttl\n now = time.time()\n return expire_time < now",
"def is_access_expired(self) -> bool:\n entitlement_contract = self.cfg.entitlements.get(self.name, {})\n # TODO(No expiry per resource in MVP yet)\n expire_str = entitlement_contract.get('expires')\n if not expire_str:\n return False\n expiry = datetime.strptime(expire_str, '%Y-%m-%dT%H:%M:%S.%fZ')\n if expiry >= datetime.utcnow():\n return False\n return True",
"def test_expires_soon(self):\n now = timezone.now()\n window = SparkSettings().RENEW_TOKEN_WINDOW\n cur = self.factory.build(access_token='good',\n expires_at=now + timedelta(seconds=window*2))\n exp = self.factory.build(access_token='expired',\n expires_at=now + timedelta(seconds=window/2))\n self.assertFalse(cur.expires_soon())\n self.assertTrue(exp.expires_soon())",
"def is_valid(self):\n return self.is_signed and not self.is_expired"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Called to move/remove one item. Returns True if the item was purged, False if it was moved to self.new_expiry.
|
def remove_one(self):
item = self.expiry.pop(0)
if item.updated:
self.new_expiry.append(item)
        return False
del self.index[item.target]
    return True
|
[
"def consume(self, item, index):\n\n slot = self.holdables[index]\n\n # Can't really remove things from an empty slot...\n if slot is None:\n return False\n\n if slot.holds(item):\n self.holdables[index] = slot.decrement()\n return True\n\n return False",
"def remove(self, item):\n with self.mutex:\n if item not in self.list:\n return False\n\n self.list.remove(item)\n return True",
"def remove(self, item):\n try:\n self.pool.remove(item)\n except KeyError:\n return False\n return True",
"def pop(self) -> bool:\n if len(self) == 0:\n return False\n self.pop_item()\n return True",
"def _has_expired(self, item):\n with self.lock:\n if item.ttl != 0:\n if time() > item.timestamp + item.ttl:\n print(\"Object %d has expired and will be removed from cache\", self.info(item.name))\n logging.debug(\"Object %s has expired and will be removed from cache\", self.info(item.name) )\n self._cache.pop(item.name)\n return True\n return False",
"def remove(self,item):\r\n raise AbstractError\r\n return False",
"def drop(self, item: Item) -> bool:\n if item in self.bag:\n self.__bag.remove(item)\n self.room._add_item(item)\n return True\n return False",
"def is_expired(self, key, now=None, remove=False):\n with self._lock:\n if now is None:\n now = time.time()\n # pylint: disable=unused-variable\n expire, _value = self._values[key]\n if expire is None:\n return False\n expired = expire < now\n if expired and remove:\n self.__delitem__(key)\n return expired",
"def pickup(self, item: Item) -> bool:\n if len(self.bag) >= 5:\n return False\n\n if self.__room._take(item):\n self.__bag.append(item)\n return True\n\n raise Exception(f\"{item} was not found in {self.room}\")",
"def _remove_expired(self):\n with self.__lock:\n is_changed = False\n for k in list(self._d.keys()):\n if self._d[k].is_expired():\n log.debug(\"removing expired item: {}\".format(self._d[k]))\n del self[k]\n is_changed = True\n\n if (is_changed is True) and (self.is_persistent):\n # save changed cache file\n self.save()",
"def keep_item(self, content_item):\n return self._content_item_comparison_weak(\n content_item, self.touch_content_item\n )",
"def delete_item(self, key):\n self.is_empty = True if len(self.dict) is 1 else False\n return self.dict.pop(key)",
"def purging() -> bool:\r\n return _purge",
"def _expire_item(self, key):\n (timeout, callback) = self._timeouts[key]\n now = time.time()\n if timeout <= now:\n item = dict.pop(self, key)\n del self._timeouts[key]\n if callback:\n try:\n callback(key, item)\n except TypeError:\n try:\n callback(key)\n except TypeError:\n callback()\n return None\n else:\n return timeout - now",
"def expire(self, current_time=None):\r\n if not self._queue:\r\n return\r\n\r\n if current_time is None:\r\n current_time = time()\r\n\r\n while self._queue:\r\n # Top most item is not expired yet\r\n top = self._queue[0]\r\n\r\n # Early exit if item was not promoted and its expiration time\r\n # is greater than now.\r\n if top.promoted is None and top.expiry_date > current_time:\r\n break\r\n\r\n # Pop item from the stack\r\n top = heappop(self._queue)\r\n\r\n need_reschedule = (top.promoted is not None\r\n and top.promoted > current_time)\r\n\r\n # Give chance to reschedule\r\n if not need_reschedule:\r\n top.promoted = None\r\n top.on_delete(False)\r\n\r\n need_reschedule = (top.promoted is not None\r\n and top.promoted > current_time)\r\n\r\n # If item is promoted and expiration time somewhere in future\r\n # just reschedule it\r\n if need_reschedule:\r\n top.expiry_date = top.promoted\r\n top.promoted = None\r\n heappush(self._queue, top)\r\n else:\r\n del self._items[top.session_id]",
"def inventory_remove(self, item):\n if (item in self.ItemList):\n self.ItemList.remove(item)\n return 0\n # Item not found.\n return 1",
"def delete_entry(self, key):\n cached_entry = self._container.get(key, None)\n if not cached_entry:\n return False\n del self._container[key]\n return not cached_entry.is_expired",
"def _expire_item(self, key):\r\n (timeout, callback) = self._timeouts[key]\r\n now = time.time()\r\n if timeout <= now:\r\n item = dict.pop(self, key)\r\n del self._timeouts[key]\r\n if callback:\r\n try:\r\n callback(key, item)\r\n except TypeError:\r\n try:\r\n callback(key)\r\n except TypeError:\r\n callback()\r\n return None\r\n else:\r\n return timeout - now",
"def remove(self, item: T):\n if item in self.queue:\n del self.queue[item]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Purge expired/oldest entries from the cache. Entries are purged when they are older than the TTL, or while the total number of entries exceeds MAX_ASSOCS.
|
def purge(self):
if not self.index:
return
now = time()
while self.expiry[0].orig_expires <= now or len(self.index) > MAX_ASSOCS:
self.remove_one()
if not self.expiry:
if not self.index:
return
self.rotate_lists()
return
|
[
"def _clean_cache(self):\r\n query = _AppEngineUtilities_Cache.all()\r\n query.filter('timeout < ', datetime.datetime.now())\r\n results = query.fetch(self.max_hits_to_clean)\r\n db.delete(results)\r\n #for result in results:\r\n # result.delete()\r",
"def purge(self, key, ttl):\n mors_to_purge = []\n now = time.time()\n with self._mor_lock:\n # Don't change the dict during iteration!\n # First collect the names of the Mors to remove...\n for name, mor in iteritems(self._mor[key]):\n age = now - mor['creation_time']\n if age > ttl:\n mors_to_purge.append(name)\n\n # ...then actually remove the Mors from the cache.\n for name in mors_to_purge:\n del self._mor[key][name]",
"def purgeExpiredRecords(self):\n if hasattr(self, \"_test_time\"):\n now = self._test_time\n else:\n now = time.time()\n\n for indexType in self._cache:\n for key, (cachedTime, _ignore_record) in self._cache[indexType].items():\n if now - self._expireSeconds > cachedTime:\n del self._cache[indexType][key]",
"def cache_clean(self):\n\t\tnow = time.time()\n\t\tkeys_for_removal = collections.deque()\n\t\tfor key, (_, expiration) in self.__cache.items():\n\t\t\tif expiration < now:\n\t\t\t\tkeys_for_removal.append(key)\n\t\tfor key in keys_for_removal:\n\t\t\tdel self.__cache[key]",
"def clean_up(cls):\n for s,c in list(cls.section_cache.items()):\n for id in list(c.keys()):\n if cls._is_expired(s, id):\n cls._expire(c, s, c[id])\n log('Cleaned expired slates')",
"def purge_expired (aging_hash, interval=aging_hash_interval):\n\n expired = []\n for k, v in aging_hash.items():\n set_time = v[0]\n if (time.time() - set_time) > aging_hash_interval:\n expired.append(k)\n for ex_k in expired:\n del aging_hash[ex_k]",
"def purgeExpiredRequests( self ):\n cmd = \"DELETE FROM `ProxyDB_Requests` WHERE ExpirationTime < UTC_TIMESTAMP()\"\n return self._update( cmd )",
"def _prune_cache(self):\n default_expiry = datetime.datetime.utcnow() - datetime.timedelta(minutes=self.cache_resources_for)\n for resource_id, resource in self.local_resource_status.items():\n if 'cache_until' in resource:\n if datetime.datetime.utcnow() > resource['cache_until']:\n self._delete_cache(resource_id)\n elif resource['last_accessed'] < default_expiry:\n self._delete_cache(resource_id)",
"def clean_cache(self):\n current_time = time.time()\n query = {'das.expire': { '$lt':current_time} }\n self.logdb.insert('merge', {'delete': self.merge.find(query).count()})\n self.merge.remove(query)\n self.logdb.insert('cache', {'delete': self.col.find(query).count()})\n self.col.remove(query)",
"def _remove_expired(self):\n with self.__lock:\n is_changed = False\n for k in list(self._d.keys()):\n if self._d[k].is_expired():\n log.debug(\"removing expired item: {}\".format(self._d[k]))\n del self[k]\n is_changed = True\n\n if (is_changed is True) and (self.is_persistent):\n # save changed cache file\n self.save()",
"def clean_cache(self):\n timer = Timer()\n entries = []\n for file_in_cache in self.find_archives():\n cache_metadata = self.read_metadata(file_in_cache)\n last_accessed = cache_metadata.get('last-accessed', 0)\n entries.append((last_accessed, file_in_cache))\n to_remove = sorted(entries)[:-self.cache_limit]\n if to_remove:\n for last_used, file_in_cache in to_remove:\n logger.debug(\"Removing archive from cache: %s\", file_in_cache)\n metadata_file = self.get_metadata_file(file_in_cache)\n self.context.execute('rm', '-f', file_in_cache, metadata_file)\n logger.verbose(\"Took %s to remove %s from cache.\",\n timer, pluralize(len(to_remove), \"archive\"))\n else:\n logger.verbose(\"Wasted %s checking whether cache needs to be cleaned (it doesn't).\", timer)",
"def purge_nonupdated():\n global last_purge\n \n print \"Starting purge.\"\n \n todel = []\n for uid, session in session_dict.iteritems():\n delta = datetime.datetime.now() - session.last_update\n if (delta.days * 86400 + delta.seconds) > PURGE_THRESHOLD:\n todel.append(uid)\n models.DBSession.objects.get(id=session.dbid).delete()\n \n counter = 0\n for key in todel:\n \n del session_dict[key]\n counter += 1\n print \"Purged \" + str(counter) + \" sessions.\"\n last_purge = datetime.datetime.now()",
"def cull(self):\n self.lock.acquire()\n try:\n #remove dead references from the expired cache\n keys = self.expiredCache.keys()\n for key in keys:\n if self.expiredCache[key]() is None:\n self.expiredCache.pop(key, None)\n\n keys = self.cache.keys()\n for i in xrange(self.cullOffset, len(keys), self.cullFraction):\n id = keys[i]\n # create a weakref, then remove from the cache\n obj = ref(self.cache[id])\n del self.cache[id]\n\n #the object may have been gc'd when removed from the cache\n #above, no need to place in expiredCache\n if obj() is not None:\n self.expiredCache[id] = obj\n # This offset tries to balance out which objects we\n # expire, so no object will just hang out in the cache\n # forever.\n self.cullOffset = (self.cullOffset + 1) % self.cullFraction\n finally:\n self.lock.release()",
"def garbage_collect(cls, days=None):\n days = (\n getattr(settings, \"WAGTAILSEARCH_HITS_MAX_AGE\", 7) if days is None else days\n )\n min_date = timezone.now().date() - datetime.timedelta(days)\n\n cls.objects.filter(date__lt=min_date).delete()",
"def purge_cache(max_age, cache_dir):\r\n\r\n for path in _find_collection_dirs_to_purge(get_collection_dirs(cache_dir), time.time() - max_age):\r\n _rmtree(path)",
"def expireAll(self):\n if not self.doCache:\n return\n self.lock.acquire()\n try:\n for key, value in self.cache.items():\n self.expiredCache[key] = ref(value)\n self.cache = {}\n finally:\n self.lock.release()",
"def clean_local_cache(self):\n to_expire = []\n now = int(time())\n\n try:\n for k, (_, _, grace) in six.iteritems(self._local_cache):\n if now > grace:\n to_expire.append(k)\n except RuntimeError:\n # It's possible for the dictionary to be mutated in another thread\n # while iterating, but this case is rare, so instead of making a\n # copy and iterating that, it's more efficient to just let it fail\n # gracefully. It'll just get re-run later.\n return\n\n for k in to_expire:\n try:\n del self._local_cache[k]\n except KeyError:\n # This could only exist in a race condition\n # where another thread has already deleted this key,\n # but we'll guard ourselves against it Justin Case.\n pass",
"def _purge_old(self):\n now = dt_util.utcnow()\n\n _LOGGER.debug(\n \"%s: purging records older then %s(%s)\",\n self.entity_id,\n dt_util.as_local(now - self._samples_max_age),\n self._samples_max_age,\n )\n\n while self.ages and (now - self.ages[0]) > self._samples_max_age:\n _LOGGER.debug(\n \"%s: purging record with datetime %s(%s)\",\n self.entity_id,\n dt_util.as_local(self.ages[0]),\n (now - self.ages[0]),\n )\n self.ages.popleft()\n self.states.popleft()",
"def purge(self, age_in_days=30, max_rows=100):\n self._purge(age_in_days, max_rows)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get common letters of two words
|
def get_common_letters(word1: str, word2: str) -> str:
common = ''
for x, y in zip(word1, word2):
if x == y:
common += x
return common
|
[
"def common_chars(string1, string2):\n \n common = Counter(string1.casefold()) & Counter(string2.casefold())\n return sum(common.values())",
"def num_common_letters(goal_word, guess):\n \"*** YOUR CODE HERE ***\"\n a=get_list(goal_word)\n b=get_list(guess)\n i=0\n num=0\n while i<len(a):\n j=0\n while j<len(b):\n if a[i]==b[j]:\n k=0\n judge=0\n while k<j:\n if b[k]==b[j]:\n judge=1\n k=k+1\n if judge==0:\n num=num+1\n j=j+1\n i=i+1\n return num",
"def words_in_common(words1, words2):\n\n # TODO: replace this with your code",
"def num_common_letters(goal_word, guess):\n \"*** YOUR CODE HERE ***\"\n non_repeated, result = [], 0\n guess_letters, goal = get_list(guess), get_list(goal_word)\n #this function gets rid of the repeated letters in guess\n for letter in range (0, len(guess_letters)):\n if not guess_letters[letter] in non_repeated:\n non_repeated += guess_letters[letter]\n letter += 1\n\n #Goes through the list goal to see if each letter in non_repeated is present\n #If so, it adds 1 to result \n for letter in range(0, len(non_repeated)):\n for l in range(0,len(goal)): \n if non_repeated[letter] == goal[l]:\n result += 1\n l+= 1\n letter += 1\n return result",
"def words_in_both(first_word, second_word):\n common_words = set() # Empty set to mutate\n first_word_list = first_word.split() # Split both string inputs\n second_word_list = second_word.split()\n\n lower_first_list = [x.lower() for x in first_word_list] # Ensures both are lowercase\n lower_second_list = [x.lower() for x in second_word_list]\n\n for word in lower_first_list: # iterates through every word in the first list\n if word in lower_second_list: # checks if word is same in second list\n common_words.add(word) # add common word to the empty set\n return common_words",
"def get_shared_prefix(word1: str, word2: str) -> str:\n shared_prefix = \"\"\n for char1, char2 in zip(word1, word2):\n if char1 == char2:\n shared_prefix += char1\n else:\n break\n return shared_prefix",
"def uses_only(word1,word2):\n for letter in word1:\n if not letter in word2: return False\n return True",
"def find_overlapping_letters(a: str, b: str) -> str:\n best = \"\"\n for i in range(len(a) - 1, -1, -1):\n substr = a[i:]\n if b.find(substr) == 0 and len(substr) > len(best):\n best = substr\n \n return best",
"def uses_all(word1, word2):\n for letter in word2:\n if not letter in word1: return False\n return True",
"def shared_words(text1, text2):\r\n\r\n list1 = tokenize(text1.strip(' '))\r\n list2 = tokenize(text2.strip(' '))\r\n\r\n list3 = set(list1) & set(list2)\r\n list3.remove(' ');\r\n\r\n return list3",
"def common_words(first, second):\n\n # Split the strings into lists of words\n first_words = first.split(',')\n second_words = second.split(',')\n\n duplicate_words = []\n\n # Check if there are duplicate words in the lists\n for item in first_words:\n if item in second_words:\n duplicate_words.append(item) # Create a list of the duplicate words\n\n result = ','.join(sorted(duplicate_words))\n\n if len(duplicate_words) == 0:\n print \"There are no common words in the two strings.\"\n\n return result",
"def find_common_words(set1, set2, common):\n for w1 in set1:\n for w2 in set2:\n if w1[0] == w2[0]:\n common.append(w1[0])\n break",
"def get_alphanumeric_intersection(set1, set2):\n set1_clean = [x.upper() for x in set1]\n set2_clean = [x.upper() for x in set2]\n set1_clean = set([re.sub('[\\W_]', '', x) for x in set1_clean])\n set2_clean = set([re.sub('[\\W_]', '', x) for x in set2_clean])\n return set.intersection(set1_clean, set2_clean)",
"def commonCharacterCount(s1, s2):\n return sum(min(s1.count(x),s2.count(x)) for x in set(s1))",
"def alphabetical_diff(c1, c2):\r\n return abs(string.ascii_lowercase.index(c1) - string.ascii_lowercase.index(c2))",
"def equal_words(word1, word2):\n word1 = word1.lower()\n word2 = word2.lower()\n return remove_polish_characters(word1) == remove_polish_characters(word2)",
"def word_difference(word1,word2):\n assert len(word1) == len(word2)\n\n count = 0 \n for c1, c2 in zip(word1, word2):\n if c1 != c2:\n count +=1\n return count",
"def total_char_similarity(a,b):\n\ta_words, b_words = map(norm.set_clean_tokens, [a,b])\n\n\ttotal_score = 0\n\tfor ai in a_words:\n\t\tfor bi in b_words:\n\t\t\ttotal_score += similar(ai, bi)\n\treturn total_score",
"def difference(word1: str, word2: str):\n\n count = 0\n for c1, c2 in zip(word1, word2):\n if c1 != c2:\n count += 1\n return count"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that filtering a plan by its metal level matches only Silver-level plans.
|
def test_filtering_plans_by_metal_level_matches_only_silver(self):
silver_plan_inputs = [
{
'plan_id': '05276NA2900195',
'state': 'MI',
'metal_level': 'Silver',
'rate': '283.39',
'rate_area': '1'
},
{
'plan_id': '05276NA2900195',
'state': 'MI',
'metal_level': 'silver',
'rate': '283.39',
'rate_area': '1'
}
]
non_silver_plan_inputs = [
{
'plan_id': '68493CI1477769',
'state': 'SC',
'metal_level': 'Bronze',
'rate': '214.57',
'rate_area': '21'
},
{
'plan_id': '09812TP4606635',
'state': 'NV',
'metal_level': 'Platinum',
'rate': '331.363599',
'rate_area': '1'
},
{
'plan_id': '11698OD6718414',
'state': 'SC',
'metal_level': 'Gold',
'rate': '269.54',
'rate_area': '8'
},
{
'plan_id': '70547DK6596753',
'state': 'FL',
'metal_level': 'Catastrophic',
'rate': '241.1',
'rate_area': '57'
}
]
for silver_plan in silver_plan_inputs:
result = filter_plan_metal_level(silver_plan, DESIRED_METAL_LEVEL)
self.assertEqual(True, result)
for non_silver_plan in non_silver_plan_inputs:
result = filter_plan_metal_level(
non_silver_plan,
DESIRED_METAL_LEVEL
)
self.assertEqual(False, result)
|
[
"def test_instrument_inventory_filtering():\n filt = 'GR150R'\n data = mm.instrument_inventory('niriss',\n add_filters={'filter': filt},\n return_data=True)\n\n filters = [row['filter'] for row in data['data']]\n\n assert all([i == filt for i in filters])",
"def _check_filters(self, level):\n if(self.filters == Filters.NoFilter):\n return True\n else:\n return (self.filters & level.filters == 0)",
"def pollinated_filter(tile):\n current_sprite = tile.contains_sprite\n if current_sprite and current_sprite.type == \"plant\" and current_sprite.is_pollinated:\n return False\n else:\n return True",
"def filter_cap(stock):\n return stock['Class'] == 'Small'",
"def test_filters():\n\n csvpath =Path('../data/daily_rate_sheet.csv')\n bank_data = fileio.load_csv(csvpath)\n current_credit_score = 750\n debt = 1500\n income = 4000\n loan = 210000\n home_value = 250000\n\n monthly_debt_ratio = 0.375\n\n loan_to_value_ratio = 0.84\n\n filtered_data = max_loan_size.filter_max_loan_size(loan, bank_data)\n filtered_data = credit_score.filter_credit_score(current_credit_score, filtered_data)\n filtered_data = debt_to_income.filter_debt_to_income(monthly_debt_ratio, filtered_data) \n filtered_data = loan_to_value.filter_loan_to_value(loan_to_value_ratio, filtered_data)\n \n assert len(filtered_data) == 6",
"def test_get_rate_plan_by_product(self):\n pass",
"def _filter_level(self, min_level=1, max_level=20):\n low_level = self._data['level'] >= min_level\n high_level = self._data['level'] <= max_level\n return self._data[low_level & high_level]",
"def vol_filter(self,vol_series,threshold=\"25%\"):\n \n thres_vol = vol_series.describe()[threshold]\n valid_asset = list(vol_series[vol_series>=thres_vol].index)\n \n self.update_universe = valid_asset",
"def _target_filter(self, obj):\r\n return type(obj).__name__ in ['Cube'] and not obj.is_grasped # List because may be extended to other objects.\r",
"def test_search_meal_plan_free(self):\n pass",
"def test_get_rate_plan_by_product_and_rate_plan(self):\n pass",
"def filter_out_reduced_healing(raw_heals):\n max_heal = max(raw_heals)\n threshold = 0.75 * max_heal\n\n selector = raw_heals > threshold\n\n return raw_heals[selector]",
"def filterLevelSlot(self, level, shown):\r\n\r\n if shown:\r\n self.model.removeFilter(level)\r\n else:\r\n self.model.addFilter(level)",
"def test_filter(self):\n fs = ProductFilterSet(data={'description': 'apple'})\n filtered_qs = fs.filter(self.qs)\n for product in filtered_qs:\n self.assertTrue('apple' in product.description.lower())",
"def use_block_quality(self, searcher, matcher=None):\n\n use = (self.usequality\n and not searcher.weighting.use_final\n and not self.should_add_all())\n if matcher:\n use = use and matcher.supports_block_quality()\n return use",
"def _filter(self, filter_condition):",
"def test_stealable(self):\r\n prod = Product(name='Test Product',\r\n weight=100, price=1,\r\n flammability=0.5)\r\n self.assertEqual(prod.stealability(), \"Not so stealable...\")",
"def _is_filter_match(self, arb_id):\n if not self.sw_filters:\n # Filtering done on HW or driver level or no filtering\n return True\n for can_filter in self.sw_filters:\n if not (arb_id ^ can_filter['can_id']) & can_filter['can_mask']:\n return True\n return False",
"def test_fails_filters_high_maf(self):\n \n # check th\n for pop in self.pops:\n var = self.var\n var.info[pop] = \"0.0101\"\n self.assertFalse(var.passes_filters())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that the zipcode data is cleaned properly and contains only unique rate areas.
|
def test_clean_zipcode_data_is_unique(self):
input = {
'11111': [('NY', '5')],
'22222': [('WI', '2')],
'33333': [('WI', '2'), ('NY', '5')],
'44444': [('WI', '2'), ('WI', '2')],
'55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],
'66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],
'77777': [
('WI', '2'),
('WI', '2'),
('NY', '5'),
('NY', '5'),
('CA', '7')
]
}
expected = {
'11111': [('NY', '5')],
'22222': [('WI', '2')],
'33333': [('WI', '2'), ('NY', '5')],
'44444': [('WI', '2')],
'55555': [('WI', '2'), ('NY', '5')],
'66666': [('WI', '2'), ('NY', '5')],
'77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]
}
cleaned_rate_areas = clean_zipcode_rate_areas(input)
# Compare each set of rate areas for every zipcode; sort the values to
# make sure we're comparing the data correctly.
for zipcode, rate_areas in cleaned_rate_areas.items():
self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))
|
[
"def test_zip_detail_bad(self):\n city, state = get_city_and_state('99990')\n self.assertEqual('', city)\n self.assertEqual('', state)",
"def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_validate_location_zip_alpha(self):\n answer = wunderground_scraper.validate_location('4OO65')\n self.assertEqual(answer, False)",
"def test_post_bad_zipcode(self):\n response = self._index()\n form = response.forms[0]\n # Fill in all required fields.\n form.fields['billing_amount'][0].value = '40.00 option 1'\n form.fields['billing_email'][0].value = 'foo@bar.com'\n form.fields['billing_name'][0].value = 'name o. card'\n form.fields['billing_street'][0].value = '123 fake st'\n form.fields['billing_zip'][0].value = '8230'\n form.fields['billing_card_number'][0].value = '5105105105105100'\n form.fields['billing_expiration_month'][0].value = '06'\n form.fields['billing_expiration_year'][0].value = EXP_YEAR\n # Submit it and check for errors.\n response = form.submit()\n form = response.forms[0]\n doc = HTML(response.body)\n # Check form-errors for presence of error text.\n form_errors = CSSSelector('#form-errors')(doc)\n assert len(form_errors) == 1\n assert form_errors[0].text != ''\n # Check ZIP code for presence of error text.\n zip_errors = CSSSelector('#billing_zip-errors')(doc)\n assert len(zip_errors) == 1\n assert zip_errors[0].text != ''",
"def test_search_zip_post_code(self):\n pass",
"def test_zip_detail_good(self):\n city, state = get_city_and_state('12550')\n self.assertEqual('Newburgh', city)\n self.assertEqual('NY', state)",
"def test_update_zip_post_code(self):\n pass",
"def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_clean_plan_rates_sorts_and_makes_data_unique(self):\n\n input = {\n ('IN', '1'): [\n '304.5',\n '422.28',\n '386.79',\n '382.7',\n '332.21',\n '422.28',\n '382.7'\n ],\n ('SD', '2'): [\n '279.4',\n '250.14',\n '270.13',\n '274.56',\n '247.67',\n '279.4',\n '270.13'\n ],\n ('FL', '63'): [\n '398.14',\n '330.9',\n '324.61',\n '398.14',\n '345.91',\n '214.32',\n '330.9'\n ],\n ('FL', '54'): [\n '428.03',\n '294.87',\n '339.6',\n '409.72',\n '294.44'\n ]\n }\n\n expected = {\n ('IN', '1'): [\n '304.5',\n '332.21',\n '382.7',\n '386.79',\n '422.28'\n ],\n ('SD', '2'): [\n '247.67',\n '250.14',\n '270.13',\n '274.56',\n '279.4'\n ],\n ('FL', '63'): [\n '214.32',\n '324.61',\n '330.9',\n '345.91',\n '398.14'\n ],\n ('FL', '54'): [\n '294.44',\n '294.87',\n '339.6',\n '409.72',\n '428.03'\n ]\n }\n\n cleaned_plan_data = clean_plan_rates(input)\n self.assertEqual(expected, cleaned_plan_data)",
"def test_valids(self):\n for postcode, expected_result in self.valid_postcodes:\n self.failUnlessEqual(validate_postcode(postcode), expected_result)",
"def test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_city_no_zip(self):\n self.prep_location()\n self.loc.location_city = 'Fishkill'\n self.loc.save()\n # Creates coords if city or zip is populated.\n coords = GEOCODE_LOCATION.get_coordinate(location=self.loc)\n self.assertTrue(coords)",
"def _4_validate_zipCode_field(self):\n ###To check for the error message if Zip code is not entered.###\n driver.find_element_by_id(\"city\").send_keys(columns['city'][0])\n driver.find_element_by_id(\"zip_code\").clear()\n driver.find_element_by_id(\"btnSignUp\").click()\n print\"Verifying whether appropriate error message gets displayed if Zip code is not entered.\"\n errors = driver.find_elements_by_xpath(\"//html/body/div[3]/div[2]/div/div/form/div/div[6]/label[2]\")\n if errors != []:\n self.assertEqual(\"This field is required.\", str(errors[0].get_attribute('innerHTML')),\n \"Error message is not appropriate.\")\n print \"Appropriate error message is displayed.\"",
"def test_group_zips(database, monkeypatch):\n sess = database.session\n\n # Testing with a threshold of a simple majority\n test_threshold = 0.51\n monkeypatch.setattr(read_zips, 'MULTIPLE_LOCATION_THRESHOLD_PERCENTAGE', test_threshold)\n monkeypatch.setattr(load_location_data, 'MULTIPLE_LOCATION_THRESHOLD_PERCENTAGE', test_threshold)\n\n test_data = []\n\n # Only difference is the zip_last4, these will be merged together\n zip_same1 = Zips(zip5='12345', zip_last4='6789', state_abbreviation='VA', county_number='000',\n congressional_district_no='01')\n zip_same2 = Zips(zip5='12345', zip_last4='6780', state_abbreviation='VA', county_number='000',\n congressional_district_no='01')\n\n # Different states, same everything else\n zip_state1 = Zips(zip5='54321', zip_last4='6789', state_abbreviation='VA', county_number='000',\n congressional_district_no='01')\n zip_state2 = Zips(zip5='54321', zip_last4='6780', state_abbreviation='WA', county_number='000',\n congressional_district_no='01')\n\n # Different county codes, same everything else\n zip_county1 = Zips(zip5='11111', zip_last4='1111', state_abbreviation='VA', county_number='000',\n congressional_district_no='01')\n zip_county2 = Zips(zip5='11111', zip_last4='1112', state_abbreviation='VA', county_number='001',\n congressional_district_no='01')\n\n # Everything matches except for congressional district\n zip_cd1 = Zips(zip5='22222', zip_last4='2222', state_abbreviation='VA', county_number='000',\n congressional_district_no='01')\n zip_cd2 = Zips(zip5='22222', zip_last4='2223', state_abbreviation='VA', county_number='000',\n congressional_district_no='02')\n\n # Different states, different congressional district\n zip_state_cd1 = Zips(zip5='33333', zip_last4='3333', state_abbreviation='VA', county_number='000',\n congressional_district_no='01')\n zip_state_cd2 = Zips(zip5='33333', zip_last4='3334', state_abbreviation='WA', county_number='000',\n congressional_district_no='02')\n\n # Null congressional district\n zip_null_cd = Zips(zip5='44444', zip_last4='4444', state_abbreviation='WA', county_number='000',\n congressional_district_no=None)\n\n test_data.extend([zip_same1, zip_same2, zip_state1, zip_state2, zip_county1, zip_county2, zip_cd1, zip_cd2,\n zip_state_cd1, zip_state_cd2, zip_null_cd])\n\n # cd_state_grouped data - group by state, threshold overwritten to 100%\n # Split among 3, 01 having 66% => 90\n cd_state_grouped_thirds_1 = Zips(zip5='00000', zip_last4='0001', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_state_grouped_thirds_2 = Zips(zip5='00000', zip_last4='0002', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_state_grouped_thirds_3 = Zips(zip5='00000', zip_last4='0003', state_abbreviation='AA', county_number='000',\n congressional_district_no='02')\n cd_state_grouped_thirds_null = Zips(zip5='00000', zip_last4='0004', state_abbreviation='AA', county_number='000',\n congressional_district_no=None)\n # Split among 2, 50% => 90\n cd_state_grouped_half_1 = Zips(zip5='00001', zip_last4='0001', state_abbreviation='AB', county_number='000',\n congressional_district_no='01')\n cd_state_grouped_half_2 = Zips(zip5='00001', zip_last4='0002', state_abbreviation='AB', county_number='000',\n congressional_district_no='01')\n # Not split, 100% => 01\n cd_state_grouped_match = Zips(zip5='00002', zip_last4='0001', state_abbreviation='AC', county_number='000',\n congressional_district_no='01')\n test_data.extend([cd_state_grouped_match, cd_state_grouped_half_1, 
cd_state_grouped_half_2,\n cd_state_grouped_thirds_1, cd_state_grouped_thirds_2, cd_state_grouped_thirds_3,\n cd_state_grouped_thirds_null])\n\n # cd_zips_grouped data - group by state + zip\n # Split among 3, 01 having 66% => 01\n cd_zips_grouped_thirds_1 = Zips(zip5='00003', zip_last4='0001', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_zips_grouped_thirds_2 = Zips(zip5='00003', zip_last4='0002', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_zips_grouped_thirds_3 = Zips(zip5='00003', zip_last4='0003', state_abbreviation='AA', county_number='000',\n congressional_district_no='02')\n cd_zips_grouped_thirds_null = Zips(zip5='00003', zip_last4='0004', state_abbreviation='AA', county_number='000',\n congressional_district_no=None)\n # Split among 2, 50% => 90\n cd_zips_grouped_half_1 = Zips(zip5='00004', zip_last4='0001', state_abbreviation='AB', county_number='000',\n congressional_district_no='01')\n cd_zips_grouped_half_2 = Zips(zip5='00004', zip_last4='0002', state_abbreviation='AB', county_number='000',\n congressional_district_no='02')\n # Not split, 100% => 01\n cd_zips_grouped_match = Zips(zip5='00005', zip_last4='0001', state_abbreviation='AC', county_number='000',\n congressional_district_no='01')\n test_data.extend([cd_zips_grouped_match, cd_zips_grouped_half_1, cd_zips_grouped_half_2, cd_zips_grouped_thirds_1,\n cd_zips_grouped_thirds_2, cd_zips_grouped_thirds_3, cd_zips_grouped_thirds_null])\n\n # cd_city_grouped data - group by state + city\n # Split among 3, 01 having 66% => 01\n cd_city_grouped_thirds_1 = Zips(zip5='00006', zip_last4='0001', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_city_grouped_thirds_2 = Zips(zip5='00006', zip_last4='0002', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_city_grouped_thirds_3 = Zips(zip5='00006', zip_last4='0003', state_abbreviation='AA', county_number='000',\n congressional_district_no='02')\n cd_city_grouped_thirds_null = Zips(zip5='00006', zip_last4='0004', state_abbreviation='AA', county_number='000',\n congressional_district_no=None)\n cd_zip_city_1 = ZipCity(zip_code='00006', state_code='AA', city_name='Test City 1')\n # Split among 2, 50% => 90\n cd_city_grouped_half_1 = Zips(zip5='00007', zip_last4='0001', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_city_grouped_half_2 = Zips(zip5='00007', zip_last4='0002', state_abbreviation='AA', county_number='000',\n congressional_district_no='02')\n cd_zip_city_2 = ZipCity(zip_code='00007', state_code='AA', city_name='Test City 2')\n # Not split, 100% => 01\n cd_city_grouped_match = Zips(zip5='00008', zip_last4='0001', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_zip_city_3 = ZipCity(zip_code='00008', state_code='AA', city_name='Test City 3')\n test_data.extend([cd_city_grouped_match, cd_city_grouped_half_1, cd_city_grouped_half_2, cd_city_grouped_thirds_1,\n cd_city_grouped_thirds_2, cd_city_grouped_thirds_3, cd_zip_city_1, cd_zip_city_2, cd_zip_city_3,\n cd_city_grouped_thirds_null])\n\n # cd_county_grouped data - group by state + county\n # Split among 3, 01 having 66% => 01\n cd_county_grouped_thirds_1 = Zips(zip5='00009', zip_last4='0001', state_abbreviation='AA', county_number='000',\n congressional_district_no='01')\n cd_county_grouped_thirds_2 = Zips(zip5='00009', zip_last4='0002', state_abbreviation='AA', county_number='000',\n 
congressional_district_no='01')\n cd_county_grouped_thirds_3 = Zips(zip5='00009', zip_last4='0003', state_abbreviation='AA', county_number='000',\n congressional_district_no='02')\n cd_county_grouped_thirds_null = Zips(zip5='00009', zip_last4='0004', state_abbreviation='AA', county_number='000',\n congressional_district_no=None)\n # Split among 2, 50% => 90\n cd_county_grouped_half_1 = Zips(zip5='00010', zip_last4='0001', state_abbreviation='AA', county_number='001',\n congressional_district_no='01')\n cd_county_grouped_half_2 = Zips(zip5='00010', zip_last4='0002', state_abbreviation='AA', county_number='001',\n congressional_district_no='02')\n # Not split, 100% => 01\n cd_county_grouped_match = Zips(zip5='00011', zip_last4='0001', state_abbreviation='AA', county_number='002',\n congressional_district_no='01')\n test_data.extend([cd_county_grouped_match, cd_county_grouped_half_1, cd_county_grouped_half_2,\n cd_county_grouped_thirds_1, cd_county_grouped_thirds_2, cd_county_grouped_thirds_3,\n cd_county_grouped_thirds_null])\n\n sess.add_all(test_data)\n sess.commit()\n\n tables = [CDStateGrouped, CDZipsGrouped, CDCityGrouped, CDCountyGrouped, ZipsGrouped]\n tables = [table.__table__ for table in tables]\n # Creating the temp tables to use for testing\n sess.execute(\"\"\"\n CREATE TABLE temp_zips AS\n SELECT * FROM zips;\n CREATE TABLE temp_zip_city AS\n SELECT * FROM zip_city;\n \"\"\")\n for table in tables:\n sess.execute(f\"CREATE TABLE temp_{table} (LIKE {table} INCLUDING ALL);\")\n sess.commit()\n\n # Populate the tables\n generate_zips_grouped(sess)\n generate_cd_state_grouped(sess)\n generate_cd_zips_grouped(sess)\n generate_cd_county_grouped(sess)\n generate_cd_city_grouped(sess)\n\n # Moving into zips_grouped for easier parsing\n for table in tables:\n sess.execute(f\"\"\"\n INSERT INTO {table}\n SELECT *\n FROM temp_{table}\n \"\"\")\n sess.commit()\n\n # Combined first set of zips\n zips = sess.query(ZipsGrouped).filter_by(zip5=zip_same1.zip5).all()\n assert len(zips) == 1\n assert zips[0].zip5 == zip_same1.zip5\n assert zips[0].state_abbreviation == zip_same1.state_abbreviation\n assert zips[0].county_number == zip_same1.county_number\n assert zips[0].congressional_district_no == zip_same1.congressional_district_no\n\n # Different states, same everything else\n zips = sess.query(ZipsGrouped).filter_by(zip5=zip_state1.zip5).order_by(ZipsGrouped.state_abbreviation).all()\n assert len(zips) == 2\n assert zips[0].zip5 == zip_state1.zip5\n assert zips[0].state_abbreviation == zip_state1.state_abbreviation\n assert zips[0].county_number == zip_state1.county_number\n assert zips[0].congressional_district_no == zip_state1.congressional_district_no\n assert zips[1].zip5 == zip_state2.zip5\n assert zips[1].state_abbreviation == zip_state2.state_abbreviation\n assert zips[1].county_number == zip_state2.county_number\n assert zips[1].congressional_district_no == zip_state2.congressional_district_no\n\n # Different counties, same everything else\n zips = sess.query(ZipsGrouped).filter_by(zip5=zip_county1.zip5).order_by(ZipsGrouped.county_number).all()\n assert len(zips) == 2\n assert zips[0].zip5 == zip_county1.zip5\n assert zips[0].state_abbreviation == zip_county1.state_abbreviation\n assert zips[0].county_number == zip_county1.county_number\n assert zips[0].congressional_district_no == zip_county1.congressional_district_no\n assert zips[1].zip5 == zip_county2.zip5\n assert zips[1].state_abbreviation == zip_county2.state_abbreviation\n assert zips[1].county_number == 
zip_county2.county_number\n assert zips[1].congressional_district_no == zip_county2.congressional_district_no\n\n # Different congressional districts\n zips = sess.query(ZipsGrouped).filter_by(zip5=zip_cd1.zip5).all()\n assert len(zips) == 1\n assert zips[0].zip5 == zip_cd1.zip5\n assert zips[0].state_abbreviation == zip_cd1.state_abbreviation\n assert zips[0].county_number == zip_cd1.county_number\n assert zips[0].congressional_district_no == '90'\n\n # Different states, different congressional districts\n zips = sess.query(ZipsGrouped).filter_by(zip5=zip_state_cd1.zip5).order_by(ZipsGrouped.state_abbreviation).all()\n assert len(zips) == 2\n assert zips[0].zip5 == zip_state_cd1.zip5\n assert zips[0].state_abbreviation == zip_state_cd1.state_abbreviation\n assert zips[0].county_number == zip_state_cd1.county_number\n assert zips[0].congressional_district_no == '90'\n assert zips[1].zip5 == zip_state_cd2.zip5\n assert zips[1].state_abbreviation == zip_state_cd2.state_abbreviation\n assert zips[1].county_number == zip_state_cd2.county_number\n assert zips[1].congressional_district_no == '90'\n\n # Null congressional district\n zips = sess.query(ZipsGrouped).filter_by(zip5=zip_null_cd.zip5).all()\n assert len(zips) == 1\n assert zips[0].zip5 == zip_null_cd.zip5\n assert zips[0].state_abbreviation == zip_null_cd.state_abbreviation\n assert zips[0].county_number == zip_null_cd.county_number\n assert zips[0].congressional_district_no == '90'\n\n # CDStateGrouped\n # Thirds - threshold overwritten to 100%\n cds = sess.query(CDStateGrouped).filter_by(state_abbreviation=cd_state_grouped_thirds_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == '90'\n # Half\n cds = sess.query(CDStateGrouped).filter_by(state_abbreviation=cd_state_grouped_half_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == '90'\n # Match\n cds = sess.query(CDStateGrouped).filter_by(state_abbreviation=cd_state_grouped_match.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == cd_state_grouped_match.congressional_district_no\n\n # CDZipsGrouped\n # Thirds\n cds = sess.query(CDZipsGrouped).filter_by(zip5=cd_zips_grouped_thirds_1.zip5,\n state_abbreviation=cd_zips_grouped_thirds_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == cd_zips_grouped_thirds_1.congressional_district_no\n # Half\n cds = sess.query(CDZipsGrouped).filter_by(zip5=cd_zips_grouped_half_1.zip5,\n state_abbreviation=cd_zips_grouped_half_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == '90'\n # Match\n cds = sess.query(CDZipsGrouped).filter_by(zip5=cd_zips_grouped_match.zip5,\n state_abbreviation=cd_zips_grouped_match.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == cd_state_grouped_match.congressional_district_no\n\n # CDCityGrouped\n # Thirds\n cds = sess.query(CDCityGrouped).filter_by(city_name=cd_zip_city_1.city_name,\n state_abbreviation=cd_city_grouped_thirds_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == cd_city_grouped_thirds_1.congressional_district_no\n # Half\n cds = sess.query(CDCityGrouped).filter_by(city_name=cd_zip_city_2.city_name,\n state_abbreviation=cd_city_grouped_half_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == '90'\n # Match\n cds = 
sess.query(CDCityGrouped).filter_by(city_name=cd_zip_city_3.city_name,\n state_abbreviation=cd_city_grouped_match.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == cd_city_grouped_match.congressional_district_no\n\n # CDCountyGrouped\n # Thirds\n cds = sess.query(CDCountyGrouped).filter_by(county_number=cd_county_grouped_thirds_1.county_number,\n state_abbreviation=cd_county_grouped_thirds_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == cd_county_grouped_thirds_1.congressional_district_no\n # Half\n cds = sess.query(CDCountyGrouped).filter_by(county_number=cd_county_grouped_half_1.county_number,\n state_abbreviation=cd_county_grouped_half_1.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == '90'\n # Match\n cds = sess.query(CDCountyGrouped).filter_by(county_number=cd_county_grouped_match.county_number,\n state_abbreviation=cd_county_grouped_match.state_abbreviation).all()\n assert len(cds) == 1\n assert cds[0].congressional_district_no == cd_county_grouped_match.congressional_district_no",
"def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)",
"def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False",
"def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_zip_state_requirements(self):\n form_data = self.form_data(clear=['billing_state'])\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(billing_state='')\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(clear=['billing_zip'])\n form = DonationPaymentForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data = self.form_data(clear=['billing_state', 'billing_zip'])\n form_data['country'] = 'CAN'\n form = DonationPaymentForm(data=form_data)\n self.assertTrue(form.is_valid())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that the plan rate data is cleaned properly and is returned with sorted, unique values for each rate area.
|
def test_clean_plan_rates_sorts_and_makes_data_unique(self):
input = {
('IN', '1'): [
'304.5',
'422.28',
'386.79',
'382.7',
'332.21',
'422.28',
'382.7'
],
('SD', '2'): [
'279.4',
'250.14',
'270.13',
'274.56',
'247.67',
'279.4',
'270.13'
],
('FL', '63'): [
'398.14',
'330.9',
'324.61',
'398.14',
'345.91',
'214.32',
'330.9'
],
('FL', '54'): [
'428.03',
'294.87',
'339.6',
'409.72',
'294.44'
]
}
expected = {
('IN', '1'): [
'304.5',
'332.21',
'382.7',
'386.79',
'422.28'
],
('SD', '2'): [
'247.67',
'250.14',
'270.13',
'274.56',
'279.4'
],
('FL', '63'): [
'214.32',
'324.61',
'330.9',
'345.91',
'398.14'
],
('FL', '54'): [
'294.44',
'294.87',
'339.6',
'409.72',
'428.03'
]
}
cleaned_plan_data = clean_plan_rates(input)
self.assertEqual(expected, cleaned_plan_data)
|
[
"def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))",
"def test_get_all_rate_plans(self):\n pass",
"def test_sorting_ascending_by_area():",
"def test_sorting_ascending_by_price_and_area():",
"def test_sorting_descending_by_area():",
"def test_sorting_descending_by_price_and_area():",
"def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_get_rate_plan_by_product_and_rate_plan(self):\n pass",
"def test_get_rate_plan_by_product(self):\n pass",
"def test_remove_taxation_strategy_from_rate_plan(self):\n pass",
"def test_filters():\n\n csvpath =Path('../data/daily_rate_sheet.csv')\n bank_data = fileio.load_csv(csvpath)\n current_credit_score = 750\n debt = 1500\n income = 4000\n loan = 210000\n home_value = 250000\n\n monthly_debt_ratio = 0.375\n\n loan_to_value_ratio = 0.84\n\n filtered_data = max_loan_size.filter_max_loan_size(loan, bank_data)\n filtered_data = credit_score.filter_credit_score(current_credit_score, filtered_data)\n filtered_data = debt_to_income.filter_debt_to_income(monthly_debt_ratio, filtered_data) \n filtered_data = loan_to_value.filter_loan_to_value(loan_to_value_ratio, filtered_data)\n \n assert len(filtered_data) == 6",
"def sort_by_area():\n # Create a list where index --> neuron and value --> area\n matched = [areas_from_channels[int(c)] for c in channels]\n # Find the indices (aka neurons) where they have a score < 2\n bad_indices = [i for i, score in enumerate(quality) if score[0] < 2]\n # Create a dictionary to sort neurons according to areas\n d = {}\n for index, area in enumerate(matched): # Iterate index and value together\n # Discard bad recordings\n if index not in bad_indices:\n # If the area is already a key then append this neuron index\n if area in d.keys():\n d[area].append(index)\n # Else create a new key for a single element list\n else:\n d[area] = [index]\n return d",
"def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_create_rate_plan(self):\n pass",
"def get_remove_list(db_path, file, include_data, airline, can_limit, zs_limit, processed_direc):\n\n z_score_path = '%s%s_%s_Zdata_%s.csv'%(processed_direc, file,airline,include_data) \n #df_score = pd.read_csv(raw_file_drop, index_col=\"Date\")\n df_score = pd.read_csv(z_score_path, index_col = \"Day_of_Year\")\n df_score.index = pd.to_datetime(df_score.index)\n airport_list = df_score.columns.tolist()\n \n df = atn_analysis.raw_query(db_path,file,airline)\n\n df = df[df['Origin_Airport_Code'].isin(airport_list)] # Filtering to make sure airports are equal in both directions\n df = df[df['Destination_Airport_Code'].isin(airport_list)]\n by_origin_count = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].count()\n by_origin = df.groupby(['Flight_Date', 'Origin_Airport_Code'], as_index=False)[['Can_Status']].sum()\n by_origin.Can_Status = by_origin.Can_Status / by_origin_count.Can_Status\n #print(by_origin)\n df_score[\"idx\"] = df_score.index\n df_score = pd.melt(df_score, id_vars='idx', value_vars=airport_list)\n df_score = df_score.sort_values(['idx', 'variable'], ascending=[True, True])\n df_score.columns = [\"Date\", \"Airports\", \"Z_Score\"]\n df_score.set_index('Date')\n df_score[\"Cancellations\"] = by_origin.Can_Status\n\n ### Creating the or conditions. First is the percentage of delayed flights and the second is the z-score\n df_score[\"Z_score_9901\"] = np.where((df_score['Cancellations'] > can_limit) | (df_score['Z_Score'] > zs_limit), 1, 0)\n #print(df_score)\n\n ### Creating pivot table for easy manipulation. This creates the date as the index with the properties corresponding to\n ### it and finally repeats this trend for all airports being considered.\n df_pivot = df_score.pivot_table('Z_score_9901', ['Date'], 'Airports')\n #print(df_pivot)\n\n s = np.asarray(np.where(df_pivot == 1, ['{}'.format(x) for x in df_pivot.columns], '')).tolist()\n\n\n s_nested = []\n for k in s:\n p = list(filter(None,k))\n \n #p = filter(None,k)\n s_nested.append(p)\n #s_nested.extend(p)\n\n\n return s_nested",
"def test_retire_rate_plan(self):\n pass",
"def test_summarize_results(self):\n lambda_zpes = [0.95, 0.98]\n levels_of_theory = [Level(method='CBS-QB3'),\n Level(method='B2PLYPD3', basis='Def2TZVP'),\n ]\n times = ['3', '5']\n overall_time = '8.5'\n base_path = os.path.join(ARC_PATH, 'Projects', 'scaling_factors_arc_testing_delete_after_usage')\n\n summarize_results(lambda_zpes=lambda_zpes,\n levels=levels_of_theory,\n zpe_dicts=self.zpe_dicts,\n times=times,\n overall_time=overall_time,\n base_path=base_path)\n\n info_file_path = os.path.join(base_path, 'scaling_factors_0.info')\n self.assertTrue(os.path.isfile(info_file_path))\n with open(info_file_path, 'r') as f:\n lines = f.readlines()\n self.assertIn('CITATIONS:\\n', lines)\n self.assertIn('Level of theory: cbs-qb3, software: gaussian (composite)\\n', lines)\n self.assertIn('Level of theory: b2plypd3/def2tzvp, software: gaussian (dft)\\n', lines)\n self.assertIn('The following species from the standard set did not converge at this level:\\n', lines)\n self.assertIn(\" ['CO2']\\n\", lines)\n self.assertIn('Scale Factor for Fundamental Frequencies = 0.955\\n', lines)\n self.assertIn('Scale Factor for Harmonic Frequencies = 0.994\\n', lines)\n self.assertIn('You may copy-paste the following harmonic frequency scaling factor(s) to the RMG-database repository\\n', lines)\n self.assertIn(\"\"\" \"LevelOfTheory(method='cbs-qb3')\": 0.963, # [4]\\n\"\"\", lines)\n self.assertIn(\"\"\" \"LevelOfTheory(method='b2plypd3',basis='def2tzvp')\": 0.994, # [4]\\n\"\"\", lines)\n self.assertIn('Scaling factors calculation for 2 levels of theory completed (elapsed time: 8.5).\\n', lines)",
"def _get_unique_values(self, data):\r\n print ( \"=\" * 200 )\r\n print ( \"{:10} {:25}\".format ( \"Feature Name\".upper (),\r\n \"Unique values\".upper () ) )\r\n for feature_name in self.missing_values.index.values:\r\n print ( \"{:25}\".format ( feature_name ), end = \"\" )\r\n print ( data [ feature_name ].unique () [ :10 ], end = \",\" )\r\n print ()",
"def perarea():\r\n return ['tons_ac', 'lbs_ac', 'kg_ha', 'kg_m^2', 'tonnes_ha', 'kg_km^2',\r\n 'tonnes_km^2']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that, given valid inputs, a zipcode is properly mapped to its SLCSP rate.
|
def test_zipcode_is_successfully_mapped(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'11111': [('NY', '5')]}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = '294.87'
slcsp_rate = retrieve_slcsp_for_zipcode(
zipcode,
cleaned_zipcode_data_input,
cleaned_plan_data_input
)
self.assertEqual(expected, slcsp_rate)
|
[
"def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_update_zip_post_code(self):\n pass",
"def test_search_zip_post_code(self):\n pass",
"def test_post_bad_zipcode(self):\n response = self._index()\n form = response.forms[0]\n # Fill in all required fields.\n form.fields['billing_amount'][0].value = '40.00 option 1'\n form.fields['billing_email'][0].value = 'foo@bar.com'\n form.fields['billing_name'][0].value = 'name o. card'\n form.fields['billing_street'][0].value = '123 fake st'\n form.fields['billing_zip'][0].value = '8230'\n form.fields['billing_card_number'][0].value = '5105105105105100'\n form.fields['billing_expiration_month'][0].value = '06'\n form.fields['billing_expiration_year'][0].value = EXP_YEAR\n # Submit it and check for errors.\n response = form.submit()\n form = response.forms[0]\n doc = HTML(response.body)\n # Check form-errors for presence of error text.\n form_errors = CSSSelector('#form-errors')(doc)\n assert len(form_errors) == 1\n assert form_errors[0].text != ''\n # Check ZIP code for presence of error text.\n zip_errors = CSSSelector('#billing_zip-errors')(doc)\n assert len(zip_errors) == 1\n assert zip_errors[0].text != ''",
"def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))",
"def test_company_EU_GR_vies_tax(self):\n self.assertEqual(self.policy.get_tax_rate(\"123456\", \"GR\"), (24, False))",
"def test_zip_detail_good(self):\n city, state = get_city_and_state('12550')\n self.assertEqual('Newburgh', city)\n self.assertEqual('NY', state)",
"def test_post_business_exchange_rates(self):\n pass",
"def test_get_business_exchange_rates(self):\n pass",
"def test_rate_always_formatted_to_two_decimal_places(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_inputs = [\n {('NY', '5'): ['294.24', '294']},\n {('NY', '5'): ['294.24', '294.7']},\n {('NY', '5'): ['294.24', '294.3452']},\n {('NY', '5'): ['294.24', '294.24']}\n ]\n\n # NOTE: Formatting a decimal.Decimal value will result in rounding.\n expected_results = ['294.00', '294.70', '294.35', '294.24']\n\n for i, cleaned_plan_data_input in enumerate(cleaned_plan_data_inputs):\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected_results[i], slcsp_rate)",
"def test_validate_location_zip_alpha(self):\n answer = wunderground_scraper.validate_location('4OO65')\n self.assertEqual(answer, False)",
"def test_valids(self):\n for postcode, expected_result in self.valid_postcodes:\n self.failUnlessEqual(validate_postcode(postcode), expected_result)",
"def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_searchAPI_singlezipcode(self):\n\n get_url = 'https://api-stage.handstandapp.com/api/v2/search?zip_code=90405&lng=-118.413549&lat=34.070766&date=2018-12-25&time=22:30:00&goal=Boxing,Kids%20Yoga,Bootcamp'\n\n\n\n host_address = 'https://stage.handstandapp.com'\n r = requests.get(get_url)\n a=r.json()\n if r.status_code != 200:\n print(Fore.RED + r.headers[\"content-type\"])\n print(Fore.RED + \"[ERROR] when calling[\" + get_url + \"] got back HTTP response code:\" + str(r.status_code))\n print(Fore.RED + json.dumps(r.json(), indent=4))\n elif r.headers[\"content-type\"] != 'application/json':\\\n print(Fore.RED + \"[ERROR] when calling [\" + get_url + \"] got back non JSON data:\" + r.headers['content-type'])\n else:\n print(Fore.GREEN + \"[SUCCESS]when calling[\" + get_url + \"]\")\n for i in range(1, len(a['data'])):\n trainer_zip= a['data'][i]['zip']\n dst=a['data'][i]['max_distance']\n t_zip=(trainer_zip)\n url1 = \"http://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=\"+t_zip+\"&destinations=90405/mile\"\n r1 = requests.get(url1)\n\n a1 = r1.json()\n b = a1['rows'][0]['elements'][0]['distance']['text']\n b1 = b[0:4].strip()\n c=b1\n try:\n if int(dst)>=int(c):\n print(Fore.BLUE+'Valid trainer list','--->','Trainer_ID-',a['data'][i]['id'],' ' 'MAX_DST-',dst,' ' 'ACTUAL_DST-', c,' ' 'ZIPCODE-', t_zip,)\n else:\n print(Fore.RED+' Wrong trainer list','--->','Trainer_ID-',a['data'][i]['id'],' ' 'MAX_DST-',dst,' ' 'ACTUAL_DST-', c,' ' 'ZIPCODE-', t_zip,)\n except:pass",
"def test_rate_cost_type_valid(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\n \"unit\": \"USD\",\n \"value\": 0.22,\n \"usage\": {\"usage_start\": None, \"usage_end\": None},\n \"cost_type\": \"Infrastructure\",\n }\n ]\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = [\n {\n \"unit\": \"USD\",\n \"value\": 0.22,\n \"usage\": {\"usage_start\": None, \"usage_end\": None},\n \"cost_type\": \"Supplementary\",\n }\n ]\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n serializer.save()",
"def calculate(fromZip, toZip, weight):\n url = ('https://secure.shippingapis.com/ShippingAPI.dll?' +\n 'API=RateV4' +\n '&XML=<RateV4Request USERID=\"751JOSHU0578\">' +\n '<Revision>2</Revision>' +\n '<Package ID=\"1ST\">' +\n '<Service>First Class Commercial</Service>' +\n '<FirstClassMailType>Parcel</FirstClassMailType>' +\n '<ZipOrigination>' +\n fromZip +\n '</ZipOrigination>' +\n '<ZipDestination>' +\n toZip +\n '</ZipDestination>' +\n '<Pounds>0</Pounds>' +\n '<Ounces>' +\n weight +\n '</Ounces>' +\n '<Container>Rectangular</Container>' +\n '<Size>Variable</Size>' +\n '<Machinable>true</Machinable>' +\n '</Package></RateV4Request>')\n\n zonePriceList = list()\n r = requests.get(url) #intializes Response object\n soup = str(BeautifulSoup(r.text, 'html.parser')) #intializes Soup object\n\n rateIndex = soup.index('<commercialrate>')\n slicedSoup = soup[rateIndex:] #slices everything before priceIndex in Soup\n\n if '<zone>' in soup:\n #retrieving indicies\n zoneIndex = soup.index('<zone>')\n priceIndex = slicedSoup.index('<commercialrate>')\n dotIndex = slicedSoup.index('.')\n\n\n zonePriceList.append(soup[zoneIndex+6]) #append Zone\n if slicedSoup[priceIndex+17] == '.': #append Price\n zonePriceList.append(slicedSoup[priceIndex+16] +\n slicedSoup[dotIndex] + slicedSoup[dotIndex+1] + slicedSoup[dotIndex+2])\n\n elif slicedSoup[priceIndex+17] != '.': #guard against inflation when prices > $10\n zonePriceList.append(slicedSoup[priceIndex+16]+ slicedSoup[priceIndex+17] +\n slicedSoup[dotIndex] + slicedSoup[dotIndex+1] + slicedSoup[dotIndex+2])\n\n return zonePriceList\n else:\n return \" No zone or price found\"",
"def compare_zipcodes(s1, s2):\n\n # check if the zipcode are identical (return 1 or 0)\n sim = (s1 == s2).astype(float)\n\n # check the first 2 numbers of the distinct comparisons\n sim[(sim == 0) & (s1.str[0:2] == s2.str[0:2])] = 0.5\n\n return sim",
"def test_get_exchange_rates(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that, per the exercise instructions, an empty string is returned when no matching rate is found for a zipcode.
|
def test_no_rate_found_is_empty_string(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'22222': [('NH', '12')]}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = ''
slcsp_rate = retrieve_slcsp_for_zipcode(
zipcode,
cleaned_zipcode_data_input,
cleaned_plan_data_input
)
self.assertEqual(expected, slcsp_rate)
|
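The implementation of retrieve_slcsp_for_zipcode is not included in this dump, so the following is a minimal sketch only, assuming the argument order and data shapes used by the tests in this section (a zipcode maps to a list of (state, rate_area) tuples, and a rate area maps to a list of rate strings). The guard order and the format check are assumptions; only the observable behaviour shown by the tests is taken as given: an empty string for an invalid or unknown zipcode, for zero or multiple rate areas, or for missing rates, and otherwise the second-lowest rate.

from decimal import Decimal


def retrieve_slcsp_for_zipcode(zipcode, cleaned_zipcode_data, cleaned_plan_data):
    # Reject non-string and non-five-digit zipcodes up front so unhashable
    # inputs (lists, dicts) never reach the dictionary lookup below.
    if not isinstance(zipcode, str) or len(zipcode) != 5 or not zipcode.isdigit():
        return ''

    # A zipcode that is unknown, or that maps to zero or to several rate
    # areas, is ambiguous: return an empty string.
    rate_areas = cleaned_zipcode_data.get(zipcode, [])
    if len(rate_areas) != 1:
        return ''

    # Rates are stored as strings, e.g. ['294.44', '294.87', '339.6'].
    rates = cleaned_plan_data.get(rate_areas[0], [])
    if len(rates) < 2:
        return ''

    # Second-lowest rate, compared numerically rather than lexically.
    # Handling of duplicate lowest rates is not exercised by these tests,
    # so the real implementation may differ on that point.
    return sorted(rates, key=Decimal)[1]

With the fixture above, for example, '11111' passes the format guard but is absent from {'22222': [('NH', '12')]}, so the rate-area guard returns the empty string the test expects; with {'11111': [('NY', '5')]} and rates ['294.44', '294.87', '339.6'] it returns '294.87', matching the successful-mapping test that appears among the negatives below.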
[
"def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code",
"def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n for incorrect_zipcode in incorrect_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n incorrect_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)\n\n for non_string_zipcode in non_string_zipcodes:\n slcsp_rate = retrieve_slcsp_for_zipcode(\n non_string_zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def process_zip_code(input_zip: str) -> str:\n int(input_zip)\n\n return input_zip.zfill(5)",
"def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False",
"def zip_code(self, value):\n regex = config.get('validators', 'zip_code')\n zipcode = re.search(regex,\n value)\n if not zipcode:\n raise ZipCodeError(\"ZipCodeError: 'zip_code' must be 5 non-float digits\")\n else:\n self._zip_code = value",
"def test_search_zip_post_code(self):\n pass",
"def validate_zipcode(zipcode):\n return re.match(r'^[0-9]{8}$', zipcode)",
"def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def zip_code(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\treturn element.element_value",
"def get_closest_station_by_zipcode(zipcode):\n\n station_lookup_method_by_zipcode = lookup_usaf_station_by_zipcode(zipcode)\n try:\n station, warnings, lat, lon = _get_closest_station_by_zcta_ranked(zipcode)\n\n isd_metadata = get_isd_file_metadata(str(station))\n if len(isd_metadata) == 0:\n logging.warning(\"Zipcode %s mapped to station %s, but no ISD metadata was found.\" % (zipcode, station))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedUSAFIDError as e:\n logging.warning(\"Closest station %s is not a recognized station. Using backup-method station %s for zipcode %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode,\n zipcode))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedZCTAError as e:\n logging.warning(\"Unrecognized ZCTA %s\" % e)\n return None\n\n if str(station) != station_lookup_method_by_zipcode:\n logging.debug(\"Previously would have selected station %s instead of %s for zip code %s\" % (\n station_lookup_method_by_zipcode,\n str(station),\n zipcode))\n\n if warnings:\n logging.warning(\"Station %s is %d meters over maximum %d meters (%d meters) (zip code %s is at lat/lon %f, %f)\" % (\n str(station),\n int(warnings[0].data['distance_meters'] - warnings[0].data['max_distance_meters']),\n int(warnings[0].data['max_distance_meters']),\n int(warnings[0].data['distance_meters']),\n zipcode,\n lat,\n lon,\n ))\n logging.warning(\"Closest station %s is too far. Using backup-method station %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode))\n return station_lookup_method_by_zipcode\n\n return str(station)",
"def find_fips_code(zip_code):\n\n url = 'https://api.vericred.com/zip_counties'\n\n payload = {\"zip_prefix\": zip_code}\n\n req = requests.get(url, params=payload, headers=HEADERS)\n req = req.json()\n\n counties = req['counties'][0]\n\n fips_code = counties.get('fips_code')\n\n return fips_code",
"def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county",
"def lookup_usaf_station_by_zipcode(zipcode):\n\n usaf = zipcode_usaf.get(zipcode, None)\n return usaf",
"def test_zip_detail_bad(self):\n city, state = get_city_and_state('99990')\n self.assertEqual('', city)\n self.assertEqual('', state)",
"def type_zip_code(self, zip_code):\n\n\t\twith allure.step(\"Type payee zip code\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\t\telement.write(zip_code)\n\t\t\treturn None",
"def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False",
"def test_post_bad_zipcode(self):\n response = self._index()\n form = response.forms[0]\n # Fill in all required fields.\n form.fields['billing_amount'][0].value = '40.00 option 1'\n form.fields['billing_email'][0].value = 'foo@bar.com'\n form.fields['billing_name'][0].value = 'name o. card'\n form.fields['billing_street'][0].value = '123 fake st'\n form.fields['billing_zip'][0].value = '8230'\n form.fields['billing_card_number'][0].value = '5105105105105100'\n form.fields['billing_expiration_month'][0].value = '06'\n form.fields['billing_expiration_year'][0].value = EXP_YEAR\n # Submit it and check for errors.\n response = form.submit()\n form = response.forms[0]\n doc = HTML(response.body)\n # Check form-errors for presence of error text.\n form_errors = CSSSelector('#form-errors')(doc)\n assert len(form_errors) == 1\n assert form_errors[0].text != ''\n # Check ZIP code for presence of error text.\n zip_errors = CSSSelector('#billing_zip-errors')(doc)\n assert len(zip_errors) == 1\n assert zip_errors[0].text != ''"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that no rate is returned when the zipcode is not a valid five-digit string, covering both malformed strings and non-string inputs.
|
def test_only_five_digit_zipcodes_match(self):
incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']
non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]
cleaned_zipcode_data_input = {'11111': [('NY', '5')]}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = ''
for incorrect_zipcode in incorrect_zipcodes:
slcsp_rate = retrieve_slcsp_for_zipcode(
incorrect_zipcode,
cleaned_zipcode_data_input,
cleaned_plan_data_input
)
self.assertEqual(expected, slcsp_rate)
for non_string_zipcode in non_string_zipcodes:
slcsp_rate = retrieve_slcsp_for_zipcode(
non_string_zipcode,
cleaned_zipcode_data_input,
cleaned_plan_data_input
)
self.assertEqual(expected, slcsp_rate)
|
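One way to express the format guard this test exercises, assuming only the observable behaviour (the real module may validate zipcodes differently), is a strict five-digit check that also rejects non-string inputs. The helper name below is hypothetical.

import re


def is_five_digit_zipcode(value):
    # Hypothetical helper: accept only str instances made of exactly five
    # digits; ints, floats, bools, lists and dicts all fail the isinstance
    # check, and strings such as '12345-6789' or '123ab' fail the pattern.
    return isinstance(value, str) and re.fullmatch(r'\d{5}', value) is not None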
[
"def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False",
"def validate_zipcode(zipcode):\n return re.match(r'^[0-9]{8}$', zipcode)",
"def zip_code(self, value):\n regex = config.get('validators', 'zip_code')\n zipcode = re.search(regex,\n value)\n if not zipcode:\n raise ZipCodeError(\"ZipCodeError: 'zip_code' must be 5 non-float digits\")\n else:\n self._zip_code = value",
"def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def out_of_range(postcode):\n\n valid_zipcode = ['940', '945', '950', '951']\n if first_3_digit.search(postcode).group() not in valid_zipcode:\n return True\n else:\n return False",
"def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False",
"def test_post_bad_zipcode(self):\n response = self._index()\n form = response.forms[0]\n # Fill in all required fields.\n form.fields['billing_amount'][0].value = '40.00 option 1'\n form.fields['billing_email'][0].value = 'foo@bar.com'\n form.fields['billing_name'][0].value = 'name o. card'\n form.fields['billing_street'][0].value = '123 fake st'\n form.fields['billing_zip'][0].value = '8230'\n form.fields['billing_card_number'][0].value = '5105105105105100'\n form.fields['billing_expiration_month'][0].value = '06'\n form.fields['billing_expiration_year'][0].value = EXP_YEAR\n # Submit it and check for errors.\n response = form.submit()\n form = response.forms[0]\n doc = HTML(response.body)\n # Check form-errors for presence of error text.\n form_errors = CSSSelector('#form-errors')(doc)\n assert len(form_errors) == 1\n assert form_errors[0].text != ''\n # Check ZIP code for presence of error text.\n zip_errors = CSSSelector('#billing_zip-errors')(doc)\n assert len(zip_errors) == 1\n assert zip_errors[0].text != ''",
"def test_zipcode_is_successfully_mapped(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = '294.87'\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True",
"def process_zip_code(input_zip: str) -> str:\n int(input_zip)\n\n return input_zip.zfill(5)",
"def test_validate_location_zip_alpha(self):\n answer = wunderground_scraper.validate_location('4OO65')\n self.assertEqual(answer, False)",
"def test_zip_detail_bad(self):\n city, state = get_city_and_state('99990')\n self.assertEqual('', city)\n self.assertEqual('', state)",
"def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code",
"def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))",
"def test_search_zip_post_code(self):\n pass",
"def validate_zipcode_on_before(req, resp, params):\n zipcode = req.get_param('zip_code') or None\n if not zipcode:\n raise falcon.HTTPError(\n falcon.HTTP_400, 'A `zip_code` param must be informed.')\n if not validate_zipcode(zipcode):\n raise falcon.HTTPError(\n falcon.HTTP_400,\n 'The `zip_code` param must be an integer valid zipcode '\n 'with 8 digits.')",
"def clean_incident_zip(zipcode):\n zipcode = str(zipcode).replace('.0', '')[:5]\n try:\n zipcode = int(zipcode)\n except:\n return None\n # Pad it on the left with '0's\n zipcode = '{:05}'.format(zipcode)\n return zipcode",
"def is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid",
"def is_valid_postcode(postcode):\n if len(postcode) != 6 or postcode[:2] != \"72\":\n return False\n return postcode.isdigit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that an empty string is returned if no plan areas exist for a given zipcode.
|
def test_empty_string_returned_if_no_plan_areas_exist(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'11111': []}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = ''
slcsp_rate = retrieve_slcsp_for_zipcode(
zipcode,
cleaned_zipcode_data_input,
cleaned_plan_data_input
)
self.assertEqual(expected, slcsp_rate)
|
[
"def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_zip_detail_bad(self):\n city, state = get_city_and_state('99990')\n self.assertEqual('', city)\n self.assertEqual('', state)",
"def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_validate_location_zip_alpha(self):\n answer = wunderground_scraper.validate_location('4OO65')\n self.assertEqual(answer, False)",
"def test_zip_detail_good(self):\n city, state = get_city_and_state('12550')\n self.assertEqual('Newburgh', city)\n self.assertEqual('NY', state)",
"def _4_validate_zipCode_field(self):\n ###To check for the error message if Zip code is not entered.###\n driver.find_element_by_id(\"city\").send_keys(columns['city'][0])\n driver.find_element_by_id(\"zip_code\").clear()\n driver.find_element_by_id(\"btnSignUp\").click()\n print\"Verifying whether appropriate error message gets displayed if Zip code is not entered.\"\n errors = driver.find_elements_by_xpath(\"//html/body/div[3]/div[2]/div/div/form/div/div[6]/label[2]\")\n if errors != []:\n self.assertEqual(\"This field is required.\", str(errors[0].get_attribute('innerHTML')),\n \"Error message is not appropriate.\")\n print \"Appropriate error message is displayed.\"",
"def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))",
"def resolve_zip_area_code(element, zcta_data, zcta_cache):\n pos = element['pos']\n old_zip = None\n if 'address' in element:\n if 'postcode' in element['address']:\n old_zip = element['address']['postcode']\n new_zip = None\n\n for z_cache_key in zcta_cache.keys():\n #As per my investigation shapely point expects lon / lat order of geocoord\n if zcta5_import.coord_within_ziparea((pos[1], pos[0]), zcta_cache[z_cache_key]):\n new_zip = z_cache_key\n logging.debug(\"Resolved ZIP area %s from cache\", new_zip)\n break\n \n if new_zip is None:\n for z_key in zcta_data.keys():\n #As per my investigation shapely point expects lon / lat order of geocoord\n if zcta5_import.coord_within_ziparea((pos[1], pos[0]), zcta_data[z_key]):\n new_zip = z_key\n logging.debug(\"Resolved ZIP area %s from ZCTA dataset\", new_zip)\n break\n \n if new_zip is not None:\n if new_zip != old_zip:\n logging.warn('Resolved different ZIP for the location %s (OLD: \"%s\", NEW: \"%s\")', element['id'], old_zip, new_zip)\n \n return new_zip",
"def test_search_zip_post_code(self):\n pass",
"def validate_zipcode(zipcode):\n return re.match(r'^[0-9]{8}$', zipcode)",
"def test_post_bad_zipcode(self):\n response = self._index()\n form = response.forms[0]\n # Fill in all required fields.\n form.fields['billing_amount'][0].value = '40.00 option 1'\n form.fields['billing_email'][0].value = 'foo@bar.com'\n form.fields['billing_name'][0].value = 'name o. card'\n form.fields['billing_street'][0].value = '123 fake st'\n form.fields['billing_zip'][0].value = '8230'\n form.fields['billing_card_number'][0].value = '5105105105105100'\n form.fields['billing_expiration_month'][0].value = '06'\n form.fields['billing_expiration_year'][0].value = EXP_YEAR\n # Submit it and check for errors.\n response = form.submit()\n form = response.forms[0]\n doc = HTML(response.body)\n # Check form-errors for presence of error text.\n form_errors = CSSSelector('#form-errors')(doc)\n assert len(form_errors) == 1\n assert form_errors[0].text != ''\n # Check ZIP code for presence of error text.\n zip_errors = CSSSelector('#billing_zip-errors')(doc)\n assert len(zip_errors) == 1\n assert zip_errors[0].text != ''",
"def getArea(self, countrycode):\n for area in self.listAreas():\n if countrycode in [c.isocc for c in area.countries]:\n return area.name\n return None",
"def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None",
"def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code",
"def test_find_city_when_not_present(self):\n route_temp = Route()\n route_temp.start_address = \"i live in austin\"\n city_n_p = helper_functions.find_city(self.load_data(), route_temp)\n self.assertEqual(None, city_n_p)",
"def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False",
"def test_city_no_zip(self):\n self.prep_location()\n self.loc.location_city = 'Fishkill'\n self.loc.save()\n # Creates coords if city or zip is populated.\n coords = GEOCODE_LOCATION.get_coordinate(location=self.loc)\n self.assertTrue(coords)",
"def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county",
"def test_incomplete_polygons():\n assert not query_row(db_conf, 'osm_landusages', 30004)\n assert not query_row(db_conf, 'osm_landusages', 30006)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests that an empty string is returned if more than one plan area exists for a given zipcode.
|
def test_empty_string_returned_if_too_many_plan_areas_exist(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = ''
slcsp_rate = retrieve_slcsp_for_zipcode(
zipcode,
cleaned_zipcode_data_input,
cleaned_plan_data_input
)
self.assertEqual(expected, slcsp_rate)
|
[
"def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def test_zip_detail_bad(self):\n city, state = get_city_and_state('99990')\n self.assertEqual('', city)\n self.assertEqual('', state)",
"def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('WI', '2'), ('NY', '5'), ('NY', '5')],\n '77777': [\n ('WI', '2'),\n ('WI', '2'),\n ('NY', '5'),\n ('NY', '5'),\n ('CA', '7')\n ]\n }\n\n expected = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2')],\n '55555': [('WI', '2'), ('NY', '5')],\n '66666': [('WI', '2'), ('NY', '5')],\n '77777': [('WI', '2'), ('NY', '5'), ('CA', '7')]\n }\n\n cleaned_rate_areas = clean_zipcode_rate_areas(input)\n\n # Compare each set of rate areas for every zipcode; sort the values to\n # make sure we're comparing the data correctly.\n for zipcode, rate_areas in cleaned_rate_areas.items():\n self.assertEqual(sorted(rate_areas), sorted(expected[zipcode]))",
"def test_zip_detail_good(self):\n city, state = get_city_and_state('12550')\n self.assertEqual('Newburgh', city)\n self.assertEqual('NY', state)",
"def resolve_zip_area_code(element, zcta_data, zcta_cache):\n pos = element['pos']\n old_zip = None\n if 'address' in element:\n if 'postcode' in element['address']:\n old_zip = element['address']['postcode']\n new_zip = None\n\n for z_cache_key in zcta_cache.keys():\n #As per my investigation shapely point expects lon / lat order of geocoord\n if zcta5_import.coord_within_ziparea((pos[1], pos[0]), zcta_cache[z_cache_key]):\n new_zip = z_cache_key\n logging.debug(\"Resolved ZIP area %s from cache\", new_zip)\n break\n \n if new_zip is None:\n for z_key in zcta_data.keys():\n #As per my investigation shapely point expects lon / lat order of geocoord\n if zcta5_import.coord_within_ziparea((pos[1], pos[0]), zcta_data[z_key]):\n new_zip = z_key\n logging.debug(\"Resolved ZIP area %s from ZCTA dataset\", new_zip)\n break\n \n if new_zip is not None:\n if new_zip != old_zip:\n logging.warn('Resolved different ZIP for the location %s (OLD: \"%s\", NEW: \"%s\")', element['id'], old_zip, new_zip)\n \n return new_zip",
"def _4_validate_zipCode_field(self):\n ###To check for the error message if Zip code is not entered.###\n driver.find_element_by_id(\"city\").send_keys(columns['city'][0])\n driver.find_element_by_id(\"zip_code\").clear()\n driver.find_element_by_id(\"btnSignUp\").click()\n print\"Verifying whether appropriate error message gets displayed if Zip code is not entered.\"\n errors = driver.find_elements_by_xpath(\"//html/body/div[3]/div[2]/div/div/form/div/div[6]/label[2]\")\n if errors != []:\n self.assertEqual(\"This field is required.\", str(errors[0].get_attribute('innerHTML')),\n \"Error message is not appropriate.\")\n print \"Appropriate error message is displayed.\"",
"def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county",
"def test_validate_location_zip_alpha(self):\n answer = wunderground_scraper.validate_location('4OO65')\n self.assertEqual(answer, False)",
"def out_of_range(postcode):\n\n valid_zipcode = ['940', '945', '950', '951']\n if first_3_digit.search(postcode).group() not in valid_zipcode:\n return True\n else:\n return False",
"def validate_zipcode(zipcode):\n return re.match(r'^[0-9]{8}$', zipcode)",
"def getArea(self, countrycode):\n for area in self.listAreas():\n if countrycode in [c.isocc for c in area.countries]:\n return area.name\n return None",
"def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None",
"def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False",
"def valid_zipcode(line):\n zipcode = line.o_zip_code\n invalid_zip = len(zipcode) not in [5, 9] and zipcode.isdigit()\n if invalid_zip:\n rule = 'Zipcode length'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True",
"def test_search_zip_post_code(self):\n pass",
"def test_no_rate_found_is_empty_string(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'22222': [('NH', '12')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zipcode,\n cleaned_zipcode_data_input,\n cleaned_plan_data_input\n )\n\n self.assertEqual(expected, slcsp_rate)",
"def find_zip_codes(self, zip_code):\n zip_code = str(zip_code).strip()\n cursor = self.households.find({\"addresses.zip_code\":zip_code})\n results = [Household.from_dict(dct) for dct in cursor]\n\n cursor = self.businesses.find({\"address.zip_code\":zip_code})\n results += [Business.from_dict(dct) for dct in cursor]\n\n return results",
"def match_city(self, city, dpt_code, zip_code = None):\n city = format_str_city_insee(city)\n dpt_code = dpt_code.rjust(2, '0')\n if zip_code:\n zip_code.rjust(5, '0')\n # Based on zip code and city name\n ls_matching = []\n found_indicator = False\n if zip_code:\n if zip_code in self.dict_corr_zip_insee:\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_match')\n # If no exact zip, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_in_match(es)')\n # Based on dpt code and city name\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_dpt_insee[dpt_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_match')\n # If no exact dpt, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_dpt_insee[dpt_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_in_match(es)')\n # No match\n return (None, 'no_match')",
"def valid_zip_sum(line):\n zipcode = line.o_zip_code\n if not sum(int(x) for x in zipcode) <= 20:\n rule = 'Zipcode sum'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
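Several negative entries above reference clean_zipcode_rate_areas (see test_clean_zipcode_data_is_unique), whose implementation is likewise not included in this dump. A minimal sketch consistent with that test's expectations is shown below; since the test sorts both sides before comparing, any strategy that keeps the distinct (state, rate_area) pairs per zipcode would satisfy it, and the first-seen ordering here is only one possible choice.

def clean_zipcode_rate_areas(zipcode_rate_areas):
    # Deduplicate the (state, rate_area) tuples for every zipcode while
    # keeping first-seen order; only the distinct set matters to the test.
    return {
        zipcode: list(dict.fromkeys(rate_areas))
        for zipcode, rate_areas in zipcode_rate_areas.items()
    }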